org.hyperledger.fabric.protos.orderer.Kafka Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of fabric-sdk-java Show documentation
Java SDK for Hyperledger Fabric. Deprecated as of Fabric v2.5, replaced by org.hyperledger.fabric:fabric-gateway.
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: orderer/kafka.proto
package org.hyperledger.fabric.protos.orderer;
public final class Kafka {
// Private constructor: Kafka is a non-instantiable holder for generated message types.
private Kafka() {}
// No-op: kafka.proto declares no extensions, so there is nothing to register.
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistryLite registry) {
}
// Full-registry overload; delegates to the lite-registry variant above.
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(com.google.protobuf.ExtensionRegistryLite) registry);
}
// Read-only accessor interface shared by KafkaMessage and KafkaMessage.Builder.
// The regular/time_to_cut/connect fields form a oneof named "type"; at most one
// of the has*() methods returns true at a time (see getTypeCase()).
public interface KafkaMessageOrBuilder extends
// @@protoc_insertion_point(interface_extends:orderer.KafkaMessage)
com.google.protobuf.MessageOrBuilder {
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 * @return Whether the regular field is set.
 */
boolean hasRegular();
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 * @return The regular.
 */
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular getRegular();
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 */
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegularOrBuilder getRegularOrBuilder();
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 * @return Whether the timeToCut field is set.
 */
boolean hasTimeToCut();
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 * @return The timeToCut.
 */
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut getTimeToCut();
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 */
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCutOrBuilder getTimeToCutOrBuilder();
/**
 * {@code .orderer.KafkaMessageConnect connect = 3;}
 * @return Whether the connect field is set.
 */
boolean hasConnect();
/**
 * {@code .orderer.KafkaMessageConnect connect = 3;}
 * @return The connect.
 */
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect getConnect();
/**
 * {@code .orderer.KafkaMessageConnect connect = 3;}
 */
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnectOrBuilder getConnectOrBuilder();
// Identifies which member of the "type" oneof is currently set (or TYPE_NOT_SET).
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage.TypeCase getTypeCase();
}
/**
*
* KafkaMessage is a wrapper type for the messages
* that the Kafka-based orderer deals with.
*
*
* Protobuf type {@code orderer.KafkaMessage}
*/
public static final class KafkaMessage extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:orderer.KafkaMessage)
KafkaMessageOrBuilder {
private static final long serialVersionUID = 0L;
// Use KafkaMessage.newBuilder() to construct.
private KafkaMessage(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
super(builder);
}
private KafkaMessage() {
}
// Runtime hook used by the protobuf library to create fresh instances reflectively.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new KafkaMessage();
}
// Exposes fields that were present on the wire but unknown to this schema version.
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Static descriptor for orderer.KafkaMessage (initialized in the outer class).
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessage_descriptor;
}
// Wires reflective field access to the generated message and builder classes.
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessage_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage.class, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage.Builder.class);
}
// Oneof "type" state: typeCase_ holds the set field's number (0 = none),
// and type_ holds the corresponding message object (cast on access).
private int typeCase_ = 0;
private java.lang.Object type_;
// Enumerates which member of the "type" oneof is set.
public enum TypeCase
implements com.google.protobuf.Internal.EnumLite,
com.google.protobuf.AbstractMessage.InternalOneOfEnum {
REGULAR(1),
TIME_TO_CUT(2),
CONNECT(3),
TYPE_NOT_SET(0);
private final int value;
private TypeCase(int value) {
this.value = value;
}
/**
 * @param value The number of the enum to look for.
 * @return The enum associated with the given number.
 * @deprecated Use {@link #forNumber(int)} instead.
 */
@java.lang.Deprecated
public static TypeCase valueOf(int value) {
return forNumber(value);
}
// Maps a field number to its TypeCase; returns null for unrecognized numbers.
public static TypeCase forNumber(int value) {
switch (value) {
case 1: return REGULAR;
case 2: return TIME_TO_CUT;
case 3: return CONNECT;
case 0: return TYPE_NOT_SET;
default: return null;
}
}
public int getNumber() {
return this.value;
}
};
public TypeCase
getTypeCase() {
return TypeCase.forNumber(
typeCase_);
}
// --- oneof member: regular (field 1) ---
public static final int REGULAR_FIELD_NUMBER = 1;
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 * @return Whether the regular field is set.
 */
@java.lang.Override
public boolean hasRegular() {
return typeCase_ == 1;
}
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 * @return The regular, or the default instance if this oneof member is not set.
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular getRegular() {
if (typeCase_ == 1) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.getDefaultInstance();
}
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegularOrBuilder getRegularOrBuilder() {
if (typeCase_ == 1) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.getDefaultInstance();
}
// --- oneof member: time_to_cut (field 2) ---
public static final int TIME_TO_CUT_FIELD_NUMBER = 2;
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 * @return Whether the timeToCut field is set.
 */
@java.lang.Override
public boolean hasTimeToCut() {
return typeCase_ == 2;
}
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 * @return The timeToCut, or the default instance if this oneof member is not set.
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut getTimeToCut() {
if (typeCase_ == 2) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.getDefaultInstance();
}
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCutOrBuilder getTimeToCutOrBuilder() {
if (typeCase_ == 2) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.getDefaultInstance();
}
// --- oneof member: connect (field 3) ---
public static final int CONNECT_FIELD_NUMBER = 3;
/**
 * {@code .orderer.KafkaMessageConnect connect = 3;}
 * @return Whether the connect field is set.
 */
@java.lang.Override
public boolean hasConnect() {
return typeCase_ == 3;
}
/**
 * {@code .orderer.KafkaMessageConnect connect = 3;}
 * @return The connect, or the default instance if this oneof member is not set.
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect getConnect() {
if (typeCase_ == 3) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.getDefaultInstance();
}
/**
 * {@code .orderer.KafkaMessageConnect connect = 3;}
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnectOrBuilder getConnectOrBuilder() {
if (typeCase_ == 3) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.getDefaultInstance();
}
// Memoized initialization state: -1 unknown, 0 not initialized, 1 initialized.
private byte memoizedIsInitialized = -1;
// Always true here: proto3 messages have no required fields to validate.
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
// Serializes the set oneof member (if any) followed by unknown fields.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (typeCase_ == 1) {
output.writeMessage(1, (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular) type_);
}
if (typeCase_ == 2) {
output.writeMessage(2, (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut) type_);
}
if (typeCase_ == 3) {
output.writeMessage(3, (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect) type_);
}
getUnknownFields().writeTo(output);
}
// Computes (and memoizes in memoizedSize) the wire size of this message.
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (typeCase_ == 1) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular) type_);
}
if (typeCase_ == 2) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut) type_);
}
if (typeCase_ == 3) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect) type_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
// Structural equality: same oneof case, equal set member, equal unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage)) {
return super.equals(obj);
}
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage other = (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage) obj;
if (!getTypeCase().equals(other.getTypeCase())) return false;
switch (typeCase_) {
case 1:
if (!getRegular()
.equals(other.getRegular())) return false;
break;
case 2:
if (!getTimeToCut()
.equals(other.getTimeToCut())) return false;
break;
case 3:
if (!getConnect()
.equals(other.getConnect())) return false;
break;
case 0:
default:
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
// Memoized hash consistent with equals(): mixes descriptor, set oneof member,
// its field number, and unknown fields.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
switch (typeCase_) {
case 1:
hash = (37 * hash) + REGULAR_FIELD_NUMBER;
hash = (53 * hash) + getRegular().hashCode();
break;
case 2:
hash = (37 * hash) + TIME_TO_CUT_FIELD_NUMBER;
hash = (53 * hash) + getTimeToCut().hashCode();
break;
case 3:
hash = (37 * hash) + CONNECT_FIELD_NUMBER;
hash = (53 * hash) + getConnect().hashCode();
break;
case 0:
default:
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard generated parse entry points. All overloads delegate to the shared
// PARSER (declared later in the class); stream variants route through
// GeneratedMessageV3 helpers that translate parse failures to IOExceptions.
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Delimited variants read a varint length prefix before the message body.
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factory methods; newBuilder(prototype) seeds the builder with an
// existing message's contents via mergeFrom.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 *
 * KafkaMessage is a wrapper type for the messages
 * that the Kafka-based orderer deals with.
 *
 *
 * Protobuf type {@code orderer.KafkaMessage}
 */
public static final class Builder extends
// FIX(review): the original line read "GeneratedMessageV3.Builder implements" —
// the self-referential type argument was stripped during extraction, leaving a
// raw type. Restored to "Builder<Builder>" to match protoc's generated output.
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:orderer.KafkaMessage)
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageOrBuilder {
// Static descriptor for orderer.KafkaMessage, shared with the message class.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessage_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessage_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage.class, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage.Builder.class);
}
// Construct using org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage.newBuilder()
private Builder() {
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
// Resets the builder: clears each nested field builder and the oneof state.
@java.lang.Override
public Builder clear() {
super.clear();
if (regularBuilder_ != null) {
regularBuilder_.clear();
}
if (timeToCutBuilder_ != null) {
timeToCutBuilder_.clear();
}
if (connectBuilder_ != null) {
connectBuilder_.clear();
}
typeCase_ = 0;
type_ = null;
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessage_descriptor;
}
// Returns the shared immutable default KafkaMessage instance.
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage getDefaultInstanceForType() {
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage.getDefaultInstance();
}
// Builds the message, rejecting uninitialized results (always initialized in proto3).
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage build() {
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies the active oneof member into a new message: from type_ directly when
// no nested builder exists, otherwise from the nested field builder.
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage buildPartial() {
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage result = new org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage(this);
if (typeCase_ == 1) {
if (regularBuilder_ == null) {
result.type_ = type_;
} else {
result.type_ = regularBuilder_.build();
}
}
if (typeCase_ == 2) {
if (timeToCutBuilder_ == null) {
result.type_ = type_;
} else {
result.type_ = timeToCutBuilder_.build();
}
}
if (typeCase_ == 3) {
if (connectBuilder_ == null) {
result.type_ = type_;
} else {
result.type_ = connectBuilder_.build();
}
}
result.typeCase_ = typeCase_;
onBuilt();
return result;
}
// Reflective field-access overrides; all simply delegate to the superclass.
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
// Generic merge: dispatches to the typed overload for KafkaMessage instances.
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage) {
return mergeFrom((org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Typed merge: merges whichever oneof member is set on `other`, then unknown fields.
public Builder mergeFrom(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage other) {
if (other == org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage.getDefaultInstance()) return this;
switch (other.getTypeCase()) {
case REGULAR: {
mergeRegular(other.getRegular());
break;
}
case TIME_TO_CUT: {
mergeTimeToCut(other.getTimeToCut());
break;
}
case CONNECT: {
mergeConnect(other.getConnect());
break;
}
case TYPE_NOT_SET: {
break;
}
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
// Wire-format merge: reads tags until EOF (tag 0) or an end-group tag, routing
// each known field (tags 10/18/26 = fields 1/2/3, wire type 2) into its nested
// builder and recording the oneof case; unknown fields are preserved.
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
input.readMessage(
getRegularFieldBuilder().getBuilder(),
extensionRegistry);
typeCase_ = 1;
break;
} // case 10
case 18: {
input.readMessage(
getTimeToCutFieldBuilder().getBuilder(),
extensionRegistry);
typeCase_ = 2;
break;
} // case 18
case 26: {
input.readMessage(
getConnectFieldBuilder().getBuilder(),
extensionRegistry);
typeCase_ = 3;
break;
} // case 26
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
// Builder-side oneof state, mirroring the message's typeCase_/type_ pair.
private int typeCase_ = 0;
private java.lang.Object type_;
public TypeCase
getTypeCase() {
return TypeCase.forNumber(
typeCase_);
}
// Clears whichever "type" oneof member is set.
public Builder clearType() {
typeCase_ = 0;
type_ = null;
onChanged();
return this;
}
// --- builder accessors for oneof member: regular (field 1) ---
// Lazily-created nested builder; while null, the value lives directly in type_.
private com.google.protobuf.SingleFieldBuilderV3<
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Builder, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegularOrBuilder> regularBuilder_;
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 * @return Whether the regular field is set.
 */
@java.lang.Override
public boolean hasRegular() {
return typeCase_ == 1;
}
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 * @return The regular, or the default instance if this oneof member is not set.
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular getRegular() {
if (regularBuilder_ == null) {
if (typeCase_ == 1) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.getDefaultInstance();
} else {
if (typeCase_ == 1) {
return regularBuilder_.getMessage();
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.getDefaultInstance();
}
}
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 */
public Builder setRegular(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular value) {
if (regularBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
type_ = value;
onChanged();
} else {
regularBuilder_.setMessage(value);
}
typeCase_ = 1;
return this;
}
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 */
public Builder setRegular(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Builder builderForValue) {
if (regularBuilder_ == null) {
type_ = builderForValue.build();
onChanged();
} else {
regularBuilder_.setMessage(builderForValue.build());
}
typeCase_ = 1;
return this;
}
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 */
// Merges into an existing regular value when this member is already set;
// otherwise replaces it (and switches the oneof case to 1).
public Builder mergeRegular(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular value) {
if (regularBuilder_ == null) {
if (typeCase_ == 1 &&
type_ != org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.getDefaultInstance()) {
type_ = org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.newBuilder((org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular) type_)
.mergeFrom(value).buildPartial();
} else {
type_ = value;
}
onChanged();
} else {
if (typeCase_ == 1) {
regularBuilder_.mergeFrom(value);
} else {
regularBuilder_.setMessage(value);
}
}
typeCase_ = 1;
return this;
}
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 */
public Builder clearRegular() {
if (regularBuilder_ == null) {
if (typeCase_ == 1) {
typeCase_ = 0;
type_ = null;
onChanged();
}
} else {
if (typeCase_ == 1) {
typeCase_ = 0;
type_ = null;
}
regularBuilder_.clear();
}
return this;
}
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 */
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Builder getRegularBuilder() {
return getRegularFieldBuilder().getBuilder();
}
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegularOrBuilder getRegularOrBuilder() {
if ((typeCase_ == 1) && (regularBuilder_ != null)) {
return regularBuilder_.getMessageOrBuilder();
} else {
if (typeCase_ == 1) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.getDefaultInstance();
}
}
/**
 * {@code .orderer.KafkaMessageRegular regular = 1;}
 */
// Creates the nested builder on first use, seeding it from type_ (or the
// default instance) and forcing the oneof case to 1 as a side effect.
private com.google.protobuf.SingleFieldBuilderV3<
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Builder, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegularOrBuilder>
getRegularFieldBuilder() {
if (regularBuilder_ == null) {
if (!(typeCase_ == 1)) {
type_ = org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.getDefaultInstance();
}
regularBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Builder, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegularOrBuilder>(
(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular) type_,
getParentForChildren(),
isClean());
type_ = null;
}
typeCase_ = 1;
onChanged();;
return regularBuilder_;
}
// --- builder accessors for oneof member: time_to_cut (field 2) ---
// Lazily-created nested builder; while null, the value lives directly in type_.
private com.google.protobuf.SingleFieldBuilderV3<
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.Builder, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCutOrBuilder> timeToCutBuilder_;
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 * @return Whether the timeToCut field is set.
 */
@java.lang.Override
public boolean hasTimeToCut() {
return typeCase_ == 2;
}
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 * @return The timeToCut, or the default instance if this oneof member is not set.
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut getTimeToCut() {
if (timeToCutBuilder_ == null) {
if (typeCase_ == 2) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.getDefaultInstance();
} else {
if (typeCase_ == 2) {
return timeToCutBuilder_.getMessage();
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.getDefaultInstance();
}
}
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 */
public Builder setTimeToCut(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut value) {
if (timeToCutBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
type_ = value;
onChanged();
} else {
timeToCutBuilder_.setMessage(value);
}
typeCase_ = 2;
return this;
}
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 */
public Builder setTimeToCut(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.Builder builderForValue) {
if (timeToCutBuilder_ == null) {
type_ = builderForValue.build();
onChanged();
} else {
timeToCutBuilder_.setMessage(builderForValue.build());
}
typeCase_ = 2;
return this;
}
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 */
// Merges into an existing time_to_cut value when this member is already set;
// otherwise replaces it (and switches the oneof case to 2).
public Builder mergeTimeToCut(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut value) {
if (timeToCutBuilder_ == null) {
if (typeCase_ == 2 &&
type_ != org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.getDefaultInstance()) {
type_ = org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.newBuilder((org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut) type_)
.mergeFrom(value).buildPartial();
} else {
type_ = value;
}
onChanged();
} else {
if (typeCase_ == 2) {
timeToCutBuilder_.mergeFrom(value);
} else {
timeToCutBuilder_.setMessage(value);
}
}
typeCase_ = 2;
return this;
}
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 */
public Builder clearTimeToCut() {
if (timeToCutBuilder_ == null) {
if (typeCase_ == 2) {
typeCase_ = 0;
type_ = null;
onChanged();
}
} else {
if (typeCase_ == 2) {
typeCase_ = 0;
type_ = null;
}
timeToCutBuilder_.clear();
}
return this;
}
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 */
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.Builder getTimeToCutBuilder() {
return getTimeToCutFieldBuilder().getBuilder();
}
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCutOrBuilder getTimeToCutOrBuilder() {
if ((typeCase_ == 2) && (timeToCutBuilder_ != null)) {
return timeToCutBuilder_.getMessageOrBuilder();
} else {
if (typeCase_ == 2) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.getDefaultInstance();
}
}
/**
 * {@code .orderer.KafkaMessageTimeToCut time_to_cut = 2;}
 */
// Creates the nested builder on first use, seeding it from type_ (or the
// default instance) and forcing the oneof case to 2 as a side effect.
private com.google.protobuf.SingleFieldBuilderV3<
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.Builder, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCutOrBuilder>
getTimeToCutFieldBuilder() {
if (timeToCutBuilder_ == null) {
if (!(typeCase_ == 2)) {
type_ = org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.getDefaultInstance();
}
timeToCutBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.Builder, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCutOrBuilder>(
(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut) type_,
getParentForChildren(),
isClean());
type_ = null;
}
typeCase_ = 2;
onChanged();;
return timeToCutBuilder_;
}
private com.google.protobuf.SingleFieldBuilderV3<
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.Builder, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnectOrBuilder> connectBuilder_;
/**
* .orderer.KafkaMessageConnect connect = 3;
* @return Whether the connect field is set.
*/
@java.lang.Override
public boolean hasConnect() {
return typeCase_ == 3;
}
/**
* .orderer.KafkaMessageConnect connect = 3;
* @return The connect.
*/
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect getConnect() {
if (connectBuilder_ == null) {
if (typeCase_ == 3) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.getDefaultInstance();
} else {
if (typeCase_ == 3) {
return connectBuilder_.getMessage();
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.getDefaultInstance();
}
}
// --- Accessors for the 'connect' member (field 3) of the 'type' oneof. ---
// Until getConnectFieldBuilder() is first called the message is stored in the
// shared oneof slot type_ (with typeCase_ == 3); afterwards connectBuilder_
// owns the value and type_ is ignored for this case.
/**
 * <code>.orderer.KafkaMessageConnect connect = 3;</code>
 */
public Builder setConnect(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect value) {
if (connectBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
type_ = value;
onChanged();
} else {
connectBuilder_.setMessage(value);
}
typeCase_ = 3;
return this;
}
/**
 * <code>.orderer.KafkaMessageConnect connect = 3;</code>
 */
public Builder setConnect(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.Builder builderForValue) {
if (connectBuilder_ == null) {
type_ = builderForValue.build();
onChanged();
} else {
connectBuilder_.setMessage(builderForValue.build());
}
typeCase_ = 3;
return this;
}
/**
 * <code>.orderer.KafkaMessageConnect connect = 3;</code>
 */
public Builder mergeConnect(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect value) {
if (connectBuilder_ == null) {
// Field-merge only when 'connect' is already the active, non-default
// oneof case; otherwise the incoming value simply replaces the slot.
if (typeCase_ == 3 &&
type_ != org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.getDefaultInstance()) {
type_ = org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.newBuilder((org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect) type_)
.mergeFrom(value).buildPartial();
} else {
type_ = value;
}
onChanged();
} else {
if (typeCase_ == 3) {
connectBuilder_.mergeFrom(value);
} else {
connectBuilder_.setMessage(value);
}
}
typeCase_ = 3;
return this;
}
/**
 * <code>.orderer.KafkaMessageConnect connect = 3;</code>
 */
public Builder clearConnect() {
if (connectBuilder_ == null) {
if (typeCase_ == 3) {
typeCase_ = 0;
type_ = null;
onChanged();
}
} else {
if (typeCase_ == 3) {
typeCase_ = 0;
type_ = null;
}
connectBuilder_.clear();
}
return this;
}
/**
 * <code>.orderer.KafkaMessageConnect connect = 3;</code>
 */
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.Builder getConnectBuilder() {
return getConnectFieldBuilder().getBuilder();
}
/**
 * <code>.orderer.KafkaMessageConnect connect = 3;</code>
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnectOrBuilder getConnectOrBuilder() {
if ((typeCase_ == 3) && (connectBuilder_ != null)) {
return connectBuilder_.getMessageOrBuilder();
} else {
if (typeCase_ == 3) {
return (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect) type_;
}
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.getDefaultInstance();
}
}
/**
 * <code>.orderer.KafkaMessageConnect connect = 3;</code>
 *
 * Lazily creates the nested single-field builder for the 'connect' oneof
 * member and transfers ownership of the current value (if any) to it.
 * Always leaves 'connect' as the active oneof case.
 */
private com.google.protobuf.SingleFieldBuilderV3<
    org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.Builder, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnectOrBuilder>
    getConnectFieldBuilder() {
  if (connectBuilder_ == null) {
    if (!(typeCase_ == 3)) {
      // 'connect' was not the active case: seed the builder with the default.
      type_ = org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.getDefaultInstance();
    }
    connectBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
        org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.Builder, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnectOrBuilder>(
            (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect) type_,
            getParentForChildren(),
            isClean());
    type_ = null;  // ownership transferred to the nested builder
  }
  typeCase_ = 3;
  onChanged();  // fixed: removed stray empty statement (";;")
  return connectBuilder_;
}
// Standard delegating overrides for unknown-field handling on the builder.
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:orderer.KafkaMessage)
}
// @@protoc_insertion_point(class_scope:orderer.KafkaMessage)
// Singleton default (all-fields-unset) instance, created eagerly at class load.
private static final org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage();
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// NOTE(review): the type arguments on Parser/AbstractParser were missing
// (raw types) — the <KafkaMessage> parameters were evidently stripped by an
// HTML extraction pass; restored here to match protoc output.
private static final com.google.protobuf.Parser<KafkaMessage>
    PARSER = new com.google.protobuf.AbstractParser<KafkaMessage>() {
  @java.lang.Override
  public KafkaMessage parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // On any failure, attach the partially-decoded message to the exception
    // so callers can inspect what was parsed so far.
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
// Restored the <KafkaMessage> type argument on both Parser-returning methods
// (they appeared as raw types in the scraped source).
public static com.google.protobuf.Parser<KafkaMessage> parser() {
  return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<KafkaMessage> getParserForType() {
  return PARSER;
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessage getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
// Read-only view shared by KafkaMessageRegular and its Builder.
public interface KafkaMessageRegularOrBuilder extends
// @@protoc_insertion_point(interface_extends:orderer.KafkaMessageRegular)
com.google.protobuf.MessageOrBuilder {
/**
 * <code>bytes payload = 1;</code>
 * @return The payload.
 */
com.google.protobuf.ByteString getPayload();
/**
 * <code>uint64 config_seq = 2;</code>
 * @return The configSeq.
 */
long getConfigSeq();
/**
 * <code>.orderer.KafkaMessageRegular.Class class = 3;</code>
 * @return The enum numeric value on the wire for class.
 */
int getClass_Value();
/**
 * <code>.orderer.KafkaMessageRegular.Class class = 3;</code>
 * @return The class.
 */
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class getClass_();
/**
 * <code>int64 original_offset = 4;</code>
 * @return The originalOffset.
 */
long getOriginalOffset();
}
/**
*
* KafkaMessageRegular wraps a marshalled envelope.
*
*
* Protobuf type {@code orderer.KafkaMessageRegular}
*/
public static final class KafkaMessageRegular extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:orderer.KafkaMessageRegular)
KafkaMessageRegularOrBuilder {
private static final long serialVersionUID = 0L;
// Use KafkaMessageRegular.newBuilder() to construct.
private KafkaMessageRegular(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
super(builder);
}
// No-arg constructor used for the default instance; fields get proto3
// defaults (empty bytes, enum value 0).
private KafkaMessageRegular() {
payload_ = com.google.protobuf.ByteString.EMPTY;
class__ = 0;
}
// Called reflectively by the protobuf runtime to create instances without
// going through a builder.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new KafkaMessageRegular();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageRegular_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageRegular_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.class, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Builder.class);
}
/**
 * Protobuf enum {@code orderer.KafkaMessageRegular.Class}
 *
 * NOTE(review): restored the missing &lt;Class&gt; type arguments on
 * Internal.EnumLiteMap (they appeared as raw types in the scraped source).
 */
public enum Class
    implements com.google.protobuf.ProtocolMessageEnum {
  /**
   * <code>UNKNOWN = 0;</code>
   */
  UNKNOWN(0),
  /**
   * <code>NORMAL = 1;</code>
   */
  NORMAL(1),
  /**
   * <code>CONFIG = 2;</code>
   */
  CONFIG(2),
  // Sentinel for wire values not known to this generated code version.
  UNRECOGNIZED(-1),
  ;
  /**
   * <code>UNKNOWN = 0;</code>
   */
  public static final int UNKNOWN_VALUE = 0;
  /**
   * <code>NORMAL = 1;</code>
   */
  public static final int NORMAL_VALUE = 1;
  /**
   * <code>CONFIG = 2;</code>
   */
  public static final int CONFIG_VALUE = 2;
  public final int getNumber() {
    if (this == UNRECOGNIZED) {
      throw new java.lang.IllegalArgumentException(
          "Can't get the number of an unknown enum value.");
    }
    return value;
  }
  /**
   * @param value The numeric wire value of the corresponding enum entry.
   * @return The enum associated with the given numeric wire value.
   * @deprecated Use {@link #forNumber(int)} instead.
   */
  @java.lang.Deprecated
  public static Class valueOf(int value) {
    return forNumber(value);
  }
  /**
   * @param value The numeric wire value of the corresponding enum entry.
   * @return The enum associated with the given numeric wire value, or null
   *         for unknown values (callers map null to UNRECOGNIZED).
   */
  public static Class forNumber(int value) {
    switch (value) {
      case 0: return UNKNOWN;
      case 1: return NORMAL;
      case 2: return CONFIG;
      default: return null;
    }
  }
  public static com.google.protobuf.Internal.EnumLiteMap<Class>
      internalGetValueMap() {
    return internalValueMap;
  }
  private static final com.google.protobuf.Internal.EnumLiteMap<
      Class> internalValueMap =
        new com.google.protobuf.Internal.EnumLiteMap<Class>() {
          public Class findValueByNumber(int number) {
            return Class.forNumber(number);
          }
        };
  public final com.google.protobuf.Descriptors.EnumValueDescriptor
      getValueDescriptor() {
    if (this == UNRECOGNIZED) {
      throw new java.lang.IllegalStateException(
          "Can't get the descriptor of an unrecognized enum value.");
    }
    return getDescriptor().getValues().get(ordinal());
  }
  public final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptorForType() {
    return getDescriptor();
  }
  public static final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptor() {
    return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.getDescriptor().getEnumTypes().get(0);
  }
  private static final Class[] VALUES = values();
  public static Class valueOf(
      com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
    if (desc.getType() != getDescriptor()) {
      throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
    }
    if (desc.getIndex() == -1) {
      return UNRECOGNIZED;
    }
    return VALUES[desc.getIndex()];
  }
  private final int value;
  private Class(int value) {
    this.value = value;
  }
  // @@protoc_insertion_point(enum_scope:orderer.KafkaMessageRegular.Class)
}
// Message fields. Instances are immutable: these are assigned only during
// construction (via Builder.buildPartial).
public static final int PAYLOAD_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString payload_;
/**
 * <code>bytes payload = 1;</code>
 * @return The payload.
 */
@java.lang.Override
public com.google.protobuf.ByteString getPayload() {
return payload_;
}
public static final int CONFIG_SEQ_FIELD_NUMBER = 2;
private long configSeq_;
/**
 * <code>uint64 config_seq = 2;</code>
 * @return The configSeq.
 */
@java.lang.Override
public long getConfigSeq() {
return configSeq_;
}
public static final int CLASS_FIELD_NUMBER = 3;
// Stored as the raw wire integer so unknown enum values survive round-trips.
private int class__;
/**
 * <code>.orderer.KafkaMessageRegular.Class class = 3;</code>
 * @return The enum numeric value on the wire for class.
 */
@java.lang.Override public int getClass_Value() {
return class__;
}
/**
 * <code>.orderer.KafkaMessageRegular.Class class = 3;</code>
 * @return The class.
 */
@java.lang.Override public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class getClass_() {
@SuppressWarnings("deprecation")
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class result = org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class.valueOf(class__);
return result == null ? org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class.UNRECOGNIZED : result;
}
public static final int ORIGINAL_OFFSET_FIELD_NUMBER = 4;
private long originalOffset_;
/**
 * <code>int64 original_offset = 4;</code>
 * @return The originalOffset.
 */
@java.lang.Override
public long getOriginalOffset() {
return originalOffset_;
}
private byte memoizedIsInitialized = -1;
// Proto3 message with no required fields: always initialized; result memoized.
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
// Proto3 serialization: a field is written only when it differs from its
// default value; unknown fields are preserved at the end.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (!payload_.isEmpty()) {
output.writeBytes(1, payload_);
}
if (configSeq_ != 0L) {
output.writeUInt64(2, configSeq_);
}
if (class__ != org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class.UNKNOWN.getNumber()) {
output.writeEnum(3, class__);
}
if (originalOffset_ != 0L) {
output.writeInt64(4, originalOffset_);
}
getUnknownFields().writeTo(output);
}
// Mirrors writeTo: size counted only for non-default fields; memoized.
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!payload_.isEmpty()) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, payload_);
}
if (configSeq_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, configSeq_);
}
if (class__ != org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class.UNKNOWN.getNumber()) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(3, class__);
}
if (originalOffset_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(4, originalOffset_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
// Value equality over all four fields plus unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular)) {
return super.equals(obj);
}
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular other = (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular) obj;
if (!getPayload()
.equals(other.getPayload())) return false;
if (getConfigSeq()
!= other.getConfigSeq()) return false;
if (class__ != other.class__) return false;
if (getOriginalOffset()
!= other.getOriginalOffset()) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
// Hash is memoized; safe because instances are immutable.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + PAYLOAD_FIELD_NUMBER;
hash = (53 * hash) + getPayload().hashCode();
hash = (37 * hash) + CONFIG_SEQ_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getConfigSeq());
hash = (37 * hash) + CLASS_FIELD_NUMBER;
hash = (53 * hash) + class__;
hash = (37 * hash) + ORIGINAL_OFFSET_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getOriginalOffset());
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard generated parse entry points (one overload per input source),
// followed by the builder factory methods.
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
// The default instance yields a fresh empty builder; anything else is copied.
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * <pre>
 * KafkaMessageRegular wraps a marshalled envelope.
 * </pre>
 *
 * Protobuf type {@code orderer.KafkaMessageRegular}
 */
// NOTE(review): restored the <Builder> type argument on the superclass —
// the scraped source extended raw GeneratedMessageV3.Builder, which does
// not compile as the self-typed generated builder.
public static final class Builder extends
    com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
    // @@protoc_insertion_point(builder_implements:orderer.KafkaMessageRegular)
    org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegularOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageRegular_descriptor;
  }
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageRegular_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.class, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Builder.class);
}
// Construct using org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.newBuilder()
private Builder() {
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
// Resets every field to its proto3 default.
@java.lang.Override
public Builder clear() {
super.clear();
payload_ = com.google.protobuf.ByteString.EMPTY;
configSeq_ = 0L;
class__ = 0;
originalOffset_ = 0L;
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageRegular_descriptor;
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular getDefaultInstanceForType() {
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.getDefaultInstance();
}
// build() enforces initialization; for this proto3 message isInitialized()
// is always true, so it never actually throws.
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular build() {
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies the builder's scalar state into a new immutable message.
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular buildPartial() {
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular result = new org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular(this);
result.payload_ = payload_;
result.configSeq_ = configSeq_;
result.class__ = class__;
result.originalOffset_ = originalOffset_;
onBuilt();
return result;
}
// Delegating overrides of the reflective mutation API (required by the
// generated-code contract; behavior lives in the superclass).
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular) {
return mergeFrom((org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Proto3 merge semantics: copy only fields set to a non-default value.
public Builder mergeFrom(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular other) {
if (other == org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.getDefaultInstance()) return this;
if (other.getPayload() != com.google.protobuf.ByteString.EMPTY) {
setPayload(other.getPayload());
}
if (other.getConfigSeq() != 0L) {
setConfigSeq(other.getConfigSeq());
}
if (other.class__ != 0) {
setClass_Value(other.getClass_Value());
}
if (other.getOriginalOffset() != 0L) {
setOriginalOffset(other.getOriginalOffset());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
// Wire-format parse loop: dispatches on each tag (field number << 3 | wire
// type); tag 0 means end of stream, unrecognized tags go to unknown fields.
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
payload_ = input.readBytes();
break;
} // case 10
case 16: {
configSeq_ = input.readUInt64();
break;
} // case 16
case 24: {
class__ = input.readEnum();
break;
} // case 24
case 32: {
originalOffset_ = input.readInt64();
break;
} // case 32
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
// --- Per-field builder state and accessors (set/clear/get per field). ---
private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY;
/**
 * <code>bytes payload = 1;</code>
 * @return The payload.
 */
@java.lang.Override
public com.google.protobuf.ByteString getPayload() {
return payload_;
}
/**
 * <code>bytes payload = 1;</code>
 * @param value The payload to set.
 * @return This builder for chaining.
 */
public Builder setPayload(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
payload_ = value;
onChanged();
return this;
}
/**
 * <code>bytes payload = 1;</code>
 * @return This builder for chaining.
 */
public Builder clearPayload() {
payload_ = getDefaultInstance().getPayload();
onChanged();
return this;
}
private long configSeq_ ;
/**
 * <code>uint64 config_seq = 2;</code>
 * @return The configSeq.
 */
@java.lang.Override
public long getConfigSeq() {
return configSeq_;
}
/**
 * <code>uint64 config_seq = 2;</code>
 * @param value The configSeq to set.
 * @return This builder for chaining.
 */
public Builder setConfigSeq(long value) {
configSeq_ = value;
onChanged();
return this;
}
/**
 * <code>uint64 config_seq = 2;</code>
 * @return This builder for chaining.
 */
public Builder clearConfigSeq() {
configSeq_ = 0L;
onChanged();
return this;
}
// Enum stored as its raw wire integer so unknown values are preserved.
private int class__ = 0;
/**
 * <code>.orderer.KafkaMessageRegular.Class class = 3;</code>
 * @return The enum numeric value on the wire for class.
 */
@java.lang.Override public int getClass_Value() {
return class__;
}
/**
 * <code>.orderer.KafkaMessageRegular.Class class = 3;</code>
 * @param value The enum numeric value on the wire for class to set.
 * @return This builder for chaining.
 */
public Builder setClass_Value(int value) {
class__ = value;
onChanged();
return this;
}
/**
 * <code>.orderer.KafkaMessageRegular.Class class = 3;</code>
 * @return The class.
 */
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class getClass_() {
@SuppressWarnings("deprecation")
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class result = org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class.valueOf(class__);
return result == null ? org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class.UNRECOGNIZED : result;
}
/**
 * <code>.orderer.KafkaMessageRegular.Class class = 3;</code>
 * @param value The class to set.
 * @return This builder for chaining.
 */
public Builder setClass_(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular.Class value) {
if (value == null) {
throw new NullPointerException();
}
class__ = value.getNumber();
onChanged();
return this;
}
/**
 * <code>.orderer.KafkaMessageRegular.Class class = 3;</code>
 * @return This builder for chaining.
 */
public Builder clearClass_() {
class__ = 0;
onChanged();
return this;
}
private long originalOffset_ ;
/**
 * <code>int64 original_offset = 4;</code>
 * @return The originalOffset.
 */
@java.lang.Override
public long getOriginalOffset() {
return originalOffset_;
}
/**
 * <code>int64 original_offset = 4;</code>
 * @param value The originalOffset to set.
 * @return This builder for chaining.
 */
public Builder setOriginalOffset(long value) {
originalOffset_ = value;
onChanged();
return this;
}
/**
 * <code>int64 original_offset = 4;</code>
 * @return This builder for chaining.
 */
public Builder clearOriginalOffset() {
originalOffset_ = 0L;
onChanged();
return this;
}
// Standard delegating overrides for unknown-field handling on the builder.
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:orderer.KafkaMessageRegular)
}
// @@protoc_insertion_point(class_scope:orderer.KafkaMessageRegular)
// Singleton default (all-fields-default) instance, created eagerly at class load.
private static final org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular();
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// NOTE(review): restored the missing <KafkaMessageRegular> type arguments on
// Parser/AbstractParser (raw types in the scraped source — generics were
// evidently stripped by an HTML extraction pass).
private static final com.google.protobuf.Parser<KafkaMessageRegular>
    PARSER = new com.google.protobuf.AbstractParser<KafkaMessageRegular>() {
  @java.lang.Override
  public KafkaMessageRegular parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // On any failure, attach the partially-decoded message to the exception.
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
// Restored the <KafkaMessageRegular> type argument on both Parser-returning
// methods (raw types in the scraped source).
public static com.google.protobuf.Parser<KafkaMessageRegular> parser() {
  return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<KafkaMessageRegular> getParserForType() {
  return PARSER;
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageRegular getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
// Read-only view shared by KafkaMessageTimeToCut and its Builder.
public interface KafkaMessageTimeToCutOrBuilder extends
// @@protoc_insertion_point(interface_extends:orderer.KafkaMessageTimeToCut)
com.google.protobuf.MessageOrBuilder {
/**
 * <code>uint64 block_number = 1;</code>
 * @return The blockNumber.
 */
long getBlockNumber();
}
/**
*
* KafkaMessageTimeToCut is used to signal to the orderers
* that it is time to cut block <block_number>.
*
*
* Protobuf type {@code orderer.KafkaMessageTimeToCut}
*/
public static final class KafkaMessageTimeToCut extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:orderer.KafkaMessageTimeToCut)
KafkaMessageTimeToCutOrBuilder {
private static final long serialVersionUID = 0L;
// Use KafkaMessageTimeToCut.newBuilder() to construct.
private KafkaMessageTimeToCut(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
super(builder);
}
private KafkaMessageTimeToCut() {
}
// Called reflectively by the protobuf runtime to create instances without
// going through a builder.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new KafkaMessageTimeToCut();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageTimeToCut_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageTimeToCut_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.class, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.Builder.class);
}
// Single message field; assigned only during construction (immutable message).
public static final int BLOCK_NUMBER_FIELD_NUMBER = 1;
private long blockNumber_;
/**
 * <code>uint64 block_number = 1;</code>
 * @return The blockNumber.
 */
@java.lang.Override
public long getBlockNumber() {
return blockNumber_;
}
private byte memoizedIsInitialized = -1;
// Proto3 message with no required fields: always initialized; result memoized.
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
// Proto3 serialization: block_number is written only when non-zero.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (blockNumber_ != 0L) {
output.writeUInt64(1, blockNumber_);
}
getUnknownFields().writeTo(output);
}
// Mirrors writeTo; result memoized in memoizedSize.
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (blockNumber_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, blockNumber_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
// Value equality over block_number plus unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut)) {
return super.equals(obj);
}
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut other = (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut) obj;
if (getBlockNumber()
!= other.getBlockNumber()) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
// Hash is memoized; safe because instances are immutable.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + BLOCK_NUMBER_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getBlockNumber());
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard generated parse entry points (one overload per input source).
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factory methods. newBuilder() starts from the default instance;
// newBuilder(prototype) pre-populates from an existing message; toBuilder()
// skips the redundant mergeFrom when invoked on the default instance itself.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
  return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut prototype) {
  return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
  return this == DEFAULT_INSTANCE
      ? new Builder() : new Builder().mergeFrom(this);
}
// Internal hook used by parent builders to create a child builder that
// reports changes upward.
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
 * <pre>
 * KafkaMessageTimeToCut is used to signal to the orderers
 * that it is time to cut block &lt;block_number&gt;.
 * </pre>
 *
 * Protobuf type {@code orderer.KafkaMessageTimeToCut}
 */
public static final class Builder extends
    // Restored generic argument <Builder>: the raw supertype was an
    // extraction artifact and does not match what protoc emits.
    com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
    // @@protoc_insertion_point(builder_implements:orderer.KafkaMessageTimeToCut)
    org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCutOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageTimeToCut_descriptor;
  }
  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageTimeToCut_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.class, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.Builder.class);
  }
  // Construct using org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.newBuilder()
  private Builder() {
  }
  private Builder(
      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    super(parent);
  }
  @java.lang.Override
  public Builder clear() {
    super.clear();
    blockNumber_ = 0L;
    return this;
  }
  @java.lang.Override
  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageTimeToCut_descriptor;
  }
  @java.lang.Override
  public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut getDefaultInstanceForType() {
    return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.getDefaultInstance();
  }
  @java.lang.Override
  public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut build() {
    org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }
  @java.lang.Override
  public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut buildPartial() {
    org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut result = new org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut(this);
    result.blockNumber_ = blockNumber_;
    onBuilt();
    return result;
  }
  @java.lang.Override
  public Builder clone() {
    return super.clone();
  }
  @java.lang.Override
  public Builder setField(
      com.google.protobuf.Descriptors.FieldDescriptor field,
      java.lang.Object value) {
    return super.setField(field, value);
  }
  @java.lang.Override
  public Builder clearField(
      com.google.protobuf.Descriptors.FieldDescriptor field) {
    return super.clearField(field);
  }
  @java.lang.Override
  public Builder clearOneof(
      com.google.protobuf.Descriptors.OneofDescriptor oneof) {
    return super.clearOneof(oneof);
  }
  @java.lang.Override
  public Builder setRepeatedField(
      com.google.protobuf.Descriptors.FieldDescriptor field,
      int index, java.lang.Object value) {
    return super.setRepeatedField(field, index, value);
  }
  @java.lang.Override
  public Builder addRepeatedField(
      com.google.protobuf.Descriptors.FieldDescriptor field,
      java.lang.Object value) {
    return super.addRepeatedField(field, value);
  }
  @java.lang.Override
  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut) {
      return mergeFrom((org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }
  // Proto3 merge semantics: only non-default (non-zero) scalar fields of
  // 'other' overwrite this builder's values.
  public Builder mergeFrom(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut other) {
    if (other == org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut.getDefaultInstance()) return this;
    if (other.getBlockNumber() != 0L) {
      setBlockNumber(other.getBlockNumber());
    }
    this.mergeUnknownFields(other.getUnknownFields());
    onChanged();
    return this;
  }
  @java.lang.Override
  public final boolean isInitialized() {
    return true;
  }
  // Wire-format parse loop: tag 8 = field 1 (uint64 block_number);
  // unrecognized tags are preserved as unknown fields.
  @java.lang.Override
  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    if (extensionRegistry == null) {
      throw new java.lang.NullPointerException();
    }
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          case 8: {
            blockNumber_ = input.readUInt64();
            break;
          } // case 8
          default: {
            if (!super.parseUnknownField(input, extensionRegistry, tag)) {
              done = true; // was an endgroup tag
            }
            break;
          } // default:
        } // switch (tag)
      } // while (!done)
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.unwrapIOException();
    } finally {
      onChanged();
    } // finally
    return this;
  }
  private long blockNumber_ ;
  /**
   * <code>uint64 block_number = 1;</code>
   * @return The blockNumber.
   */
  @java.lang.Override
  public long getBlockNumber() {
    return blockNumber_;
  }
  /**
   * <code>uint64 block_number = 1;</code>
   * @param value The blockNumber to set.
   * @return This builder for chaining.
   */
  public Builder setBlockNumber(long value) {
    blockNumber_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>uint64 block_number = 1;</code>
   * @return This builder for chaining.
   */
  public Builder clearBlockNumber() {
    blockNumber_ = 0L;
    onChanged();
    return this;
  }
  @java.lang.Override
  public final Builder setUnknownFields(
      final com.google.protobuf.UnknownFieldSet unknownFields) {
    return super.setUnknownFields(unknownFields);
  }
  @java.lang.Override
  public final Builder mergeUnknownFields(
      final com.google.protobuf.UnknownFieldSet unknownFields) {
    return super.mergeUnknownFields(unknownFields);
  }
  // @@protoc_insertion_point(builder_scope:orderer.KafkaMessageTimeToCut)
}
// @@protoc_insertion_point(class_scope:orderer.KafkaMessageTimeToCut)
// Singleton all-defaults instance, created eagerly in the class initializer
// and shared by every caller of getDefaultInstance().
private static final org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut DEFAULT_INSTANCE;
static {
  DEFAULT_INSTANCE = new org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut();
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut getDefaultInstance() {
  return DEFAULT_INSTANCE;
}
// Stateless parser singleton for KafkaMessageTimeToCut. parsePartialFrom
// attaches the partially built message to any InvalidProtocolBufferException
// so callers can inspect what was decoded before the failure. Restored the
// generic arguments on Parser/AbstractParser (they were stripped to raw
// types, which would not compile against the protobuf runtime).
private static final com.google.protobuf.Parser<KafkaMessageTimeToCut>
    PARSER = new com.google.protobuf.AbstractParser<KafkaMessageTimeToCut>() {
  @java.lang.Override
  public KafkaMessageTimeToCut parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
// Parser accessors; restored the message-type generic argument on the
// Parser return types (raw types were an extraction artifact).
public static com.google.protobuf.Parser<KafkaMessageTimeToCut> parser() {
  return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<KafkaMessageTimeToCut> getParserForType() {
  return PARSER;
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageTimeToCut getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
/**
 * Read-only accessor contract for {@code orderer.KafkaMessageConnect},
 * implemented by both the message class and its Builder.
 */
public interface KafkaMessageConnectOrBuilder extends
    // @@protoc_insertion_point(interface_extends:orderer.KafkaMessageConnect)
    com.google.protobuf.MessageOrBuilder {
  /**
   * <code>bytes payload = 1;</code>
   * @return The payload.
   */
  com.google.protobuf.ByteString getPayload();
}
/**
 * <pre>
 * KafkaMessageConnect is posted by an orderer upon booting up.
 * It is used to prevent the panic that would be caused if we
 * were to consume an empty partition. It is ignored by all
 * orderers when processing the partition.
 * </pre>
 *
 * Protobuf type {@code orderer.KafkaMessageConnect}
 */
public static final class KafkaMessageConnect extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:orderer.KafkaMessageConnect)
KafkaMessageConnectOrBuilder {
private static final long serialVersionUID = 0L;
// Use KafkaMessageConnect.newBuilder() to construct.
private KafkaMessageConnect(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
super(builder);
}
private KafkaMessageConnect() {
payload_ = com.google.protobuf.ByteString.EMPTY;
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new KafkaMessageConnect();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageConnect_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageConnect_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.class, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.Builder.class);
}
public static final int PAYLOAD_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString payload_;
/**
* bytes payload = 1;
* @return The payload.
*/
@java.lang.Override
public com.google.protobuf.ByteString getPayload() {
return payload_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (!payload_.isEmpty()) {
output.writeBytes(1, payload_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!payload_.isEmpty()) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, payload_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect)) {
return super.equals(obj);
}
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect other = (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect) obj;
if (!getPayload()
.equals(other.getPayload())) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + PAYLOAD_FIELD_NUMBER;
hash = (53 * hash) + getPayload().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* KafkaMessageConnect is posted by an orderer upon booting up.
* It is used to prevent the panic that would be caused if we
* were to consume an empty partition. It is ignored by all
* orderers when processing the partition.
*
*
* Protobuf type {@code orderer.KafkaMessageConnect}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:orderer.KafkaMessageConnect)
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnectOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageConnect_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageConnect_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.class, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.Builder.class);
}
// Construct using org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.newBuilder()
private Builder() {
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
payload_ = com.google.protobuf.ByteString.EMPTY;
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMessageConnect_descriptor;
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect getDefaultInstanceForType() {
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.getDefaultInstance();
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect build() {
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect buildPartial() {
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect result = new org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect(this);
result.payload_ = payload_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect) {
return mergeFrom((org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect other) {
if (other == org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect.getDefaultInstance()) return this;
if (other.getPayload() != com.google.protobuf.ByteString.EMPTY) {
setPayload(other.getPayload());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
payload_ = input.readBytes();
break;
} // case 10
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY;
/**
* bytes payload = 1;
* @return The payload.
*/
@java.lang.Override
public com.google.protobuf.ByteString getPayload() {
return payload_;
}
/**
* bytes payload = 1;
* @param value The payload to set.
* @return This builder for chaining.
*/
public Builder setPayload(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
payload_ = value;
onChanged();
return this;
}
/**
* bytes payload = 1;
* @return This builder for chaining.
*/
public Builder clearPayload() {
payload_ = getDefaultInstance().getPayload();
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:orderer.KafkaMessageConnect)
}
// @@protoc_insertion_point(class_scope:orderer.KafkaMessageConnect)
private static final org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect();
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser
PARSER = new com.google.protobuf.AbstractParser() {
@java.lang.Override
public KafkaMessageConnect parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (com.google.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static com.google.protobuf.Parser parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMessageConnect getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
/**
 * Read-only accessor contract for {@code orderer.KafkaMetadata},
 * implemented by both the message class and its Builder.
 */
public interface KafkaMetadataOrBuilder extends
    // @@protoc_insertion_point(interface_extends:orderer.KafkaMetadata)
    com.google.protobuf.MessageOrBuilder {
  /**
   * <pre>
   * LastOffsetPersisted is the encoded value for the Metadata message
   * which is encoded in the ORDERER block metadata index for the case
   * of the Kafka-based orderer.
   * </pre>
   *
   * <code>int64 last_offset_persisted = 1;</code>
   * @return The lastOffsetPersisted.
   */
  long getLastOffsetPersisted();
  /**
   * <pre>
   * LastOriginalOffsetProcessed is used to keep track of the newest
   * offset processed if a message is re-validated and re-ordered.
   * This value is used to deduplicate re-submitted messages from
   * multiple orderer so that we don't bother re-processing it again.
   * </pre>
   *
   * <code>int64 last_original_offset_processed = 2;</code>
   * @return The lastOriginalOffsetProcessed.
   */
  long getLastOriginalOffsetProcessed();
  /**
   * <pre>
   * LastResubmittedConfigOffset is used to capture the newest offset of
   * CONFIG kafka message, which is revalidated and resubmitted. By comparing
   * this with LastOriginalOffsetProcessed, we could detemine whether there
   * are still CONFIG messages that have been resubmitted but NOT processed
   * yet. It's used as condition to block ingress messages, so we could reduce
   * the overhead of repeatedly resubmitting messages as config seq keeps
   * advancing.
   * </pre>
   *
   * <code>int64 last_resubmitted_config_offset = 3;</code>
   * @return The lastResubmittedConfigOffset.
   */
  long getLastResubmittedConfigOffset();
}
/**
 * <pre>
 * KafkaMetadata is encoded into the ORDERER block to keep track of
 * Kafka-related metadata associated with this block.
 * </pre>
 *
 * Protobuf type {@code orderer.KafkaMetadata}
 */
public static final class KafkaMetadata extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:orderer.KafkaMetadata)
KafkaMetadataOrBuilder {
private static final long serialVersionUID = 0L;
// Use KafkaMetadata.newBuilder() to construct.
// Restored the stripped generic wildcard on the builder parameter.
private KafkaMetadata(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// No-arg constructor leaves all int64 fields at their proto3 default (0).
private KafkaMetadata() {
}
// Reflection plumbing generated by protoc: instance factory used by the
// runtime, unknown-field access, and descriptor/field-accessor lookups
// that tie this class to the file-level descriptor tables.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
    UnusedPrivateParameter unused) {
  return new KafkaMetadata();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMetadata_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMetadata_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata.class, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata.Builder.class);
}
public static final int LAST_OFFSET_PERSISTED_FIELD_NUMBER = 1;
private long lastOffsetPersisted_;
/**
 * <pre>
 * LastOffsetPersisted is the encoded value for the Metadata message
 * which is encoded in the ORDERER block metadata index for the case
 * of the Kafka-based orderer.
 * </pre>
 *
 * <code>int64 last_offset_persisted = 1;</code>
 * @return The lastOffsetPersisted.
 */
@java.lang.Override
public long getLastOffsetPersisted() {
  return lastOffsetPersisted_;
}
public static final int LAST_ORIGINAL_OFFSET_PROCESSED_FIELD_NUMBER = 2;
private long lastOriginalOffsetProcessed_;
/**
 * <pre>
 * LastOriginalOffsetProcessed is used to keep track of the newest
 * offset processed if a message is re-validated and re-ordered.
 * This value is used to deduplicate re-submitted messages from
 * multiple orderer so that we don't bother re-processing it again.
 * </pre>
 *
 * <code>int64 last_original_offset_processed = 2;</code>
 * @return The lastOriginalOffsetProcessed.
 */
@java.lang.Override
public long getLastOriginalOffsetProcessed() {
  return lastOriginalOffsetProcessed_;
}
public static final int LAST_RESUBMITTED_CONFIG_OFFSET_FIELD_NUMBER = 3;
private long lastResubmittedConfigOffset_;
/**
 * <pre>
 * LastResubmittedConfigOffset is used to capture the newest offset of
 * CONFIG kafka message, which is revalidated and resubmitted. By comparing
 * this with LastOriginalOffsetProcessed, we could detemine whether there
 * are still CONFIG messages that have been resubmitted but NOT processed
 * yet. It's used as condition to block ingress messages, so we could reduce
 * the overhead of repeatedly resubmitting messages as config seq keeps
 * advancing.
 * </pre>
 *
 * <code>int64 last_resubmitted_config_offset = 3;</code>
 * @return The lastResubmittedConfigOffset.
 */
@java.lang.Override
public long getLastResubmittedConfigOffset() {
  return lastResubmittedConfigOffset_;
}
// memoizedIsInitialized: -1 = unknown, 0 = not initialized, 1 = initialized.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized == 1) return true;
  if (isInitialized == 0) return false;
  memoizedIsInitialized = 1;
  return true;
}
// Proto3 serialization: fields at their default value (0) are not written.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  if (lastOffsetPersisted_ != 0L) {
    output.writeInt64(1, lastOffsetPersisted_);
  }
  if (lastOriginalOffsetProcessed_ != 0L) {
    output.writeInt64(2, lastOriginalOffsetProcessed_);
  }
  if (lastResubmittedConfigOffset_ != 0L) {
    output.writeInt64(3, lastResubmittedConfigOffset_);
  }
  getUnknownFields().writeTo(output);
}
// Serialized size mirrors writeTo exactly and is memoized (-1 = unknown).
@java.lang.Override
public int getSerializedSize() {
  int size = memoizedSize;
  if (size != -1) return size;
  size = 0;
  if (lastOffsetPersisted_ != 0L) {
    size += com.google.protobuf.CodedOutputStream
        .computeInt64Size(1, lastOffsetPersisted_);
  }
  if (lastOriginalOffsetProcessed_ != 0L) {
    size += com.google.protobuf.CodedOutputStream
        .computeInt64Size(2, lastOriginalOffsetProcessed_);
  }
  if (lastResubmittedConfigOffset_ != 0L) {
    size += com.google.protobuf.CodedOutputStream
        .computeInt64Size(3, lastResubmittedConfigOffset_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSize = size;
  return size;
}
// Value equality over all three offset fields plus unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata)) {
    return super.equals(obj);
  }
  org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata other = (org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata) obj;
  if (getLastOffsetPersisted()
      != other.getLastOffsetPersisted()) return false;
  if (getLastOriginalOffsetProcessed()
      != other.getLastOriginalOffsetProcessed()) return false;
  if (getLastResubmittedConfigOffset()
      != other.getLastResubmittedConfigOffset()) return false;
  if (!getUnknownFields().equals(other.getUnknownFields())) return false;
  return true;
}
// Generated hashCode consistent with equals: mixes descriptor, each field
// (tagged by field number), and unknown fields; memoized since immutable.
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptor().hashCode();
  hash = (37 * hash) + LAST_OFFSET_PERSISTED_FIELD_NUMBER;
  hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
      getLastOffsetPersisted());
  hash = (37 * hash) + LAST_ORIGINAL_OFFSET_PROCESSED_FIELD_NUMBER;
  hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
      getLastOriginalOffsetProcessed());
  hash = (37 * hash) + LAST_RESUBMITTED_CONFIG_OFFSET_FIELD_NUMBER;
  hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
      getLastResubmittedConfigOffset());
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
// Standard generated parse entry points. The byte-based overloads (ByteBuffer,
// ByteString, byte[]) throw InvalidProtocolBufferException on malformed input;
// the stream-based overloads additionally surface IOExceptions from the
// underlying stream. Each has a variant accepting an ExtensionRegistryLite
// (unused by this extension-free message, but part of the standard API).
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Delimited variants read a varint length prefix before the message body, so
// multiple messages can be framed on one stream.
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
// Creates a fresh builder with all fields at their proto3 defaults.
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
// Creates a builder pre-populated with the non-default fields of prototype.
public static Builder newBuilder(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
// The default instance maps to an empty Builder; any other instance is copied
// into the new builder via mergeFrom.
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* KafkaMetadata is encoded into the ORDERER block to keep track of
* Kafka-related metadata associated with this block.
*
*
* Protobuf type {@code orderer.KafkaMetadata}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:orderer.KafkaMetadata)
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadataOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMetadata_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMetadata_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata.class, org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata.Builder.class);
}
// Construct using org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata.newBuilder()
private Builder() {
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
lastOffsetPersisted_ = 0L;
lastOriginalOffsetProcessed_ = 0L;
lastResubmittedConfigOffset_ = 0L;
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.hyperledger.fabric.protos.orderer.Kafka.internal_static_orderer_KafkaMetadata_descriptor;
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata getDefaultInstanceForType() {
return org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata.getDefaultInstance();
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata build() {
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata buildPartial() {
org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata result = new org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata(this);
result.lastOffsetPersisted_ = lastOffsetPersisted_;
result.lastOriginalOffsetProcessed_ = lastOriginalOffsetProcessed_;
result.lastResubmittedConfigOffset_ = lastResubmittedConfigOffset_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata) {
return mergeFrom((org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata other) {
if (other == org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata.getDefaultInstance()) return this;
if (other.getLastOffsetPersisted() != 0L) {
setLastOffsetPersisted(other.getLastOffsetPersisted());
}
if (other.getLastOriginalOffsetProcessed() != 0L) {
setLastOriginalOffsetProcessed(other.getLastOriginalOffsetProcessed());
}
if (other.getLastResubmittedConfigOffset() != 0L) {
setLastResubmittedConfigOffset(other.getLastResubmittedConfigOffset());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
lastOffsetPersisted_ = input.readInt64();
break;
} // case 8
case 16: {
lastOriginalOffsetProcessed_ = input.readInt64();
break;
} // case 16
case 24: {
lastResubmittedConfigOffset_ = input.readInt64();
break;
} // case 24
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private long lastOffsetPersisted_ ;
/**
*
* LastOffsetPersisted is the encoded value for the Metadata message
* which is encoded in the ORDERER block metadata index for the case
* of the Kafka-based orderer.
*
*
* int64 last_offset_persisted = 1;
* @return The lastOffsetPersisted.
*/
@java.lang.Override
public long getLastOffsetPersisted() {
return lastOffsetPersisted_;
}
/**
*
* LastOffsetPersisted is the encoded value for the Metadata message
* which is encoded in the ORDERER block metadata index for the case
* of the Kafka-based orderer.
*
*
* int64 last_offset_persisted = 1;
* @param value The lastOffsetPersisted to set.
* @return This builder for chaining.
*/
public Builder setLastOffsetPersisted(long value) {
lastOffsetPersisted_ = value;
onChanged();
return this;
}
/**
*
* LastOffsetPersisted is the encoded value for the Metadata message
* which is encoded in the ORDERER block metadata index for the case
* of the Kafka-based orderer.
*
*
* int64 last_offset_persisted = 1;
* @return This builder for chaining.
*/
public Builder clearLastOffsetPersisted() {
lastOffsetPersisted_ = 0L;
onChanged();
return this;
}
private long lastOriginalOffsetProcessed_ ;
/**
*
* LastOriginalOffsetProcessed is used to keep track of the newest
* offset processed if a message is re-validated and re-ordered.
* This value is used to deduplicate re-submitted messages from
* multiple orderer so that we don't bother re-processing it again.
*
*
* int64 last_original_offset_processed = 2;
* @return The lastOriginalOffsetProcessed.
*/
@java.lang.Override
public long getLastOriginalOffsetProcessed() {
return lastOriginalOffsetProcessed_;
}
/**
*
* LastOriginalOffsetProcessed is used to keep track of the newest
* offset processed if a message is re-validated and re-ordered.
* This value is used to deduplicate re-submitted messages from
* multiple orderer so that we don't bother re-processing it again.
*
*
* int64 last_original_offset_processed = 2;
* @param value The lastOriginalOffsetProcessed to set.
* @return This builder for chaining.
*/
public Builder setLastOriginalOffsetProcessed(long value) {
lastOriginalOffsetProcessed_ = value;
onChanged();
return this;
}
/**
*
* LastOriginalOffsetProcessed is used to keep track of the newest
* offset processed if a message is re-validated and re-ordered.
* This value is used to deduplicate re-submitted messages from
* multiple orderer so that we don't bother re-processing it again.
*
*
* int64 last_original_offset_processed = 2;
* @return This builder for chaining.
*/
public Builder clearLastOriginalOffsetProcessed() {
lastOriginalOffsetProcessed_ = 0L;
onChanged();
return this;
}
private long lastResubmittedConfigOffset_ ;
/**
*
* LastResubmittedConfigOffset is used to capture the newest offset of
* CONFIG kafka message, which is revalidated and resubmitted. By comparing
* this with LastOriginalOffsetProcessed, we could detemine whether there
* are still CONFIG messages that have been resubmitted but NOT processed
* yet. It's used as condition to block ingress messages, so we could reduce
* the overhead of repeatedly resubmitting messages as config seq keeps
* advancing.
*
*
* int64 last_resubmitted_config_offset = 3;
* @return The lastResubmittedConfigOffset.
*/
@java.lang.Override
public long getLastResubmittedConfigOffset() {
return lastResubmittedConfigOffset_;
}
/**
*
* LastResubmittedConfigOffset is used to capture the newest offset of
* CONFIG kafka message, which is revalidated and resubmitted. By comparing
* this with LastOriginalOffsetProcessed, we could detemine whether there
* are still CONFIG messages that have been resubmitted but NOT processed
* yet. It's used as condition to block ingress messages, so we could reduce
* the overhead of repeatedly resubmitting messages as config seq keeps
* advancing.
*
*
* int64 last_resubmitted_config_offset = 3;
* @param value The lastResubmittedConfigOffset to set.
* @return This builder for chaining.
*/
public Builder setLastResubmittedConfigOffset(long value) {
lastResubmittedConfigOffset_ = value;
onChanged();
return this;
}
/**
*
* LastResubmittedConfigOffset is used to capture the newest offset of
* CONFIG kafka message, which is revalidated and resubmitted. By comparing
* this with LastOriginalOffsetProcessed, we could detemine whether there
* are still CONFIG messages that have been resubmitted but NOT processed
* yet. It's used as condition to block ingress messages, so we could reduce
* the overhead of repeatedly resubmitting messages as config seq keeps
* advancing.
*
*
* int64 last_resubmitted_config_offset = 3;
* @return This builder for chaining.
*/
public Builder clearLastResubmittedConfigOffset() {
lastResubmittedConfigOffset_ = 0L;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:orderer.KafkaMetadata)
}
// @@protoc_insertion_point(class_scope:orderer.KafkaMetadata)
private static final org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata DEFAULT_INSTANCE;
static {
  DEFAULT_INSTANCE = new org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata();
}

public static org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata getDefaultInstance() {
  return DEFAULT_INSTANCE;
}

// FIX: restored the <KafkaMetadata> type arguments below; they were stripped
// when this generated file passed through an HTML pipeline, leaving raw
// Parser/AbstractParser types (unchecked warnings, loss of type safety at
// parser() call sites).
private static final com.google.protobuf.Parser<KafkaMetadata>
    PARSER = new com.google.protobuf.AbstractParser<KafkaMetadata>() {
  @java.lang.Override
  // Parses a (possibly partial) KafkaMetadata; any parse failure carries the
  // partially built message via setUnfinishedMessage so callers can inspect it.
  public KafkaMetadata parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};

public static com.google.protobuf.Parser<KafkaMetadata> parser() {
  return PARSER;
}

@java.lang.Override
public com.google.protobuf.Parser<KafkaMetadata> getParserForType() {
  return PARSER;
}

@java.lang.Override
public org.hyperledger.fabric.protos.orderer.Kafka.KafkaMetadata getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
// Per-message descriptor and reflection accessor-table handles for every
// message declared in orderer/kafka.proto. All are assigned once by the
// static initializer below.
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_orderer_KafkaMessage_descriptor;
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_orderer_KafkaMessage_fieldAccessorTable;
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_orderer_KafkaMessageRegular_descriptor;
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_orderer_KafkaMessageRegular_fieldAccessorTable;
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_orderer_KafkaMessageTimeToCut_descriptor;
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_orderer_KafkaMessageTimeToCut_fieldAccessorTable;
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_orderer_KafkaMessageConnect_descriptor;
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_orderer_KafkaMessageConnect_fieldAccessorTable;
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_orderer_KafkaMetadata_descriptor;
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_orderer_KafkaMetadata_fieldAccessorTable;
// Returns the file-level descriptor for orderer/kafka.proto.
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
// File descriptor built from the serialized proto below; effectively final
// after the static initializer runs.
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
// Serialized FileDescriptorProto for orderer/kafka.proto, embedded as an
// escaped string. DO NOT EDIT this data: it must match the compiled schema
// byte-for-byte or descriptor parsing will fail at class load.
java.lang.String[] descriptorData = {
"\n\023orderer/kafka.proto\022\007orderer\"\257\001\n\014Kafka" +
"Message\022/\n\007regular\030\001 \001(\0132\034.orderer.Kafka" +
"MessageRegularH\000\0225\n\013time_to_cut\030\002 \001(\0132\036." +
"orderer.KafkaMessageTimeToCutH\000\022/\n\007conne" +
"ct\030\003 \001(\0132\034.orderer.KafkaMessageConnectH\000" +
"B\006\n\004Type\"\264\001\n\023KafkaMessageRegular\022\017\n\007payl" +
"oad\030\001 \001(\014\022\022\n\nconfig_seq\030\002 \001(\004\0221\n\005class\030\003" +
" \001(\0162\".orderer.KafkaMessageRegular.Class" +
"\022\027\n\017original_offset\030\004 \001(\003\",\n\005Class\022\013\n\007UN" +
"KNOWN\020\000\022\n\n\006NORMAL\020\001\022\n\n\006CONFIG\020\002\"-\n\025Kafka" +
"MessageTimeToCut\022\024\n\014block_number\030\001 \001(\004\"&" +
"\n\023KafkaMessageConnect\022\017\n\007payload\030\001 \001(\014\"~" +
"\n\rKafkaMetadata\022\035\n\025last_offset_persisted" +
"\030\001 \001(\003\022&\n\036last_original_offset_processed" +
"\030\002 \001(\003\022&\n\036last_resubmitted_config_offset" +
"\030\003 \001(\003BX\n%org.hyperledger.fabric.protos." +
"ordererZ/github.com/hyperledger/fabric-p" +
"rotos-go/ordererb\006proto3"
};
// Build the runtime FileDescriptor (no dependencies: empty imports array).
descriptor = com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
});
// Wire up each message's descriptor and field-accessor table. Indices follow
// declaration order in the .proto file; field-name arrays must match the
// generated accessor names (note "Class_" escapes the Java keyword, and
// "Type" is the oneof in KafkaMessage).
internal_static_orderer_KafkaMessage_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_orderer_KafkaMessage_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_orderer_KafkaMessage_descriptor,
new java.lang.String[] { "Regular", "TimeToCut", "Connect", "Type", });
internal_static_orderer_KafkaMessageRegular_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_orderer_KafkaMessageRegular_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_orderer_KafkaMessageRegular_descriptor,
new java.lang.String[] { "Payload", "ConfigSeq", "Class_", "OriginalOffset", });
internal_static_orderer_KafkaMessageTimeToCut_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_orderer_KafkaMessageTimeToCut_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_orderer_KafkaMessageTimeToCut_descriptor,
new java.lang.String[] { "BlockNumber", });
internal_static_orderer_KafkaMessageConnect_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_orderer_KafkaMessageConnect_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_orderer_KafkaMessageConnect_descriptor,
new java.lang.String[] { "Payload", });
internal_static_orderer_KafkaMetadata_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_orderer_KafkaMetadata_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_orderer_KafkaMetadata_descriptor,
new java.lang.String[] { "LastOffsetPersisted", "LastOriginalOffsetProcessed", "LastResubmittedConfigOffset", });
}
// @@protoc_insertion_point(outer_class_scope)
}