/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;
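/**
 * Request data for the OffsetDelete API (api key 47), which removes committed
 * offsets for a set of topic partitions from a consumer group. Only schema
 * version 0 is defined here; it is not a flexible version, so any tagged
 * fields are rejected at serialization time.
 */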
public class OffsetDeleteRequestData implements ApiMessage {
String groupId;
OffsetDeleteRequestTopicCollection topics;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("group_id", Type.STRING, "The unique group identifier."),
new Field("topics", new ArrayOf(OffsetDeleteRequestTopic.SCHEMA_0), "The topics to delete offsets for")
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 0;
public OffsetDeleteRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
public OffsetDeleteRequestData() {
this.groupId = "";
this.topics = new OffsetDeleteRequestTopicCollection(0);
}
@Override
public short apiKey() {
return 47;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
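// Version 0 is not flexible: strings are encoded as an int16 length followed by
// UTF-8 bytes, and arrays as an int32 element count followed by the elements.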
@Override
public void read(Readable _readable, short _version) {
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field groupId was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field groupId had invalid length " + length);
} else {
this.groupId = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field topics was serialized as null");
} else {
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
OffsetDeleteRequestTopicCollection newCollection = new OffsetDeleteRequestTopicCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new OffsetDeleteRequestTopic(_readable, _version));
}
this.topics = newCollection;
}
}
this._unknownTaggedFields = null;
}
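// Writes the fields in schema order, reusing the UTF-8 bytes that addSize()
// stored in the ObjectSerializationCache for the groupId string.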
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(groupId);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeInt(topics.size());
for (OffsetDeleteRequestTopic topicsElement : topics) {
topicsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
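// Accumulates the serialized size of this message. addSize() must run before
// write() with the same ObjectSerializationCache, since it caches the UTF-8
// encoding of groupId that write() later retrieves.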
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'groupId' field is too long to be serialized");
}
_cache.cacheSerializedValue(groupId, _stringBytes);
_size.addBytes(_stringBytes.length + 2);
}
{
_size.addBytes(4);
for (OffsetDeleteRequestTopic topicsElement : topics) {
topicsElement.addSize(_size, _cache, _version);
}
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof OffsetDeleteRequestData)) return false;
OffsetDeleteRequestData other = (OffsetDeleteRequestData) obj;
if (this.groupId == null) {
if (other.groupId != null) return false;
} else {
if (!this.groupId.equals(other.groupId)) return false;
}
if (this.topics == null) {
if (other.topics != null) return false;
} else {
if (!this.topics.equals(other.topics)) return false;
}
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode());
hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
return hashCode;
}
@Override
public OffsetDeleteRequestData duplicate() {
OffsetDeleteRequestData _duplicate = new OffsetDeleteRequestData();
_duplicate.groupId = groupId;
OffsetDeleteRequestTopicCollection newTopics = new OffsetDeleteRequestTopicCollection(topics.size());
for (OffsetDeleteRequestTopic _element : topics) {
newTopics.add(_element.duplicate());
}
_duplicate.topics = newTopics;
return _duplicate;
}
@Override
public String toString() {
return "OffsetDeleteRequestData("
+ "groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'")
+ ", topics=" + MessageUtil.deepToString(topics.iterator())
+ ")";
}
public String groupId() {
return this.groupId;
}
public OffsetDeleteRequestTopicCollection topics() {
return this.topics;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public OffsetDeleteRequestData setGroupId(String v) {
this.groupId = v;
return this;
}
public OffsetDeleteRequestData setTopics(OffsetDeleteRequestTopicCollection v) {
this.topics = v;
return this;
}
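/**
 * A single topic whose committed offsets should be deleted. Elements are
 * linked into an OffsetDeleteRequestTopicCollection via the prev/next indices
 * required by ImplicitLinkedHashCollection.
 */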
public static class OffsetDeleteRequestTopic implements Message, ImplicitLinkedHashMultiCollection.Element {
String name;
List<OffsetDeleteRequestPartition> partitions;
private List<RawTaggedField> _unknownTaggedFields;
private int next;
private int prev;
public static final Schema SCHEMA_0 =
new Schema(
new Field("name", Type.STRING, "The topic name."),
new Field("partitions", new ArrayOf(OffsetDeleteRequestPartition.SCHEMA_0), "Each partition to delete offsets for.")
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 0;
public OffsetDeleteRequestTopic(Readable _readable, short _version) {
read(_readable, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public OffsetDeleteRequestTopic() {
this.name = "";
this.partitions = new ArrayList<OffsetDeleteRequestPartition>(0);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetDeleteRequestTopic");
}
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field name was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field name had invalid length " + length);
} else {
this.name = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field partitions was serialized as null");
} else {
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<OffsetDeleteRequestPartition> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new OffsetDeleteRequestPartition(_readable, _version));
}
this.partitions = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(name);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeInt(partitions.size());
for (OffsetDeleteRequestPartition partitionsElement : partitions) {
partitionsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version > 0) {
throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetDeleteRequestTopic");
}
{
byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'name' field is too long to be serialized");
}
_cache.cacheSerializedValue(name, _stringBytes);
_size.addBytes(_stringBytes.length + 2);
}
{
_size.addBytes(4);
for (OffsetDeleteRequestPartition partitionsElement : partitions) {
partitionsElement.addSize(_size, _cache, _version);
}
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
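// Only the topic name acts as the key inside the containing collection, so
// elementKeysAreEqual() and hashCode() deliberately ignore the partitions list.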
@Override
public boolean elementKeysAreEqual(Object obj) {
if (!(obj instanceof OffsetDeleteRequestTopic)) return false;
OffsetDeleteRequestTopic other = (OffsetDeleteRequestTopic) obj;
if (this.name == null) {
if (other.name != null) return false;
} else {
if (!this.name.equals(other.name)) return false;
}
return true;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof OffsetDeleteRequestTopic)) return false;
OffsetDeleteRequestTopic other = (OffsetDeleteRequestTopic) obj;
if (this.name == null) {
if (other.name != null) return false;
} else {
if (!this.name.equals(other.name)) return false;
}
if (this.partitions == null) {
if (other.partitions != null) return false;
} else {
if (!this.partitions.equals(other.partitions)) return false;
}
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
return hashCode;
}
@Override
public OffsetDeleteRequestTopic duplicate() {
OffsetDeleteRequestTopic _duplicate = new OffsetDeleteRequestTopic();
_duplicate.name = name;
ArrayList<OffsetDeleteRequestPartition> newPartitions = new ArrayList<OffsetDeleteRequestPartition>(partitions.size());
for (OffsetDeleteRequestPartition _element : partitions) {
newPartitions.add(_element.duplicate());
}
_duplicate.partitions = newPartitions;
return _duplicate;
}
@Override
public String toString() {
return "OffsetDeleteRequestTopic("
+ "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
+ ", partitions=" + MessageUtil.deepToString(partitions.iterator())
+ ")";
}
public String name() {
return this.name;
}
public List<OffsetDeleteRequestPartition> partitions() {
return this.partitions;
}
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public OffsetDeleteRequestTopic setName(String v) {
this.name = v;
return this;
}
public OffsetDeleteRequestTopic setPartitions(List<OffsetDeleteRequestPartition> v) {
this.partitions = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
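/**
 * A single partition index within a topic whose committed offset should be
 * deleted.
 */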
public static class OffsetDeleteRequestPartition implements Message {
int partitionIndex;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("partition_index", Type.INT32, "The partition index.")
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 0;
public OffsetDeleteRequestPartition(Readable _readable, short _version) {
read(_readable, _version);
}
public OffsetDeleteRequestPartition() {
this.partitionIndex = 0;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetDeleteRequestPartition");
}
this.partitionIndex = _readable.readInt();
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(partitionIndex);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version > 0) {
throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetDeleteRequestPartition");
}
_size.addBytes(4);
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof OffsetDeleteRequestPartition)) return false;
OffsetDeleteRequestPartition other = (OffsetDeleteRequestPartition) obj;
if (partitionIndex != other.partitionIndex) return false;
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + partitionIndex;
return hashCode;
}
@Override
public OffsetDeleteRequestPartition duplicate() {
OffsetDeleteRequestPartition _duplicate = new OffsetDeleteRequestPartition();
_duplicate.partitionIndex = partitionIndex;
return _duplicate;
}
@Override
public String toString() {
return "OffsetDeleteRequestPartition("
+ "partitionIndex=" + partitionIndex
+ ")";
}
public int partitionIndex() {
return this.partitionIndex;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public OffsetDeleteRequestPartition setPartitionIndex(int v) {
this.partitionIndex = v;
return this;
}
}
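/**
 * A multi-collection of OffsetDeleteRequestTopic elements keyed by topic name,
 * supporting hash-based find() and findAll() lookups by name.
 */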
public static class OffsetDeleteRequestTopicCollection extends ImplicitLinkedHashMultiCollection<OffsetDeleteRequestTopic> {
public OffsetDeleteRequestTopicCollection() {
super();
}
public OffsetDeleteRequestTopicCollection(int expectedNumElements) {
super(expectedNumElements);
}
public OffsetDeleteRequestTopicCollection(Iterator<OffsetDeleteRequestTopic> iterator) {
super(iterator);
}
public OffsetDeleteRequestTopic find(String name) {
OffsetDeleteRequestTopic _key = new OffsetDeleteRequestTopic();
_key.setName(name);
return find(_key);
}
public List<OffsetDeleteRequestTopic> findAll(String name) {
OffsetDeleteRequestTopic _key = new OffsetDeleteRequestTopic();
_key.setName(name);
return findAll(_key);
}
public OffsetDeleteRequestTopicCollection duplicate() {
OffsetDeleteRequestTopicCollection _duplicate = new OffsetDeleteRequestTopicCollection(size());
for (OffsetDeleteRequestTopic _element : this) {
_duplicate.add(_element.duplicate());
}
return _duplicate;
}
}
}
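// Illustrative usage sketch (not part of the generated code): building a request
// that deletes the committed offsets of partitions 0 and 1 of topic "foo" for
// consumer group "my-group", using only the setters defined above. The group and
// topic names are made-up example values.
//
//   OffsetDeleteRequestData request = new OffsetDeleteRequestData()
//       .setGroupId("my-group");
//   OffsetDeleteRequestData.OffsetDeleteRequestTopicCollection topics =
//       new OffsetDeleteRequestData.OffsetDeleteRequestTopicCollection();
//   topics.add(new OffsetDeleteRequestData.OffsetDeleteRequestTopic()
//       .setName("foo")
//       .setPartitions(java.util.Arrays.asList(
//           new OffsetDeleteRequestData.OffsetDeleteRequestPartition().setPartitionIndex(0),
//           new OffsetDeleteRequestData.OffsetDeleteRequestPartition().setPartitionIndex(1))));
//   request.setTopics(topics);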