
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: proto/clarifai/api/resources.proto

package com.clarifai.grpc.api;

/**
 * &lt;pre&gt;
 * TaskWorkerPartitionedStrategyInfo
 * &lt;/pre&gt;
 *
 * Protobuf type {@code clarifai.api.TaskWorkerPartitionedStrategyInfo}
 */
public final class TaskWorkerPartitionedStrategyInfo extends
    com.google.protobuf.GeneratedMessageV3 implements
    // @@protoc_insertion_point(message_implements:clarifai.api.TaskWorkerPartitionedStrategyInfo)
    TaskWorkerPartitionedStrategyInfoOrBuilder {
private static final long serialVersionUID = 0L;
  // Use TaskWorkerPartitionedStrategyInfo.newBuilder() to construct.
  private TaskWorkerPartitionedStrategyInfo(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }
  private TaskWorkerPartitionedStrategyInfo() {
    type_ = 0;
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(
      UnusedPrivateParameter unused) {
    return new TaskWorkerPartitionedStrategyInfo();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet
  getUnknownFields() {
    return this.unknownFields;
  }
  private TaskWorkerPartitionedStrategyInfo(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    this();
    if (extensionRegistry == null) {
      throw new java.lang.NullPointerException();
    }
    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
        com.google.protobuf.UnknownFieldSet.newBuilder();
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          case 8: {
            int rawValue = input.readEnum();

            type_ = rawValue;
            break;
          }
          case 16: {

            workersPerInput_ = input.readInt32();
            break;
          }
          case 26: {
            com.google.protobuf.Struct.Builder subBuilder = null;
            if (weights_ != null) {
              subBuilder = weights_.toBuilder();
            }
            weights_ = input.readMessage(com.google.protobuf.Struct.parser(), extensionRegistry);
            if (subBuilder != null) {
              subBuilder.mergeFrom(weights_);
              weights_ = subBuilder.buildPartial();
            }

            break;
          }
          default: {
            if (!parseUnknownField(
                input, unknownFields, extensionRegistry, tag)) {
              done = true;
            }
            break;
          }
        }
      }
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(
          e).setUnfinishedMessage(this);
    } finally {
      this.unknownFields = unknownFields.build();
      makeExtensionsImmutable();
    }
  }
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return com.clarifai.grpc.api.Resources.internal_static_clarifai_api_TaskWorkerPartitionedStrategyInfo_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.clarifai.grpc.api.Resources.internal_static_clarifai_api_TaskWorkerPartitionedStrategyInfo_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.class, com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.Builder.class);
  }

  /**
   * Protobuf enum {@code clarifai.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy}
   */
  public enum TaskWorkerPartitionedStrategy
      implements com.google.protobuf.ProtocolMessageEnum {
    /**
     * <code>PARTITIONED_WORKER_STRATEGY_NOT_SET = 0;</code>
     */
    PARTITIONED_WORKER_STRATEGY_NOT_SET(0),
    /**
     * &lt;pre&gt;
     * Each worker will label (approximately) the same number of inputs.
     * 
     * &lt;/pre&gt;
     *
     * <code>EVENLY = 1;</code>
     */
    EVENLY(1),
    /**
     * &lt;pre&gt;
     * Each worker will have an assigned weight.
     * See weights field for more details.
     * 
     * &lt;/pre&gt;
     *
     * <code>WEIGHTED = 2;</code>
     */
    WEIGHTED(2),
    UNRECOGNIZED(-1),
    ;

    /**
     * <code>PARTITIONED_WORKER_STRATEGY_NOT_SET = 0;</code>
     */
    public static final int PARTITIONED_WORKER_STRATEGY_NOT_SET_VALUE = 0;
    /**
     * &lt;pre&gt;
     * Each worker will label (approximately) the same number of inputs.
     * 
     * &lt;/pre&gt;
     *
     * <code>EVENLY = 1;</code>
     */
    public static final int EVENLY_VALUE = 1;
    /**
     * &lt;pre&gt;
     * Each worker will have an assigned weight.
     * See weights field for more details.
     * 
     * &lt;/pre&gt;
     *
     * <code>WEIGHTED = 2;</code>
     */
    public static final int WEIGHTED_VALUE = 2;

    public final int getNumber() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalArgumentException(
            "Can't get the number of an unknown enum value.");
      }
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static TaskWorkerPartitionedStrategy valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static TaskWorkerPartitionedStrategy forNumber(int value) {
      switch (value) {
        case 0: return PARTITIONED_WORKER_STRATEGY_NOT_SET;
        case 1: return EVENLY;
        case 2: return WEIGHTED;
        default: return null;
      }
    }

    public static com.google.protobuf.Internal.EnumLiteMap<TaskWorkerPartitionedStrategy>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final com.google.protobuf.Internal.EnumLiteMap<
        TaskWorkerPartitionedStrategy> internalValueMap =
          new com.google.protobuf.Internal.EnumLiteMap<TaskWorkerPartitionedStrategy>() {
            public TaskWorkerPartitionedStrategy findValueByNumber(int number) {
              return TaskWorkerPartitionedStrategy.forNumber(number);
            }
          };

    public final com.google.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalStateException(
            "Can't get the descriptor of an unrecognized enum value.");
      }
      return getDescriptor().getValues().get(ordinal());
    }
    public final com.google.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final com.google.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.getDescriptor().getEnumTypes().get(0);
    }

    private static final TaskWorkerPartitionedStrategy[] VALUES = values();

    public static TaskWorkerPartitionedStrategy valueOf(
        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      if (desc.getIndex() == -1) {
        return UNRECOGNIZED;
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private TaskWorkerPartitionedStrategy(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:clarifai.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy)
  }

  public static final int TYPE_FIELD_NUMBER = 1;
  private int type_;
  /**
   * &lt;pre&gt;
   * Define how the partitioning should work.
   * 
   * &lt;/pre&gt;
   *
   * <code>.clarifai.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy type = 1;</code>
   * @return The enum numeric value on the wire for type.
   */
  @java.lang.Override public int getTypeValue() {
    return type_;
  }
  /**
   * &lt;pre&gt;
   * Define how the partitioning should work.
   * 
   * &lt;/pre&gt;
   *
   * <code>.clarifai.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy type = 1;</code>
   * @return The type.
   */
  @java.lang.Override public com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy getType() {
    @SuppressWarnings("deprecation")
    com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy result = com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy.valueOf(type_);
    return result == null ? com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy.UNRECOGNIZED : result;
  }

  public static final int WORKERS_PER_INPUT_FIELD_NUMBER = 2;
  private int workersPerInput_;
  /**
   * &lt;pre&gt;
   * How many workers will label each input.
   * 
   * &lt;/pre&gt;
   *
   * <code>int32 workers_per_input = 2;</code>
   * @return The workersPerInput.
   */
  @java.lang.Override
  public int getWorkersPerInput() {
    return workersPerInput_;
  }

  public static final int WEIGHTS_FIELD_NUMBER = 3;
  private com.google.protobuf.Struct weights_;
  /**
   * &lt;pre&gt;
   * In case of weighted partitioning, map user IDs to weights.
   * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
   * EXAMPLE:
   * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
   * then the first worker will be assigned 30% of the work,
   * the second worker will be assigned 30% of the work,
   * and the third worker will be assigned 40% of the work.
   * You may use weights that add up to 100, but this is not required.
   * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
   * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
   * NOTE:
   * No worker should be assigned a weight percentage greater than 1/workers_per_input;
   * it is mathematically impossible to partition the work in such a case.
   * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
   * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
   * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
   * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
   * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
   * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
   * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
   * &lt;/pre&gt;
   *
   * <code>.google.protobuf.Struct weights = 3;</code>
   * @return Whether the weights field is set.
   */
  @java.lang.Override
  public boolean hasWeights() {
    return weights_ != null;
  }
  /**
   * &lt;pre&gt;
   * In case of weighted partitioning, map user IDs to weights.
   * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
   * EXAMPLE:
   * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
   * then the first worker will be assigned 30% of the work,
   * the second worker will be assigned 30% of the work,
   * and the third worker will be assigned 40% of the work.
   * You may use weights that add up to 100, but this is not required.
   * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
   * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
   * NOTE:
   * No worker should be assigned a weight percentage greater than 1/workers_per_input;
   * it is mathematically impossible to partition the work in such a case.
   * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
   * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
   * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
   * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
   * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
   * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
   * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
   * &lt;/pre&gt;
   *
   * <code>.google.protobuf.Struct weights = 3;</code>
   * @return The weights.
   */
  @java.lang.Override
  public com.google.protobuf.Struct getWeights() {
    return weights_ == null ? com.google.protobuf.Struct.getDefaultInstance() : weights_;
  }
  /**
   * &lt;pre&gt;
   * In case of weighted partitioning, map user IDs to weights.
   * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
   * EXAMPLE:
   * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
   * then the first worker will be assigned 30% of the work,
   * the second worker will be assigned 30% of the work,
   * and the third worker will be assigned 40% of the work.
   * You may use weights that add up to 100, but this is not required.
   * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
   * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
   * NOTE:
   * No worker should be assigned a weight percentage greater than 1/workers_per_input;
   * it is mathematically impossible to partition the work in such a case.
   * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
   * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
   * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
   * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
   * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
   * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
   * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
   * &lt;/pre&gt;
   *
   * <code>.google.protobuf.Struct weights = 3;</code>
   */
  @java.lang.Override
  public com.google.protobuf.StructOrBuilder getWeightsOrBuilder() {
    return getWeights();
  }

  private byte memoizedIsInitialized = -1;
  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output)
                      throws java.io.IOException {
    if (type_ != com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy.PARTITIONED_WORKER_STRATEGY_NOT_SET.getNumber()) {
      output.writeEnum(1, type_);
    }
    if (workersPerInput_ != 0) {
      output.writeInt32(2, workersPerInput_);
    }
    if (weights_ != null) {
      output.writeMessage(3, getWeights());
    }
    unknownFields.writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (type_ != com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy.PARTITIONED_WORKER_STRATEGY_NOT_SET.getNumber()) {
      size += com.google.protobuf.CodedOutputStream
        .computeEnumSize(1, type_);
    }
    if (workersPerInput_ != 0) {
      size += com.google.protobuf.CodedOutputStream
        .computeInt32Size(2, workersPerInput_);
    }
    if (weights_ != null) {
      size += com.google.protobuf.CodedOutputStream
        .computeMessageSize(3, getWeights());
    }
    size += unknownFields.getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
     return true;
    }
    if (!(obj instanceof com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo)) {
      return super.equals(obj);
    }
    com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo other = (com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo) obj;

    if (type_ != other.type_) return false;
    if (getWorkersPerInput()
        != other.getWorkersPerInput()) return false;
    if (hasWeights() != other.hasWeights()) return false;
    if (hasWeights()) {
      if (!getWeights()
          .equals(other.getWeights())) return false;
    }
    if (!unknownFields.equals(other.unknownFields)) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + TYPE_FIELD_NUMBER;
    hash = (53 * hash) + type_;
    hash = (37 * hash) + WORKERS_PER_INPUT_FIELD_NUMBER;
    hash = (53 * hash) + getWorkersPerInput();
    if (hasWeights()) {
      hash = (37 * hash) + WEIGHTS_FIELD_NUMBER;
      hash = (53 * hash) + getWeights().hashCode();
    }
    hash = (29 * hash) + unknownFields.hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseFrom(
      java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseFrom(
      java.nio.ByteBuffer data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseFrom(
      byte[] data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseDelimitedFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseDelimitedFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseFrom(
      com.google.protobuf.CodedInputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() { return newBuilder(); }
  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }
  public static Builder newBuilder(com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }
  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE
        ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(
      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   * &lt;pre&gt;
   * TaskWorkerPartitionedStrategyInfo
   * &lt;/pre&gt;
   *
   * Protobuf type {@code clarifai.api.TaskWorkerPartitionedStrategyInfo}
   */
  public static final class Builder extends
      com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
      // @@protoc_insertion_point(builder_implements:clarifai.api.TaskWorkerPartitionedStrategyInfo)
      com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfoOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return com.clarifai.grpc.api.Resources.internal_static_clarifai_api_TaskWorkerPartitionedStrategyInfo_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.clarifai.grpc.api.Resources.internal_static_clarifai_api_TaskWorkerPartitionedStrategyInfo_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.class, com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.Builder.class);
    }

    // Construct using com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(
        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }
    private void maybeForceBuilderInitialization() {
      if (com.google.protobuf.GeneratedMessageV3
              .alwaysUseFieldBuilders) {
      }
    }
    @java.lang.Override
    public Builder clear() {
      super.clear();
      type_ = 0;

      workersPerInput_ = 0;

      if (weightsBuilder_ == null) {
        weights_ = null;
      } else {
        weights_ = null;
        weightsBuilder_ = null;
      }
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor
        getDescriptorForType() {
      return com.clarifai.grpc.api.Resources.internal_static_clarifai_api_TaskWorkerPartitionedStrategyInfo_descriptor;
    }

    @java.lang.Override
    public com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo getDefaultInstanceForType() {
      return com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.getDefaultInstance();
    }

    @java.lang.Override
    public com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo build() {
      com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo buildPartial() {
      com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo result = new com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo(this);
      result.type_ = type_;
      result.workersPerInput_ = workersPerInput_;
      if (weightsBuilder_ == null) {
        result.weights_ = weights_;
      } else {
        result.weights_ = weightsBuilder_.build();
      }
      onBuilt();
      return result;
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }
    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.setField(field, value);
    }
    @java.lang.Override
    public Builder clearField(
        com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }
    @java.lang.Override
    public Builder clearOneof(
        com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }
    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }
    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }
    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo) {
        return mergeFrom((com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo)other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo other) {
      if (other == com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.getDefaultInstance()) return this;
      if (other.type_ != 0) {
        setTypeValue(other.getTypeValue());
      }
      if (other.getWorkersPerInput() != 0) {
        setWorkersPerInput(other.getWorkersPerInput());
      }
      if (other.hasWeights()) {
        mergeWeights(other.getWeights());
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        parsedMessage = (com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo) e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }

    private int type_ = 0;
    /**
     * &lt;pre&gt;
     * Define how the partitioning should work.
     * 
     * &lt;/pre&gt;
     *
     * <code>.clarifai.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy type = 1;</code>
     * @return The enum numeric value on the wire for type.
     */
    @java.lang.Override public int getTypeValue() {
      return type_;
    }
    /**
     * &lt;pre&gt;
     * Define how the partitioning should work.
     * 
     * &lt;/pre&gt;
     *
     * <code>.clarifai.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy type = 1;</code>
     * @param value The enum numeric value on the wire for type to set.
     * @return This builder for chaining.
     */
    public Builder setTypeValue(int value) {
      type_ = value;
      onChanged();
      return this;
    }
    /**
     * &lt;pre&gt;
     * Define how the partitioning should work.
     * 
     * &lt;/pre&gt;
     *
     * <code>.clarifai.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy type = 1;</code>
     * @return The type.
     */
    @java.lang.Override
    public com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy getType() {
      @SuppressWarnings("deprecation")
      com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy result = com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy.valueOf(type_);
      return result == null ? com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy.UNRECOGNIZED : result;
    }
    /**
     * &lt;pre&gt;
     * Define how the partitioning should work.
     * 
     * &lt;/pre&gt;
     *
     * <code>.clarifai.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy type = 1;</code>
     * @param value The type to set.
     * @return This builder for chaining.
     */
    public Builder setType(com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy value) {
      if (value == null) {
        throw new NullPointerException();
      }
      type_ = value.getNumber();
      onChanged();
      return this;
    }
    /**
     * &lt;pre&gt;
     * Define how the partitioning should work.
     * 
     * &lt;/pre&gt;
     *
     * <code>.clarifai.api.TaskWorkerPartitionedStrategyInfo.TaskWorkerPartitionedStrategy type = 1;</code>
     * @return This builder for chaining.
     */
    public Builder clearType() {
      type_ = 0;
      onChanged();
      return this;
    }

    private int workersPerInput_ ;
    /**
     * &lt;pre&gt;
     * How many workers will label each input.
     * 
     * &lt;/pre&gt;
     *
     * <code>int32 workers_per_input = 2;</code>
     * @return The workersPerInput.
     */
    @java.lang.Override
    public int getWorkersPerInput() {
      return workersPerInput_;
    }
    /**
     * &lt;pre&gt;
     * How many workers will label each input.
     * 
     * &lt;/pre&gt;
     *
     * <code>int32 workers_per_input = 2;</code>
     * @param value The workersPerInput to set.
     * @return This builder for chaining.
     */
    public Builder setWorkersPerInput(int value) {
      workersPerInput_ = value;
      onChanged();
      return this;
    }
    /**
     * &lt;pre&gt;
     * How many workers will label each input.
     * 
     * &lt;/pre&gt;
     *
     * <code>int32 workers_per_input = 2;</code>
     * @return This builder for chaining.
     */
    public Builder clearWorkersPerInput() {
      workersPerInput_ = 0;
      onChanged();
      return this;
    }

    private com.google.protobuf.Struct weights_;
    private com.google.protobuf.SingleFieldBuilderV3<
        com.google.protobuf.Struct, com.google.protobuf.Struct.Builder, com.google.protobuf.StructOrBuilder> weightsBuilder_;
    /**
     * &lt;pre&gt;
     * In case of weighted partitioning, map user IDs to weights.
     * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
     * EXAMPLE:
     * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
     * then the first worker will be assigned 30% of the work,
     * the second worker will be assigned 30% of the work,
     * and the third worker will be assigned 40% of the work.
     * You may use weights that add up to 100, but this is not required.
     * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
     * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
     * NOTE:
     * No worker should be assigned a weight percentage greater than 1/workers_per_input;
     * it is mathematically impossible to partition the work in such a case.
     * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
     * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
     * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
     * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
     * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
     * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
     * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
     * &lt;/pre&gt;
     *
     * <code>.google.protobuf.Struct weights = 3;</code>
     * @return Whether the weights field is set.
     */
    public boolean hasWeights() {
      return weightsBuilder_ != null || weights_ != null;
    }
    /**
     * &lt;pre&gt;
     * In case of weighted partitioning, map user IDs to weights.
     * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
     * EXAMPLE:
     * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
     * then the first worker will be assigned 30% of the work,
     * the second worker will be assigned 30% of the work,
     * and the third worker will be assigned 40% of the work.
     * You may use weights that add up to 100, but this is not required.
     * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
     * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
     * NOTE:
     * No worker should be assigned a weight percentage greater than 1/workers_per_input;
     * it is mathematically impossible to partition the work in such a case.
     * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
     * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
     * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
     * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
     * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
     * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
     * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
     * &lt;/pre&gt;
     *
     * <code>.google.protobuf.Struct weights = 3;</code>
     * @return The weights.
     */
    public com.google.protobuf.Struct getWeights() {
      if (weightsBuilder_ == null) {
        return weights_ == null ? com.google.protobuf.Struct.getDefaultInstance() : weights_;
      } else {
        return weightsBuilder_.getMessage();
      }
    }
    /**
     * &lt;pre&gt;
     * In case of weighted partitioning, map user IDs to weights.
     * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
     * EXAMPLE:
     * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
     * then the first worker will be assigned 30% of the work,
     * the second worker will be assigned 30% of the work,
     * and the third worker will be assigned 40% of the work.
     * You may use weights that add up to 100, but this is not required.
     * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
     * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
     * NOTE:
     * No worker should be assigned a weight percentage greater than 1/workers_per_input;
     * it is mathematically impossible to partition the work in such a case.
     * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
     * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
     * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
     * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
     * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
     * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
     * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
     * &lt;/pre&gt;
     *
     * <code>.google.protobuf.Struct weights = 3;</code>
     */
    public Builder setWeights(com.google.protobuf.Struct value) {
      if (weightsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        weights_ = value;
        onChanged();
      } else {
        weightsBuilder_.setMessage(value);
      }
      return this;
    }
    /**
     * &lt;pre&gt;
     * In case of weighted partitioning, map user IDs to weights.
     * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
     * EXAMPLE:
     * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
     * then the first worker will be assigned 30% of the work,
     * the second worker will be assigned 30% of the work,
     * and the third worker will be assigned 40% of the work.
     * You may use weights that add up to 100, but this is not required.
     * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
     * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
     * NOTE:
     * No worker should be assigned a weight percentage greater than 1/workers_per_input;
     * it is mathematically impossible to partition the work in such a case.
     * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
     * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
     * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
     * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
     * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
     * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
     * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
     * &lt;/pre&gt;
     *
     * <code>.google.protobuf.Struct weights = 3;</code>
     */
    public Builder setWeights(
        com.google.protobuf.Struct.Builder builderForValue) {
      if (weightsBuilder_ == null) {
        weights_ = builderForValue.build();
        onChanged();
      } else {
        weightsBuilder_.setMessage(builderForValue.build());
      }
      return this;
    }
    /**
     * &lt;pre&gt;
     * In case of weighted partitioning, map user IDs to weights.
     * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
     * EXAMPLE:
     * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
     * then the first worker will be assigned 30% of the work,
     * the second worker will be assigned 30% of the work,
     * and the third worker will be assigned 40% of the work.
     * You may use weights that add up to 100, but this is not required.
     * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
     * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
     * NOTE:
     * No worker should be assigned a weight percentage greater than 1/workers_per_input;
     * it is mathematically impossible to partition the work in such a case.
     * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
     * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
     * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
     * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
     * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
     * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
     * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
     * &lt;/pre&gt;
     *
     * <code>.google.protobuf.Struct weights = 3;</code>
     */
    public Builder mergeWeights(com.google.protobuf.Struct value) {
      if (weightsBuilder_ == null) {
        if (weights_ != null) {
          weights_ =
            com.google.protobuf.Struct.newBuilder(weights_).mergeFrom(value).buildPartial();
        } else {
          weights_ = value;
        }
        onChanged();
      } else {
        weightsBuilder_.mergeFrom(value);
      }
      return this;
    }
    /**
     * &lt;pre&gt;
     * In case of weighted partitioning, map user IDs to weights.
     * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
     * EXAMPLE:
     * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
     * then the first worker will be assigned 30% of the work,
     * the second worker will be assigned 30% of the work,
     * and the third worker will be assigned 40% of the work.
     * You may use weights that add up to 100, but this is not required.
     * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
     * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
     * NOTE:
     * No worker should be assigned a weight percentage greater than 1/workers_per_input;
     * it is mathematically impossible to partition the work in such a case.
     * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
     * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
     * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
     * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
     * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
     * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
     * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
     * &lt;/pre&gt;
     *
     * <code>.google.protobuf.Struct weights = 3;</code>
     */
    public Builder clearWeights() {
      if (weightsBuilder_ == null) {
        weights_ = null;
        onChanged();
      } else {
        weights_ = null;
        weightsBuilder_ = null;
      }
      return this;
    }
    /**
     * &lt;pre&gt;
     * In case of weighted partitioning, map user IDs to weights.
     * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
     * EXAMPLE:
     * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
     * then the first worker will be assigned 30% of the work,
     * the second worker will be assigned 30% of the work,
     * and the third worker will be assigned 40% of the work.
     * You may use weights that add up to 100, but this is not required.
     * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
     * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
     * NOTE:
     * No worker should be assigned a weight percentage greater than 1/workers_per_input;
     * it is mathematically impossible to partition the work in such a case.
     * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
     * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
     * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
     * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
     * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
     * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
     * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
     * &lt;/pre&gt;
     *
     * <code>.google.protobuf.Struct weights = 3;</code>
     */
    public com.google.protobuf.Struct.Builder getWeightsBuilder() {
      onChanged();
      return getWeightsFieldBuilder().getBuilder();
    }
    /**
     * &lt;pre&gt;
     * In case of weighted partitioning, map user IDs to weights.
     * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
     * EXAMPLE:
     * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
     * then the first worker will be assigned 30% of the work,
     * the second worker will be assigned 30% of the work,
     * and the third worker will be assigned 40% of the work.
     * You may use weights that add up to 100, but this is not required.
     * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
     * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
     * NOTE:
     * No worker should be assigned a weight percentage greater than 1/workers_per_input;
     * it is mathematically impossible to partition the work in such a case.
     * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
     * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
     * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
     * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
     * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
     * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
     * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
     * &lt;/pre&gt;
     *
     * <code>.google.protobuf.Struct weights = 3;</code>
     */
    public com.google.protobuf.StructOrBuilder getWeightsOrBuilder() {
      if (weightsBuilder_ != null) {
        return weightsBuilder_.getMessageOrBuilder();
      } else {
        return weights_ == null ?
            com.google.protobuf.Struct.getDefaultInstance() : weights_;
      }
    }
    /**
     * &lt;pre&gt;
     * In case of weighted partitioning, map user IDs to weights.
     * Each labeler will be assigned work proportional to its own weight relative to the sum of all weights.
     * EXAMPLE:
     * If we have 3 workers and weights = {1: 30, 2: 30, 3: 40},
     * then the first worker will be assigned 30% of the work,
     * the second worker will be assigned 30% of the work,
     * and the third worker will be assigned 40% of the work.
     * You may use weights that add up to 100, but this is not required.
     * For example, the weights {1: 30, 2: 30, 3: 40} are equivalent to {1: 3, 2: 3, 3: 4}
     * because they represent the same percentages: {1: 30%, 2: 30%, 3: 40%}.
     * NOTE:
     * No worker should be assigned a weight percentage greater than 1/workers_per_input;
     * it is mathematically impossible to partition the work in such a case.
     * Why? Say we have 3 workers and workers_per_input = 2, i.e. each input must be labeled by 2 workers.
     * Let's assign weights {1: 51%, 2: 25%, 3: 24%}.
     * The first worker has a weight percentage higher than 1/workers_per_input = 1/2 = 50%.
     * If we have 100 inputs, then a total of 100 * workers_per_input = 200 cumulative inputs will be labeled by these 3 workers.
     * Worker 1 should label 102 cumulative inputs, while workers 2 and 3 should label 98 cumulative inputs together.
     * No matter how we distribute those 98 cumulative inputs, workers 2 and 3 can cover at most 98 distinct inputs.
     * This means the remaining 2 inputs would be labeled only by worker 1, which contradicts the workers_per_input = 2 requirement.
     * &lt;/pre&gt;
     *
     * <code>.google.protobuf.Struct weights = 3;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
        com.google.protobuf.Struct, com.google.protobuf.Struct.Builder, com.google.protobuf.StructOrBuilder>
        getWeightsFieldBuilder() {
      if (weightsBuilder_ == null) {
        weightsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.Struct, com.google.protobuf.Struct.Builder, com.google.protobuf.StructOrBuilder>(
                getWeights(),
                getParentForChildren(),
                isClean());
        weights_ = null;
      }
      return weightsBuilder_;
    }
    @java.lang.Override
    public final Builder setUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }


    // @@protoc_insertion_point(builder_scope:clarifai.api.TaskWorkerPartitionedStrategyInfo)
  }

  // @@protoc_insertion_point(class_scope:clarifai.api.TaskWorkerPartitionedStrategyInfo)
  private static final com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo DEFAULT_INSTANCE;
  static {
    DEFAULT_INSTANCE = new com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo();
  }

  public static com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<TaskWorkerPartitionedStrategyInfo>
      PARSER = new com.google.protobuf.AbstractParser<TaskWorkerPartitionedStrategyInfo>() {
    @java.lang.Override
    public TaskWorkerPartitionedStrategyInfo parsePartialFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return new TaskWorkerPartitionedStrategyInfo(input, extensionRegistry);
    }
  };

  public static com.google.protobuf.Parser<TaskWorkerPartitionedStrategyInfo> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<TaskWorkerPartitionedStrategyInfo> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }

}
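A minimal usage sketch (not part of the generated file): building a WEIGHTED strategy in which three workers split the work 30/30/40 and every input is labeled by two workers, matching the javadoc example above. The worker IDs ("1", "2", "3") are illustrative; Struct and Value are the standard protobuf-java well-known types.

import com.google.protobuf.Struct;
import com.google.protobuf.Value;

public class TaskWorkerPartitionedStrategyInfoExample {
  public static void main(String[] args) {
    // Weights need not sum to 100; only the ratios matter.
    Struct.Builder weights = Struct.newBuilder()
        .putFields("1", Value.newBuilder().setNumberValue(30).build())
        .putFields("2", Value.newBuilder().setNumberValue(30).build())
        .putFields("3", Value.newBuilder().setNumberValue(40).build());

    com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo info =
        com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo.newBuilder()
            .setType(com.clarifai.grpc.api.TaskWorkerPartitionedStrategyInfo
                .TaskWorkerPartitionedStrategy.WEIGHTED)
            .setWorkersPerInput(2)  // each input is labeled by 2 workers
            .setWeights(weights)    // setWeights(Struct.Builder) builds it for us
            .build();

    System.out.println(info);
  }
}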




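The javadoc's NOTE amounts to a simple feasibility condition: a weighted assignment is only partitionable when no single worker's share of the total weight exceeds 1/workers_per_input. A hypothetical helper (not part of the generated API) that checks this condition:

import java.util.Map;

public class WeightFeasibilityCheck {
  // Returns true when no worker's share of the total weight exceeds
  // 1 / workersPerInput, per the NOTE in the weights field javadoc.
  static boolean isPartitionable(Map<String, Double> weights, int workersPerInput) {
    double total = weights.values().stream().mapToDouble(Double::doubleValue).sum();
    double cap = total / workersPerInput;
    return weights.values().stream().allMatch(w -> w <= cap);
  }

  public static void main(String[] args) {
    // {1: 30, 2: 30, 3: 40}: max share 40% <= 50% -> partitionable.
    System.out.println(isPartitionable(Map.of("1", 30.0, "2", 30.0, "3", 40.0), 2)); // true
    // {1: 3, 2: 3, 3: 4} is equivalent, since only the ratios matter.
    System.out.println(isPartitionable(Map.of("1", 3.0, "2", 3.0, "3", 4.0), 2));    // true
    // {1: 51, 2: 25, 3: 24}: worker 1's 51% share exceeds 50% -> impossible.
    System.out.println(isPartitionable(Map.of("1", 51.0, "2", 25.0, "3", 24.0), 2)); // false
  }
}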