google.cloud.documentai.v1beta3.evaluation.proto
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.documentai.v1beta3;

import "google/api/resource.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.DocumentAI.V1Beta3";
option go_package = "cloud.google.com/go/documentai/apiv1beta3/documentaipb;documentaipb";
option java_multiple_files = true;
option java_outer_classname = "DocumentAiEvaluation";
option java_package = "com.google.cloud.documentai.v1beta3";
option php_namespace = "Google\\Cloud\\DocumentAI\\V1beta3";
option ruby_package = "Google::Cloud::DocumentAI::V1beta3";

// Gives a short summary of an evaluation, and links to the evaluation itself.
message EvaluationReference {
  // The resource name of the Long Running Operation for the evaluation.
  string operation = 1;

  // The resource name of the evaluation.
  string evaluation = 2 [(google.api.resource_reference) = {
    type: "documentai.googleapis.com/Evaluation"
  }];

  // An aggregate of the statistics for the evaluation with fuzzy matching on.
  Evaluation.Metrics aggregate_metrics = 4;

  // An aggregate of the statistics for the evaluation with fuzzy matching off.
  Evaluation.Metrics aggregate_metrics_exact = 5;
}

// An evaluation of a ProcessorVersion's performance.
message Evaluation {
  option (google.api.resource) = {
    type: "documentai.googleapis.com/Evaluation"
    pattern: "projects/{project}/locations/{location}/processors/{processor}/processorVersions/{processor_version}/evaluations/{evaluation}"
  };

  // Evaluation counters for the documents that were used.
  message Counters {
    // How many documents were sent for evaluation.
    int32 input_documents_count = 1;

    // How many documents were not included in the evaluation as they didn't
    // pass validation.
    int32 invalid_documents_count = 2;

    // How many documents were not included in the evaluation as Document AI
    // failed to process them.
    int32 failed_documents_count = 3;

    // How many documents were used in the evaluation.
    int32 evaluated_documents_count = 4;
  }

  // Evaluation metrics, either in aggregate or about a specific entity.
  message Metrics {
    // The calculated precision.
    float precision = 1;

    // The calculated recall.
    float recall = 2;

    // The calculated f1 score.
    float f1_score = 3;

    // The number of occurrences in predicted documents.
    int32 predicted_occurrences_count = 4;

    // The number of occurrences in ground truth documents.
    int32 ground_truth_occurrences_count = 5;

    // The number of documents with a predicted occurrence.
    int32 predicted_document_count = 10;

    // The number of documents with a ground truth occurrence.
    int32 ground_truth_document_count = 11;

    // The number of true positives.
    int32 true_positives_count = 6;

    // The number of false positives.
    int32 false_positives_count = 7;

    // The number of false negatives.
    int32 false_negatives_count = 8;

    // The number of documents that had an occurrence of this label.
    int32 total_documents_count = 9;
  }

  // Evaluation metrics at a specific confidence level.
  message ConfidenceLevelMetrics {
    // The confidence level.
    float confidence_level = 1;

    // The metrics at the specific confidence level.
    Metrics metrics = 2;
  }

  // Metrics across multiple confidence levels.
  message MultiConfidenceMetrics {
    // A type that determines how metrics should be interpreted.
    enum MetricsType {
      // The metrics type is unspecified. By default, metrics without a
      // particular specification are for leaf entity types (i.e., top-level
      // entity types without child types, or child types which are not
      // parent types themselves).
      METRICS_TYPE_UNSPECIFIED = 0;

      // Metrics for this particular label type represent an aggregate of
      // metrics for other types instead of being based on actual TP/FP/FN
      // values for the label type. Metrics for parent (i.e., non-leaf)
      // entity types are an aggregate of metrics for their children.
      AGGREGATE = 1;
    }

    // Metrics across confidence levels with fuzzy matching enabled.
    repeated ConfidenceLevelMetrics confidence_level_metrics = 1;

    // Metrics across confidence levels with only exact matching.
    repeated ConfidenceLevelMetrics confidence_level_metrics_exact = 4;

    // The calculated area under the precision recall curve (AUPRC), computed by
    // integrating over all confidence thresholds.
    float auprc = 2;

    // The Estimated Calibration Error (ECE) of the confidence of the predicted
    // entities.
    float estimated_calibration_error = 3;

    // The AUPRC for metrics with fuzzy matching disabled, i.e., exact matching
    // only.
    float auprc_exact = 5;

    // The ECE for the predicted entities with fuzzy matching disabled, i.e.,
    // exact matching only.
    float estimated_calibration_error_exact = 6;

    // The metrics type for the label.
    MetricsType metrics_type = 7;
  }

  // The resource name of the evaluation.
  // Format:
  // `projects/{project}/locations/{location}/processors/{processor}/processorVersions/{processor_version}/evaluations/{evaluation}`
  string name = 1;

  // The time that the evaluation was created.
  google.protobuf.Timestamp create_time = 2;

  // Counters for the documents used in the evaluation.
  Counters document_counters = 5;

  // Metrics for all the entities in aggregate.
  MultiConfidenceMetrics all_entities_metrics = 3;

  // Metrics across confidence levels, for different entities.
  map<string, MultiConfidenceMetrics> entity_metrics = 4;

  // The KMS key name used for encryption.
  string kms_key_name = 6;

  // The KMS key version with which data is encrypted.
  string kms_key_version_name = 7;
}
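
For consumers of the Java artifact hosted on this page, the options above (java_package and java_multiple_files) generate an Evaluation class with nested Metrics, ConfidenceLevelMetrics, and MultiConfidenceMetrics types under com.google.cloud.documentai.v1beta3. The sketch below only builds and reads those generated messages locally to show how the fields fit together; the class name and all numeric values are made up for illustration. In practice an Evaluation is returned by the Document AI service (for example via the DocumentProcessorService GetEvaluation RPC) rather than constructed by hand.

import com.google.cloud.documentai.v1beta3.Evaluation;

public class EvaluationMetricsSketch {

  public static void main(String[] args) {
    // Per-label metrics at one confidence threshold (illustrative values only).
    Evaluation.Metrics metrics =
        Evaluation.Metrics.newBuilder()
            .setPrecision(0.92f)
            .setRecall(0.88f)
            .setF1Score(0.90f)
            .setTruePositivesCount(88)
            .setFalsePositivesCount(8)
            .setFalseNegativesCount(12)
            .build();

    // The service reports metrics per confidence level; one level shown here.
    Evaluation.ConfidenceLevelMetrics levelMetrics =
        Evaluation.ConfidenceLevelMetrics.newBuilder()
            .setConfidenceLevel(0.5f)
            .setMetrics(metrics)
            .build();

    Evaluation.MultiConfidenceMetrics multi =
        Evaluation.MultiConfidenceMetrics.newBuilder()
            .addConfidenceLevelMetrics(levelMetrics)
            .setAuprc(0.91f)
            .build();

    // entity_metrics is a map<string, MultiConfidenceMetrics> keyed by entity
    // type; "invoice_date" is a made-up key for this sketch.
    Evaluation evaluation =
        Evaluation.newBuilder()
            .setAllEntitiesMetrics(multi)
            .putEntityMetrics("invoice_date", multi)
            .build();

    // Read the values back the same way a caller would after fetching an
    // Evaluation from the service.
    evaluation
        .getEntityMetricsMap()
        .forEach(
            (entityType, entityMetrics) -> {
              for (Evaluation.ConfidenceLevelMetrics clm :
                  entityMetrics.getConfidenceLevelMetricsList()) {
                System.out.printf(
                    "%s @ confidence %.2f: precision=%.2f recall=%.2f f1=%.2f%n",
                    entityType,
                    clm.getConfidenceLevel(),
                    clm.getMetrics().getPrecision(),
                    clm.getMetrics().getRecall(),
                    clm.getMetrics().getF1Score());
              }
            });
  }
}

Note that this file does not spell out how the service derives precision, recall, and f1_score from the counts; the illustrative values above simply follow the usual definitions (precision = TP / (TP + FP), recall = TP / (TP + FN), f1 = 2 * precision * recall / (precision + recall)).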



