com.google.cloud.vision.v1p1beta1.FaceAnnotationOrBuilder

/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/vision/v1p1beta1/image_annotator.proto

// Protobuf Java Version: 3.25.5
package com.google.cloud.vision.v1p1beta1;

public interface FaceAnnotationOrBuilder
    extends
    // @@protoc_insertion_point(interface_extends:google.cloud.vision.v1p1beta1.FaceAnnotation)
    com.google.protobuf.MessageOrBuilder {

  /**
   *
   *
   * <pre>
   * The bounding polygon around the face. The coordinates of the bounding box
   * are in the original image's scale, as returned in `ImageParams`.
   * The bounding box is computed to "frame" the face in accordance with human
   * expectations. It is based on the landmarker results.
   * Note that one or more x and/or y coordinates may not be generated in the
   * `BoundingPoly` (the polygon will be unbounded) if only a partial face
   * appears in the image to be annotated.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.BoundingPoly bounding_poly = 1;</code>
   *
   * @return Whether the boundingPoly field is set.
   */
  boolean hasBoundingPoly();

  /**
   *
   *
   * <pre>
   * The bounding polygon around the face. The coordinates of the bounding box
   * are in the original image's scale, as returned in `ImageParams`.
   * The bounding box is computed to "frame" the face in accordance with human
   * expectations. It is based on the landmarker results.
   * Note that one or more x and/or y coordinates may not be generated in the
   * `BoundingPoly` (the polygon will be unbounded) if only a partial face
   * appears in the image to be annotated.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.BoundingPoly bounding_poly = 1;</code>
   *
   * @return The boundingPoly.
   */
  com.google.cloud.vision.v1p1beta1.BoundingPoly getBoundingPoly();

  /**
   *
   *
   * <pre>
   * The bounding polygon around the face. The coordinates of the bounding box
   * are in the original image's scale, as returned in `ImageParams`.
   * The bounding box is computed to "frame" the face in accordance with human
   * expectations. It is based on the landmarker results.
   * Note that one or more x and/or y coordinates may not be generated in the
   * `BoundingPoly` (the polygon will be unbounded) if only a partial face
   * appears in the image to be annotated.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.BoundingPoly bounding_poly = 1;</code>
   */
  com.google.cloud.vision.v1p1beta1.BoundingPolyOrBuilder getBoundingPolyOrBuilder();

  /**
   *
   *
   * <pre>
   * The `fd_bounding_poly` bounding polygon is tighter than the
   * `boundingPoly`, and encloses only the skin part of the face. Typically, it
   * is used to eliminate the face from any image analysis that detects the
   * "amount of skin" visible in an image. It is not based on the
   * landmarker results, only on the initial face detection, hence
   * the <code>fd</code> (face detection) prefix.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.BoundingPoly fd_bounding_poly = 2;</code>
   *
   * @return Whether the fdBoundingPoly field is set.
   */
  boolean hasFdBoundingPoly();

  /**
   *
   *
   * <pre>
   * The `fd_bounding_poly` bounding polygon is tighter than the
   * `boundingPoly`, and encloses only the skin part of the face. Typically, it
   * is used to eliminate the face from any image analysis that detects the
   * "amount of skin" visible in an image. It is not based on the
   * landmarker results, only on the initial face detection, hence
   * the <code>fd</code> (face detection) prefix.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.BoundingPoly fd_bounding_poly = 2;</code>
   *
   * @return The fdBoundingPoly.
   */
  com.google.cloud.vision.v1p1beta1.BoundingPoly getFdBoundingPoly();

  /**
   *
   *
   * <pre>
   * The `fd_bounding_poly` bounding polygon is tighter than the
   * `boundingPoly`, and encloses only the skin part of the face. Typically, it
   * is used to eliminate the face from any image analysis that detects the
   * "amount of skin" visible in an image. It is not based on the
   * landmarker results, only on the initial face detection, hence
   * the <code>fd</code> (face detection) prefix.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.BoundingPoly fd_bounding_poly = 2;</code>
   */
  com.google.cloud.vision.v1p1beta1.BoundingPolyOrBuilder getFdBoundingPolyOrBuilder();

  /**
   *
   *
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  java.util.List<com.google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark> getLandmarksList();

  /**
   *
   *
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  com.google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark getLandmarks(int index);

  /**
   *
   *
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  int getLandmarksCount();

  /**
   *
   *
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  java.util.List<? extends com.google.cloud.vision.v1p1beta1.FaceAnnotation.LandmarkOrBuilder>
      getLandmarksOrBuilderList();

  /**
   *
   *
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  com.google.cloud.vision.v1p1beta1.FaceAnnotation.LandmarkOrBuilder getLandmarksOrBuilder(
      int index);

  /**
   *
   *
   * <pre>
   * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
   * of the face relative to the image vertical about the axis perpendicular to
   * the face. Range [-180,180].
   * </pre>
   *
   * <code>float roll_angle = 4;</code>
   *
   * @return The rollAngle.
   */
  float getRollAngle();

  /**
   *
   *
   * <pre>
   * Yaw angle, which indicates the leftward/rightward angle that the face is
   * pointing relative to the vertical plane perpendicular to the image. Range
   * [-180,180].
   * </pre>
   *
   * <code>float pan_angle = 5;</code>
   *
   * @return The panAngle.
   */
  float getPanAngle();

  /**
   *
   *
   * <pre>
   * Pitch angle, which indicates the upwards/downwards angle that the face is
   * pointing relative to the image's horizontal plane. Range [-180,180].
   * </pre>
   *
   * <code>float tilt_angle = 6;</code>
   *
   * @return The tiltAngle.
   */
  float getTiltAngle();

  /**
   *
   *
   * <pre>
   * Detection confidence. Range [0, 1].
   * </pre>
   *
   * <code>float detection_confidence = 7;</code>
   *
   * @return The detectionConfidence.
   */
  float getDetectionConfidence();

  /**
   *
   *
   * <pre>
   * Face landmarking confidence. Range [0, 1].
   * </pre>
   *
   * <code>float landmarking_confidence = 8;</code>
   *
   * @return The landmarkingConfidence.
   */
  float getLandmarkingConfidence();

  /**
   *
   *
   * <pre>
   * Joy likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood joy_likelihood = 9;</code>
   *
   * @return The enum numeric value on the wire for joyLikelihood.
   */
  int getJoyLikelihoodValue();

  /**
   *
   *
   * <pre>
   * Joy likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood joy_likelihood = 9;</code>
   *
   * @return The joyLikelihood.
   */
  com.google.cloud.vision.v1p1beta1.Likelihood getJoyLikelihood();

  /**
   *
   *
   * <pre>
   * Sorrow likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood sorrow_likelihood = 10;</code>
   *
   * @return The enum numeric value on the wire for sorrowLikelihood.
   */
  int getSorrowLikelihoodValue();

  /**
   *
   *
   * <pre>
   * Sorrow likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood sorrow_likelihood = 10;</code>
   *
   * @return The sorrowLikelihood.
   */
  com.google.cloud.vision.v1p1beta1.Likelihood getSorrowLikelihood();

  /**
   *
   *
   * <pre>
   * Anger likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood anger_likelihood = 11;</code>
   *
   * @return The enum numeric value on the wire for angerLikelihood.
   */
  int getAngerLikelihoodValue();

  /**
   *
   *
   * <pre>
   * Anger likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood anger_likelihood = 11;</code>
   *
   * @return The angerLikelihood.
   */
  com.google.cloud.vision.v1p1beta1.Likelihood getAngerLikelihood();

  /**
   *
   *
   * <pre>
   * Surprise likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood surprise_likelihood = 12;</code>
   *
   * @return The enum numeric value on the wire for surpriseLikelihood.
   */
  int getSurpriseLikelihoodValue();

  /**
   *
   *
   * <pre>
   * Surprise likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood surprise_likelihood = 12;</code>
   *
   * @return The surpriseLikelihood.
   */
  com.google.cloud.vision.v1p1beta1.Likelihood getSurpriseLikelihood();

  /**
   *
   *
   * <pre>
   * Under-exposed likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood under_exposed_likelihood = 13;</code>
   *
   * @return The enum numeric value on the wire for underExposedLikelihood.
   */
  int getUnderExposedLikelihoodValue();

  /**
   *
   *
   * <pre>
   * Under-exposed likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood under_exposed_likelihood = 13;</code>
   *
   * @return The underExposedLikelihood.
   */
  com.google.cloud.vision.v1p1beta1.Likelihood getUnderExposedLikelihood();

  /**
   *
   *
   * <pre>
   * Blurred likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood blurred_likelihood = 14;</code>
   *
   * @return The enum numeric value on the wire for blurredLikelihood.
   */
  int getBlurredLikelihoodValue();

  /**
   *
   *
   * <pre>
   * Blurred likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood blurred_likelihood = 14;</code>
   *
   * @return The blurredLikelihood.
   */
  com.google.cloud.vision.v1p1beta1.Likelihood getBlurredLikelihood();

  /**
   *
   *
   * <pre>
   * Headwear likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood headwear_likelihood = 15;</code>
   *
   * @return The enum numeric value on the wire for headwearLikelihood.
   */
  int getHeadwearLikelihoodValue();

  /**
   *
   *
   * <pre>
   * Headwear likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1p1beta1.Likelihood headwear_likelihood = 15;</code>
   *
   * @return The headwearLikelihood.
   */
  com.google.cloud.vision.v1p1beta1.Likelihood getHeadwearLikelihood();
}
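
What follows is a brief usage sketch, not part of the generated source above. It shows how the accessors declared by FaceAnnotationOrBuilder are typically read after a FACE_DETECTION request, assuming the standard google-cloud-vision client library (which ships an ImageAnnotatorClient for this v1p1beta1 surface) and configured application default credentials. The class name and the gs:// image URI are placeholders, not anything defined by this file.

// Usage sketch only: consumes the FaceAnnotationOrBuilder accessors declared above.
// Assumes google-cloud-vision on the classpath and default credentials; the URI is a placeholder.
import com.google.cloud.vision.v1p1beta1.AnnotateImageRequest;
import com.google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse;
import com.google.cloud.vision.v1p1beta1.FaceAnnotation;
import com.google.cloud.vision.v1p1beta1.Feature;
import com.google.cloud.vision.v1p1beta1.Image;
import com.google.cloud.vision.v1p1beta1.ImageAnnotatorClient;
import com.google.cloud.vision.v1p1beta1.ImageSource;
import com.google.cloud.vision.v1p1beta1.Likelihood;
import java.util.Collections;

public class FaceAnnotationExample {
  public static void main(String[] args) throws Exception {
    Image image =
        Image.newBuilder()
            .setSource(ImageSource.newBuilder().setImageUri("gs://my-bucket/face.jpg"))
            .build();
    AnnotateImageRequest request =
        AnnotateImageRequest.newBuilder()
            .addFeatures(Feature.newBuilder().setType(Feature.Type.FACE_DETECTION))
            .setImage(image)
            .build();

    try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
      BatchAnnotateImagesResponse response =
          client.batchAnnotateImages(Collections.singletonList(request));
      for (FaceAnnotation face : response.getResponses(0).getFaceAnnotationsList()) {
        // bounding_poly (field 1): may be absent/unbounded when only a partial face is visible.
        if (face.hasBoundingPoly()) {
          System.out.println("Bounding poly: " + face.getBoundingPoly().getVerticesList());
        }
        // Pose angles (fields 4-6) are in degrees, range [-180, 180];
        // detection_confidence (field 7) is in [0, 1].
        System.out.printf(
            "roll=%.1f pan=%.1f tilt=%.1f confidence=%.2f%n",
            face.getRollAngle(),
            face.getPanAngle(),
            face.getTiltAngle(),
            face.getDetectionConfidence());
        // Likelihood enums (fields 9-15) report bucketed probabilities, not raw scores.
        if (face.getJoyLikelihood() == Likelihood.VERY_LIKELY) {
          System.out.println(
              "This face looks happy (" + face.getLandmarksCount() + " landmarks detected).");
        }
      }
    }
  }
}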



