All Downloads are FREE. Search and download functionalities are using the official Maven repository.

com.google.cloud.vision.v1.FaceAnnotationOrBuilder Maven / Gradle / Ivy

There is a newer version: 3.47.0
Show newest version
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/vision/v1/image_annotator.proto

package com.google.cloud.vision.v1;

/**
 * Accessor interface for {@code google.cloud.vision.v1.FaceAnnotation}, implemented by both
 * the immutable message and its builder. A face annotation contains the results of face
 * detection: bounding polygons, landmarks, pose angles, and emotion/attribute likelihoods.
 *
 * <p>Generated by the protocol buffer compiler from
 * {@code google/cloud/vision/v1/image_annotator.proto}; do not edit by hand.
 */
public interface FaceAnnotationOrBuilder extends
    // @@protoc_insertion_point(interface_extends:google.cloud.vision.v1.FaceAnnotation)
    com.google.protobuf.MessageOrBuilder {

  /**
   * <pre>
   * The bounding polygon around the face. The coordinates of the bounding box
   * are in the original image's scale, as returned in `ImageParams`.
   * The bounding box is computed to "frame" the face in accordance with human
   * expectations. It is based on the landmarker results.
   * Note that one or more x and/or y coordinates may not be generated in the
   * `BoundingPoly` (the polygon will be unbounded) if only a partial face
   * appears in the image to be annotated.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly bounding_poly = 1;</code>
   * @return Whether the boundingPoly field is set.
   */
  boolean hasBoundingPoly();

  /**
   * <pre>
   * The bounding polygon around the face. The coordinates of the bounding box
   * are in the original image's scale, as returned in `ImageParams`.
   * The bounding box is computed to "frame" the face in accordance with human
   * expectations. It is based on the landmarker results.
   * Note that one or more x and/or y coordinates may not be generated in the
   * `BoundingPoly` (the polygon will be unbounded) if only a partial face
   * appears in the image to be annotated.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly bounding_poly = 1;</code>
   * @return The boundingPoly.
   */
  com.google.cloud.vision.v1.BoundingPoly getBoundingPoly();

  /**
   * <pre>
   * The bounding polygon around the face. The coordinates of the bounding box
   * are in the original image's scale, as returned in `ImageParams`.
   * The bounding box is computed to "frame" the face in accordance with human
   * expectations. It is based on the landmarker results.
   * Note that one or more x and/or y coordinates may not be generated in the
   * `BoundingPoly` (the polygon will be unbounded) if only a partial face
   * appears in the image to be annotated.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly bounding_poly = 1;</code>
   */
  com.google.cloud.vision.v1.BoundingPolyOrBuilder getBoundingPolyOrBuilder();

  /**
   * <pre>
   * The `fd_bounding_poly` bounding polygon is tighter than the
   * `boundingPoly`, and encloses only the skin part of the face. Typically, it
   * is used to eliminate the face from any image analysis that detects the
   * "amount of skin" visible in an image. It is not based on the
   * landmarker results, only on the initial face detection, hence
   * the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly fd_bounding_poly = 2;</code>
   * @return Whether the fdBoundingPoly field is set.
   */
  boolean hasFdBoundingPoly();

  /**
   * <pre>
   * The `fd_bounding_poly` bounding polygon is tighter than the
   * `boundingPoly`, and encloses only the skin part of the face. Typically, it
   * is used to eliminate the face from any image analysis that detects the
   * "amount of skin" visible in an image. It is not based on the
   * landmarker results, only on the initial face detection, hence
   * the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly fd_bounding_poly = 2;</code>
   * @return The fdBoundingPoly.
   */
  com.google.cloud.vision.v1.BoundingPoly getFdBoundingPoly();

  /**
   * <pre>
   * The `fd_bounding_poly` bounding polygon is tighter than the
   * `boundingPoly`, and encloses only the skin part of the face. Typically, it
   * is used to eliminate the face from any image analysis that detects the
   * "amount of skin" visible in an image. It is not based on the
   * landmarker results, only on the initial face detection, hence
   * the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly fd_bounding_poly = 2;</code>
   */
  com.google.cloud.vision.v1.BoundingPolyOrBuilder getFdBoundingPolyOrBuilder();

  /**
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  java.util.List<com.google.cloud.vision.v1.FaceAnnotation.Landmark>
      getLandmarksList();

  /**
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  com.google.cloud.vision.v1.FaceAnnotation.Landmark getLandmarks(int index);

  /**
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  int getLandmarksCount();

  /**
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  java.util.List<? extends com.google.cloud.vision.v1.FaceAnnotation.LandmarkOrBuilder>
      getLandmarksOrBuilderList();

  /**
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  com.google.cloud.vision.v1.FaceAnnotation.LandmarkOrBuilder getLandmarksOrBuilder(
      int index);

  /**
   * <pre>
   * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
   * of the face relative to the image vertical about the axis perpendicular to
   * the face. Range [-180,180].
   * </pre>
   *
   * <code>float roll_angle = 4;</code>
   * @return The rollAngle.
   */
  float getRollAngle();

  /**
   * <pre>
   * Yaw angle, which indicates the leftward/rightward angle that the face is
   * pointing relative to the vertical plane perpendicular to the image. Range
   * [-180,180].
   * </pre>
   *
   * <code>float pan_angle = 5;</code>
   * @return The panAngle.
   */
  float getPanAngle();

  /**
   * <pre>
   * Pitch angle, which indicates the upwards/downwards angle that the face is
   * pointing relative to the image's horizontal plane. Range [-180,180].
   * </pre>
   *
   * <code>float tilt_angle = 6;</code>
   * @return The tiltAngle.
   */
  float getTiltAngle();

  /**
   * <pre>
   * Detection confidence. Range [0, 1].
   * </pre>
   *
   * <code>float detection_confidence = 7;</code>
   * @return The detectionConfidence.
   */
  float getDetectionConfidence();

  /**
   * <pre>
   * Face landmarking confidence. Range [0, 1].
   * </pre>
   *
   * <code>float landmarking_confidence = 8;</code>
   * @return The landmarkingConfidence.
   */
  float getLandmarkingConfidence();

  /**
   * <pre>
   * Joy likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood joy_likelihood = 9;</code>
   * @return The enum numeric value on the wire for joyLikelihood.
   */
  int getJoyLikelihoodValue();

  /**
   * <pre>
   * Joy likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood joy_likelihood = 9;</code>
   * @return The joyLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getJoyLikelihood();

  /**
   * <pre>
   * Sorrow likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood sorrow_likelihood = 10;</code>
   * @return The enum numeric value on the wire for sorrowLikelihood.
   */
  int getSorrowLikelihoodValue();

  /**
   * <pre>
   * Sorrow likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood sorrow_likelihood = 10;</code>
   * @return The sorrowLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getSorrowLikelihood();

  /**
   * <pre>
   * Anger likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood anger_likelihood = 11;</code>
   * @return The enum numeric value on the wire for angerLikelihood.
   */
  int getAngerLikelihoodValue();

  /**
   * <pre>
   * Anger likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood anger_likelihood = 11;</code>
   * @return The angerLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getAngerLikelihood();

  /**
   * <pre>
   * Surprise likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood surprise_likelihood = 12;</code>
   * @return The enum numeric value on the wire for surpriseLikelihood.
   */
  int getSurpriseLikelihoodValue();

  /**
   * <pre>
   * Surprise likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood surprise_likelihood = 12;</code>
   * @return The surpriseLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getSurpriseLikelihood();

  /**
   * <pre>
   * Under-exposed likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood under_exposed_likelihood = 13;</code>
   * @return The enum numeric value on the wire for underExposedLikelihood.
   */
  int getUnderExposedLikelihoodValue();

  /**
   * <pre>
   * Under-exposed likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood under_exposed_likelihood = 13;</code>
   * @return The underExposedLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getUnderExposedLikelihood();

  /**
   * <pre>
   * Blurred likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood blurred_likelihood = 14;</code>
   * @return The enum numeric value on the wire for blurredLikelihood.
   */
  int getBlurredLikelihoodValue();

  /**
   * <pre>
   * Blurred likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood blurred_likelihood = 14;</code>
   * @return The blurredLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getBlurredLikelihood();

  /**
   * <pre>
   * Headwear likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood headwear_likelihood = 15;</code>
   * @return The enum numeric value on the wire for headwearLikelihood.
   */
  int getHeadwearLikelihoodValue();

  /**
   * <pre>
   * Headwear likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood headwear_likelihood = 15;</code>
   * @return The headwearLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getHeadwearLikelihood();
}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy