com.azure.resourcemanager.mediaservices.models.FaceDetectorPreset (from azure-resourcemanager-mediaservices)
This package contains the Microsoft Azure MediaServices Management SDK. For documentation on how to use this package, see https://aka.ms/azsdk/java/mgmt. This Swagger was generated by the API Framework. Package tag package-account-2023-01.
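A minimal construction sketch for this preset, using the fluent setters defined below. The constant names are assumed from the SDK's ExpandableStringEnum conventions (e.g. AnalysisResolution.STANDARD_DEFINITION, FaceRedactorMode.COMBINED, BlurType.MED) and are illustrative, not prescriptive:

FaceDetectorPreset preset = new FaceDetectorPreset()
    .withResolution(AnalysisResolution.STANDARD_DEFINITION) // downscale high-resolution input before analysis
    .withMode(FaceRedactorMode.COMBINED)                     // detect and blur faces in a single pass
    .withBlurType(BlurType.MED);                             // assumed constant; see BlurType for the full set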
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.mediaservices.models;
import com.azure.core.annotation.Fluent;
import com.azure.json.JsonReader;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.Map;
/**
* Describes all the settings to be used when analyzing a video in order to detect (and optionally redact) all the faces
* present.
*/
@Fluent
public final class FaceDetectorPreset extends Preset {
/*
* The discriminator for derived types.
*/
private String odataType = "#Microsoft.Media.FaceDetectorPreset";
/*
* Specifies the maximum resolution at which your video is analyzed. The default behavior is "SourceResolution,"
* which will keep the input video at its original resolution when analyzed. Using "StandardDefinition" will resize
* input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the
* video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing.
* Switching to "StandardDefinition" will reduce the time it takes to process high resolution video. It may also
* reduce the cost of using this component (see
* https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end
* up being too small in the resized video may not be detected.
*/
private AnalysisResolution resolution;
/*
* This mode provides the ability to choose between the following settings: 1) Analyze - For detection only. This
* mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances
* of the same person are assigned the same ID. 2) Combined - Additionally redacts (blurs) detected faces. 3) Redact
* - This enables a two-pass process, allowing for selective redaction of a subset of detected faces. It takes in the
* metadata file from a prior Analyze pass, along with the source video, and a user-selected subset of IDs that
* require redaction.
*/
private FaceRedactorMode mode;
/*
* Blur type
*/
private BlurType blurType;
/*
* Dictionary containing key value pairs for parameters not exposed in the preset itself
*/
private Map<String, String> experimentalOptions;
/**
* Creates an instance of FaceDetectorPreset class.
*/
public FaceDetectorPreset() {
}
/**
* Get the odataType property: The discriminator for derived types.
*
* @return the odataType value.
*/
@Override
public String odataType() {
return this.odataType;
}
/**
* Get the resolution property: Specifies the maximum resolution at which your video is analyzed. The default
* behavior is "SourceResolution," which will keep the input video at its original resolution when analyzed. Using
* "StandardDefinition" will resize input videos to standard definition while preserving the appropriate aspect
* ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled
* to 640x360 before processing. Switching to "StandardDefinition" will reduce the time it takes to process high
* resolution video. It may also reduce the cost of using this component (see
* https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end
* up being too small in the resized video may not be detected.
*
* @return the resolution value.
*/
public AnalysisResolution resolution() {
return this.resolution;
}
/**
* Set the resolution property: Specifies the maximum resolution at which your video is analyzed. The default
* behavior is "SourceResolution," which will keep the input video at its original resolution when analyzed. Using
* "StandardDefinition" will resize input videos to standard definition while preserving the appropriate aspect
* ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled
* to 640x360 before processing. Switching to "StandardDefinition" will reduce the time it takes to process high
* resolution video. It may also reduce the cost of using this component (see
* https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end
* up being too small in the resized video may not be detected.
*
* @param resolution the resolution value to set.
* @return the FaceDetectorPreset object itself.
*/
public FaceDetectorPreset withResolution(AnalysisResolution resolution) {
this.resolution = resolution;
return this;
}
/**
* Get the mode property: This mode provides the ability to choose between the following settings: 1) Analyze - For
* detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where
* possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts (blurs)
* detected faces. 3) Redact - This enables a two-pass process, allowing for selective redaction of a subset of
* detected faces. It takes in the metadata file from a prior Analyze pass, along with the source video, and a
* user-selected subset of IDs that require redaction.
*
* @return the mode value.
*/
public FaceRedactorMode mode() {
return this.mode;
}
/**
* Set the mode property: This mode provides the ability to choose between the following settings: 1) Analyze - For
* detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where
* possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts (blurs)
* detected faces. 3) Redact - This enables a two-pass process, allowing for selective redaction of a subset of
* detected faces. It takes in the metadata file from a prior Analyze pass, along with the source video, and a
* user-selected subset of IDs that require redaction.
*
* @param mode the mode value to set.
* @return the FaceDetectorPreset object itself.
*/
public FaceDetectorPreset withMode(FaceRedactorMode mode) {
this.mode = mode;
return this;
}
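// Illustrative sketch (not part of the generated model): the two-pass redaction
// flow described above first runs an Analyze pass to produce the face-ID
// metadata, then a Redact pass that consumes it along with the source video and
// a user-selected subset of IDs. Constant names are assumed from SDK conventions:
//
//     FaceDetectorPreset analyzePass = new FaceDetectorPreset()
//         .withMode(FaceRedactorMode.ANALYZE);
//     FaceDetectorPreset redactPass = new FaceDetectorPreset()
//         .withMode(FaceRedactorMode.REDACT)
//         .withBlurType(BlurType.MED); // BlurType.MED assumed, not confirmed by this file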
/**
* Get the blurType property: Blur type.
*
* @return the blurType value.
*/
public BlurType blurType() {
return this.blurType;
}
/**
* Set the blurType property: Blur type.
*
* @param blurType the blurType value to set.
* @return the FaceDetectorPreset object itself.
*/
public FaceDetectorPreset withBlurType(BlurType blurType) {
this.blurType = blurType;
return this;
}
/**
* Get the experimentalOptions property: Dictionary containing key value pairs for parameters not exposed in the
* preset itself.
*
* @return the experimentalOptions value.
*/
public Map<String, String> experimentalOptions() {
return this.experimentalOptions;
}
/**
* Set the experimentalOptions property: Dictionary containing key value pairs for parameters not exposed in the
* preset itself.
*
* @param experimentalOptions the experimentalOptions value to set.
* @return the FaceDetectorPreset object itself.
*/
public FaceDetectorPreset withExperimentalOptions(Map<String, String> experimentalOptions) {
this.experimentalOptions = experimentalOptions;
return this;
}
/**
* Validates the instance.
*
* @throws IllegalArgumentException thrown if the instance is not valid.
*/
@Override
public void validate() {
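// No-op: this model appears to have no required properties to check locally;
// the discriminator is pre-set at field initialization.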
}
/**
* {@inheritDoc}
*/
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("@odata.type", this.odataType);
jsonWriter.writeStringField("resolution", this.resolution == null ? null : this.resolution.toString());
jsonWriter.writeStringField("mode", this.mode == null ? null : this.mode.toString());
jsonWriter.writeStringField("blurType", this.blurType == null ? null : this.blurType.toString());
jsonWriter.writeMapField("experimentalOptions", this.experimentalOptions,
(writer, element) -> writer.writeString(element));
return jsonWriter.writeEndObject();
}
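// For reference, a fully populated instance serializes to JSON shaped like the
// following (field names taken directly from the writer calls above; values are
// illustrative):
//
//     {
//         "@odata.type": "#Microsoft.Media.FaceDetectorPreset",
//         "resolution": "StandardDefinition",
//         "mode": "Redact",
//         "blurType": "Med",
//         "experimentalOptions": { "someKey": "someValue" }
//     }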
/**
* Reads an instance of FaceDetectorPreset from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of FaceDetectorPreset if the JsonReader was pointing to an instance of it, or null if it was
* pointing to JSON null.
* @throws IOException If an error occurs while reading the FaceDetectorPreset.
*/
public static FaceDetectorPreset fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
FaceDetectorPreset deserializedFaceDetectorPreset = new FaceDetectorPreset();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("@odata.type".equals(fieldName)) {
deserializedFaceDetectorPreset.odataType = reader.getString();
} else if ("resolution".equals(fieldName)) {
deserializedFaceDetectorPreset.resolution = AnalysisResolution.fromString(reader.getString());
} else if ("mode".equals(fieldName)) {
deserializedFaceDetectorPreset.mode = FaceRedactorMode.fromString(reader.getString());
} else if ("blurType".equals(fieldName)) {
deserializedFaceDetectorPreset.blurType = BlurType.fromString(reader.getString());
} else if ("experimentalOptions".equals(fieldName)) {
Map<String, String> experimentalOptions = reader.readMap(reader1 -> reader1.getString());
deserializedFaceDetectorPreset.experimentalOptions = experimentalOptions;
} else {
reader.skipChildren();
}
}
return deserializedFaceDetectorPreset;
});
}
}
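A minimal round-trip sketch using the azure-json primitives this class already imports. JsonProviders is the standard entry point in com.azure.json; the preset values and the class name FaceDetectorPresetRoundTrip are illustrative assumptions, not part of the SDK:

import com.azure.json.JsonProviders;
import com.azure.json.JsonReader;
import com.azure.json.JsonWriter;
import com.azure.resourcemanager.mediaservices.models.FaceDetectorPreset;
import com.azure.resourcemanager.mediaservices.models.FaceRedactorMode;
import java.io.IOException;
import java.io.StringWriter;

public final class FaceDetectorPresetRoundTrip {
    public static void main(String[] args) throws IOException {
        FaceDetectorPreset preset = new FaceDetectorPreset()
            .withMode(FaceRedactorMode.ANALYZE);

        // Serialize: toJson writes into a JsonWriter backed by any java.io.Writer.
        StringWriter buffer = new StringWriter();
        try (JsonWriter writer = JsonProviders.createWriter(buffer)) {
            preset.toJson(writer).flush();
        }
        String json = buffer.toString();

        // Deserialize: fromJson rebuilds the preset, skipping unknown fields.
        try (JsonReader reader = JsonProviders.createReader(json)) {
            FaceDetectorPreset roundTripped = FaceDetectorPreset.fromJson(reader);
            System.out.println(roundTripped.mode()); // prints "Analyze"
        }
    }
}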