
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.dataproc.model;
/**
* Basic autoscaling configurations for Spark Standalone.
*
* This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Cloud Dataproc API. For a detailed explanation see:
* https://developers.google.com/api-client-library/java/google-http-java-client/json
*
*
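* A minimal construction sketch follows; the values are illustrative assumptions only, not
* recommendations, and chaining works because every setter returns this instance:
*
* <pre>{@code
* SparkStandaloneAutoscalingConfig config = new SparkStandaloneAutoscalingConfig()
*     .setGracefulDecommissionTimeout("600s")    // wait up to 10 minutes before forcing removal
*     .setScaleUpFactor(0.5)                     // apply half of each recommended scale-up
*     .setScaleDownFactor(0.5)                   // apply half of each recommended scale-down
*     .setScaleUpMinWorkerFraction(0.0)          // scale up on any recommended change
*     .setScaleDownMinWorkerFraction(0.0)        // scale down on any recommended change
*     .setRemoveOnlyIdleWorkers(true);           // remove only idle workers when scaling down
* }</pre>
*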
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class SparkStandaloneAutoscalingConfig extends com.google.api.client.json.GenericJson {
/**
* Required. Timeout for graceful decommissioning of Spark workers. Specifies the duration to
* wait for Spark workers to complete Spark decommissioning tasks before forcefully removing
* them. Only applicable to downscaling operations. Bounds: 0s, 1d.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private String gracefulDecommissionTimeout;
/**
* Optional. Remove only idle workers when scaling down the cluster.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean removeOnlyIdleWorkers;
/**
* Required. Fraction of required executors to remove from Spark Standalone clusters. A scale-down
* factor of 1.0 will result in scaling down so that there are no more executors for the Spark
* Job (more aggressive scaling). A scale-down factor closer to 0 will result in a smaller
* magnitude of scaling down (less aggressive scaling). Bounds: 0.0, 1.0.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Double scaleDownFactor;
/**
* Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling
* occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must
* recommend at least a 2-worker scale-down for the cluster to scale. A threshold of 0 means the
* autoscaler will scale down on any recommended change. Bounds: 0.0, 1.0. Default: 0.0.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Double scaleDownMinWorkerFraction;
/**
* Required. Fraction of required workers to add to Spark Standalone clusters. A scale-up factor
* of 1.0 will result in scaling up so that there are no more required workers for the Spark Job
* (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of
* scaling up (less aggressive scaling). Bounds: 0.0, 1.0.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Double scaleUpFactor;
/**
* Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs.
* For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at
* least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will
* scale up on any recommended change. Bounds: 0.0, 1.0. Default: 0.0.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Double scaleUpMinWorkerFraction;
/**
* Required. Timeout for graceful decommissioning of Spark workers. Specifies the duration to
* wait for Spark workers to complete Spark decommissioning tasks before forcefully removing
* them. Only applicable to downscaling operations. Bounds: 0s, 1d.
* @return value or {@code null} for none
*/
public String getGracefulDecommissionTimeout() {
return gracefulDecommissionTimeout;
}
/**
* Required. Timeout for graceful decommissioning of Spark workers. Specifies the duration to
* wait for Spark workers to complete Spark decommissioning tasks before forcefully removing
* them. Only applicable to downscaling operations. Bounds: 0s, 1d.
* @param gracefulDecommissionTimeout gracefulDecommissionTimeout or {@code null} for none
*/
public SparkStandaloneAutoscalingConfig setGracefulDecommissionTimeout(String gracefulDecommissionTimeout) {
this.gracefulDecommissionTimeout = gracefulDecommissionTimeout;
return this;
}
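// The timeout is a Duration serialized as a decimal number of seconds with an "s" suffix, within
// the documented bounds of 0s to 1d. For example (the value here is chosen purely for illustration):
//
//   config.setGracefulDecommissionTimeout("600s"); // allow up to 10 minutes of graceful decommissioning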
/**
* Optional. Remove only idle workers when scaling down the cluster.
* @return value or {@code null} for none
*/
public java.lang.Boolean getRemoveOnlyIdleWorkers() {
return removeOnlyIdleWorkers;
}
/**
* Optional. Remove only idle workers when scaling down the cluster.
* @param removeOnlyIdleWorkers removeOnlyIdleWorkers or {@code null} for none
*/
public SparkStandaloneAutoscalingConfig setRemoveOnlyIdleWorkers(java.lang.Boolean removeOnlyIdleWorkers) {
this.removeOnlyIdleWorkers = removeOnlyIdleWorkers;
return this;
}
/**
* Required. Fraction of required executors to remove from Spark Standalone clusters. A scale-down
* factor of 1.0 will result in scaling down so that there are no more executors for the Spark
* Job (more aggressive scaling). A scale-down factor closer to 0 will result in a smaller
* magnitude of scaling down (less aggressive scaling). Bounds: 0.0, 1.0.
* @return value or {@code null} for none
*/
public java.lang.Double getScaleDownFactor() {
return scaleDownFactor;
}
/**
* Required. Fraction of required executors to remove from Spark Standalone clusters. A scale-down
* factor of 1.0 will result in scaling down so that there are no more executors for the Spark
* Job (more aggressive scaling). A scale-down factor closer to 0 will result in a smaller
* magnitude of scaling down (less aggressive scaling). Bounds: 0.0, 1.0.
* @param scaleDownFactor scaleDownFactor or {@code null} for none
*/
public SparkStandaloneAutoscalingConfig setScaleDownFactor(java.lang.Double scaleDownFactor) {
this.scaleDownFactor = scaleDownFactor;
return this;
}
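// Illustrative arithmetic only (my reading of the documented semantics, not verified against the
// service): each autoscaling evaluation scales the recommended worker removal by scaleDownFactor.
// For example, if 8 workers are no longer needed and scaleDownFactor is 0.25, roughly 2 workers
// would be removed in that evaluation; a factor of 1.0 removes all 8 at once.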
/**
* Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling
* occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must
* recommend at least a 2-worker scale-down for the cluster to scale. A threshold of 0 means the
* autoscaler will scale down on any recommended change. Bounds: 0.0, 1.0. Default: 0.0.
* @return value or {@code null} for none
*/
public java.lang.Double getScaleDownMinWorkerFraction() {
return scaleDownMinWorkerFraction;
}
/**
* Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling
* occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must
* recommend at least a 2-worker scale-down for the cluster to scale. A threshold of 0 means the
* autoscaler will scale down on any recommended change. Bounds: 0.0, 1.0. Default: 0.0.
* @param scaleDownMinWorkerFraction scaleDownMinWorkerFraction or {@code null} for none
*/
public SparkStandaloneAutoscalingConfig setScaleDownMinWorkerFraction(java.lang.Double scaleDownMinWorkerFraction) {
this.scaleDownMinWorkerFraction = scaleDownMinWorkerFraction;
return this;
}
/**
* Required. Fraction of required workers to add to Spark Standalone clusters. A scale-up factor
* of 1.0 will result in scaling up so that there are no more required workers for the Spark Job
* (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of
* scaling up (less aggressive scaling). Bounds: 0.0, 1.0.
* @return value or {@code null} for none
*/
public java.lang.Double getScaleUpFactor() {
return scaleUpFactor;
}
/**
* Required. Fraction of required workers to add to Spark Standalone clusters. A scale-up factor
* of 1.0 will result in scaling up so that there are no more required workers for the Spark Job
* (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of
* scaling up (less aggressive scaling). Bounds: 0.0, 1.0.
* @param scaleUpFactor scaleUpFactor or {@code null} for none
*/
public SparkStandaloneAutoscalingConfig setScaleUpFactor(java.lang.Double scaleUpFactor) {
this.scaleUpFactor = scaleUpFactor;
return this;
}
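// Illustrative arithmetic only (assumed from the documented semantics): scaleUpFactor scales the
// recommended worker addition. For example, if the Spark job needs 10 more workers and
// scaleUpFactor is 0.5, roughly 5 workers would be added per evaluation; a factor of 1.0 adds
// all 10 immediately.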
/**
* Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs.
* For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at
* least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will
* scale up on any recommended change. Bounds: 0.0, 1.0. Default: 0.0.
* @return value or {@code null} for none
*/
public java.lang.Double getScaleUpMinWorkerFraction() {
return scaleUpMinWorkerFraction;
}
/**
* Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs.
* For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at
* least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will
* scale up on any recommended change. Bounds: 0.0, 1.0. Default: 0.0.
* @param scaleUpMinWorkerFraction scaleUpMinWorkerFraction or {@code null} for none
*/
public SparkStandaloneAutoscalingConfig setScaleUpMinWorkerFraction(java.lang.Double scaleUpMinWorkerFraction) {
this.scaleUpMinWorkerFraction = scaleUpMinWorkerFraction;
return this;
}
@Override
public SparkStandaloneAutoscalingConfig set(String fieldName, Object value) {
return (SparkStandaloneAutoscalingConfig) super.set(fieldName, value);
}
@Override
public SparkStandaloneAutoscalingConfig clone() {
return (SparkStandaloneAutoscalingConfig) super.clone();
}
}