org.openqa.selenium.devtools.v90.webaudio.model.BaseAudioContext Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of selenium-devtools-v90 Show documentation
Selenium automates browsers. That's it! What you do with that power is entirely up to you.
package org.openqa.selenium.devtools.v90.webaudio.model;
import org.openqa.selenium.Beta;
import org.openqa.selenium.json.JsonInput;
/**
* Protocol object for BaseAudioContext
*/
public class BaseAudioContext {

  private final org.openqa.selenium.devtools.v90.webaudio.model.GraphObjectId contextId;

  private final org.openqa.selenium.devtools.v90.webaudio.model.ContextType contextType;

  private final org.openqa.selenium.devtools.v90.webaudio.model.ContextState contextState;

  // Parameterized (was a raw Optional) so callers get a typed value without an unchecked cast.
  private final java.util.Optional<org.openqa.selenium.devtools.v90.webaudio.model.ContextRealtimeData> realtimeData;

  private final java.lang.Number callbackBufferSize;

  private final java.lang.Number maxOutputChannelCount;

  private final java.lang.Number sampleRate;

  /**
   * Creates a new BaseAudioContext protocol object.
   *
   * @param contextId identifier of the context; must not be null
   * @param contextType type of the context; must not be null
   * @param contextState current state of the context; must not be null
   * @param realtimeData optional realtime data for the context; may be empty, but the
   *     Optional itself is expected to be non-null
   * @param callbackBufferSize platform-dependent callback buffer size; must not be null
   * @param maxOutputChannelCount number of output channels supported by the audio
   *     hardware in use; must not be null
   * @param sampleRate context sample rate; must not be null
   * @throws NullPointerException if any required argument is null
   */
  public BaseAudioContext(
      org.openqa.selenium.devtools.v90.webaudio.model.GraphObjectId contextId,
      org.openqa.selenium.devtools.v90.webaudio.model.ContextType contextType,
      org.openqa.selenium.devtools.v90.webaudio.model.ContextState contextState,
      java.util.Optional<org.openqa.selenium.devtools.v90.webaudio.model.ContextRealtimeData> realtimeData,
      java.lang.Number callbackBufferSize,
      java.lang.Number maxOutputChannelCount,
      java.lang.Number sampleRate) {
    this.contextId = java.util.Objects.requireNonNull(contextId, "contextId is required");
    this.contextType = java.util.Objects.requireNonNull(contextType, "contextType is required");
    this.contextState = java.util.Objects.requireNonNull(contextState, "contextState is required");
    this.realtimeData = realtimeData;
    this.callbackBufferSize =
        java.util.Objects.requireNonNull(callbackBufferSize, "callbackBufferSize is required");
    this.maxOutputChannelCount =
        java.util.Objects.requireNonNull(
            maxOutputChannelCount, "maxOutputChannelCount is required");
    this.sampleRate = java.util.Objects.requireNonNull(sampleRate, "sampleRate is required");
  }

  /** Returns the identifier of this context. */
  public org.openqa.selenium.devtools.v90.webaudio.model.GraphObjectId getContextId() {
    return contextId;
  }

  /** Returns the type of this context. */
  public org.openqa.selenium.devtools.v90.webaudio.model.ContextType getContextType() {
    return contextType;
  }

  /** Returns the current state of this context. */
  public org.openqa.selenium.devtools.v90.webaudio.model.ContextState getContextState() {
    return contextState;
  }

  /** Returns the realtime data for this context, if present. */
  public java.util.Optional<org.openqa.selenium.devtools.v90.webaudio.model.ContextRealtimeData>
      getRealtimeData() {
    return realtimeData;
  }

  /**
   * Platform-dependent callback buffer size.
   */
  public java.lang.Number getCallbackBufferSize() {
    return callbackBufferSize;
  }

  /**
   * Number of output channels supported by audio hardware in use.
   */
  public java.lang.Number getMaxOutputChannelCount() {
    return maxOutputChannelCount;
  }

  /**
   * Context sample rate.
   */
  public java.lang.Number getSampleRate() {
    return sampleRate;
  }

  /**
   * Deserializes a BaseAudioContext from a DevTools JSON payload.
   *
   * <p>Private, but invoked reflectively by Selenium's JSON mapper; unknown keys are
   * skipped so newer protocol versions do not break parsing. Numeric fields default to 0
   * and object fields to null/empty if absent; the constructor then rejects missing
   * required fields via requireNonNull.
   *
   * @param input the JSON reader positioned at the start of the object
   * @return the parsed BaseAudioContext
   */
  private static BaseAudioContext fromJson(JsonInput input) {
    org.openqa.selenium.devtools.v90.webaudio.model.GraphObjectId contextId = null;
    org.openqa.selenium.devtools.v90.webaudio.model.ContextType contextType = null;
    org.openqa.selenium.devtools.v90.webaudio.model.ContextState contextState = null;
    java.util.Optional<org.openqa.selenium.devtools.v90.webaudio.model.ContextRealtimeData>
        realtimeData = java.util.Optional.empty();
    java.lang.Number callbackBufferSize = 0;
    java.lang.Number maxOutputChannelCount = 0;
    java.lang.Number sampleRate = 0;
    input.beginObject();
    while (input.hasNext()) {
      switch (input.nextName()) {
        case "contextId":
          contextId = input.read(org.openqa.selenium.devtools.v90.webaudio.model.GraphObjectId.class);
          break;
        case "contextType":
          contextType = input.read(org.openqa.selenium.devtools.v90.webaudio.model.ContextType.class);
          break;
        case "contextState":
          contextState = input.read(org.openqa.selenium.devtools.v90.webaudio.model.ContextState.class);
          break;
        case "realtimeData":
          realtimeData =
              java.util.Optional.ofNullable(
                  input.read(org.openqa.selenium.devtools.v90.webaudio.model.ContextRealtimeData.class));
          break;
        case "callbackBufferSize":
          callbackBufferSize = input.nextNumber();
          break;
        case "maxOutputChannelCount":
          maxOutputChannelCount = input.nextNumber();
          break;
        case "sampleRate":
          sampleRate = input.nextNumber();
          break;
        default:
          input.skipValue();
          break;
      }
    }
    input.endObject();
    return new BaseAudioContext(
        contextId,
        contextType,
        contextState,
        realtimeData,
        callbackBufferSize,
        maxOutputChannelCount,
        sampleRate);
  }
}