io.humble.video.MediaAudioResampler Maven / Gradle / Ivy

This is the main Humble Video Java library. It contains no native code, only the Java runtime classes, and must be paired with the correct humble-video-arch-*.jar library for your OS. For most users, depending on humble-video-all will work better.

/* ----------------------------------------------------------------------------
 * This file was automatically generated by SWIG (http://www.swig.org).
 * Version 2.0.6
 *
 * Do not make changes to this file unless you know what you are doing--modify
 * the SWIG interface file instead.
 * ----------------------------------------------------------------------------- */

package io.humble.video;
import io.humble.ferry.*;
/**
 * A MediaAudioResampler object resamples MediaAudio objects from
 * one format/sample-rate/channel-layout to another.
 */
public class MediaAudioResampler extends MediaResampler {
  // JNIHelper.swg: Start generated code
  // >>>>>>>>>>>>>>>>>>>>>>>>>>>
/**
 * This method is only here to use some references and remove
 * an Eclipse compiler warning.
 */
  @SuppressWarnings("unused")
  private void noop() {
    Buffer.make(null, 1);
  }

  private volatile long swigCPtr;

/**
 * Internal Only.
 */
  protected MediaAudioResampler(long cPtr, boolean cMemoryOwn) {
    super(VideoJNI.MediaAudioResampler_SWIGUpcast(cPtr), cMemoryOwn);
    swigCPtr = cPtr;
  }

/**
 * Internal Only.
 */
  protected MediaAudioResampler(long cPtr, boolean cMemoryOwn, java.util.concurrent.atomic.AtomicLong ref) {
    super(VideoJNI.MediaAudioResampler_SWIGUpcast(cPtr), cMemoryOwn, ref);
    swigCPtr = cPtr;
  }

/**
 * Internal Only. Not part of public API.
 *
 * Get the raw value of the native object that obj is proxying for.
 *
 * @param obj The java proxy object for a native object.
 * @return The raw pointer obj is proxying for.
 */
  protected static long getCPtr(MediaAudioResampler obj) {
    if (obj == null) return 0;
    return obj.getMyCPtr();
  }

/**
 * Internal Only. Not part of public API.
 *
 * Get the raw value of the native object that we're proxying for.
 *
 * @return The raw pointer we're proxying for.
 */
  protected long getMyCPtr() {
    if (swigCPtr == 0) throw new IllegalStateException("underlying native object already deleted");
    return swigCPtr;
  }

/**
 * Create a new MediaAudioResampler object that is actually referring to the
 * exact same underlying native object.
 *
 * @return the new Java object.
 */
  @Override
  public MediaAudioResampler copyReference() {
    if (swigCPtr == 0)
      return null;
    else
      return new MediaAudioResampler(swigCPtr, swigCMemOwn, getJavaRefCount());
  }

/**
 * Compares two values, returning true if the underlying objects in native code are the same object.
 *
 * That means you can have two different Java objects, but when you do a comparison, you'll find out
 * they are the EXACT same object.
 *
 * @return True if the underlying native object is the same. False otherwise.
 */
  public boolean equals(Object obj) {
    boolean equal = false;
    if (obj instanceof MediaAudioResampler)
      equal = (((MediaAudioResampler) obj).swigCPtr == this.swigCPtr);
    return equal;
  }

/**
 * Get a hashable value for this object.
 *
 * @return the hashable value.
 */
  public int hashCode() {
    return (int) swigCPtr;
  }

  // <<<<<<<<<<<<<<<<<<<<<<<<<<<
  // JNIHelper.swg: End generated code

/**
 * Create a new MediaAudioResampler.
 */
  public static MediaAudioResampler make(AudioChannel.Layout outLayout, int outSampleRate, AudioFormat.Type outFormat, AudioChannel.Layout inLayout, int inSampleRate, AudioFormat.Type inFormat) {
    long cPtr = VideoJNI.MediaAudioResampler_make(outLayout.swigValue(), outSampleRate, outFormat.swigValue(), inLayout.swigValue(), inSampleRate, inFormat.swigValue());
    return (cPtr == 0) ? null : new MediaAudioResampler(cPtr, false);
  }

/**
 * Get output channel layout.
 */
  public AudioChannel.Layout getOutputLayout() {
    return AudioChannel.Layout.swigToEnum(VideoJNI.MediaAudioResampler_getOutputLayout(swigCPtr, this));
  }

/**
 * Get input channel layout.
 */
  public AudioChannel.Layout getInputLayout() {
    return AudioChannel.Layout.swigToEnum(VideoJNI.MediaAudioResampler_getInputLayout(swigCPtr, this));
  }

/**
 * Get output sample rate.
 */
  public int getOutputSampleRate() {
    return VideoJNI.MediaAudioResampler_getOutputSampleRate(swigCPtr, this);
  }

/**
 * Get input sample rate.
 */
  public int getInputSampleRate() {
    return VideoJNI.MediaAudioResampler_getInputSampleRate(swigCPtr, this);
  }

/**
 * Get output audio format.
 */
  public AudioFormat.Type getOutputFormat() {
    return AudioFormat.Type.swigToEnum(VideoJNI.MediaAudioResampler_getOutputFormat(swigCPtr, this));
  }

/**
 * Get input audio format.
 */
  public AudioFormat.Type getInputFormat() {
    return AudioFormat.Type.swigToEnum(VideoJNI.MediaAudioResampler_getInputFormat(swigCPtr, this));
  }

/**
 * Get number of input channels (derived from #getInputLayout()).
 */
  public int getInputChannels() {
    return VideoJNI.MediaAudioResampler_getInputChannels(swigCPtr, this);
  }

/**
 * Get number of output channels (derived from #getOutputLayout()).
 */
  public int getOutputChannels() {
    return VideoJNI.MediaAudioResampler_getOutputChannels(swigCPtr, this);
  }

/**
 * Opens the resampler so it can be ready for resampling.
 * You should NOT set options after you open this object.
 */
  public void open() {
    VideoJNI.MediaAudioResampler_open(swigCPtr, this);
  }

/**
 * Convert audio.
*
* in can be set to null to flush the last few samples out at the
* end.
*
* If more input is provided than output space then the input will be buffered.
* You can avoid this buffering by providing more output space than input.
* Conversion will run directly without copying whenever possible.
*
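 * A minimal usage sketch (an editorial illustration, not generated documentation;
 * {@code resampler}, {@code in} and {@code out} are assumed to have been created,
 * sized and opened by the caller):
 *
 * <pre>{@code
 * int written = resampler.resample(out, in);   // convert one chunk; input may be buffered
 * // ... once all input has been fed in, pass null to drain anything still buffered:
 * int flushed = resampler.resample(out, null);
 * }</pre>
 *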
* @param out output audio object. caller is responsible for making the correct size.
* @param in input audio.
*
* @return number of samples output per channel.
* @throws RuntimeError if we get an error or InvalidArgument if the attributes of
 * in or out do not match what this resampler expected.
 */
  public int resample(MediaSampled out, MediaSampled in) {
    return VideoJNI.MediaAudioResampler_resample(swigCPtr, this, MediaSampled.getCPtr(out), out, MediaSampled.getCPtr(in), in);
  }

  public int resampleAudio(MediaAudio out, MediaAudio in) {
    return VideoJNI.MediaAudioResampler_resampleAudio(swigCPtr, this, MediaAudio.getCPtr(out), out, MediaAudio.getCPtr(in), in);
  }

/**
 * Convert the next timestamp from input to output;
* timestamps are in 1/(in_sample_rate * out_sample_rate) units.
*
* Note: There are 2 slightly differently behaving modes.
* First is when automatic timestamp compensation is not used, (min_compensation >= FLT_MAX)
* in this case timestamps will be passed through with delays compensated
* Second is when automatic timestamp compensation is used, (min_compensation < FLT_MAX)
* in this case the output timestamps will match output sample numbers
*
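 * As an editorial illustration of the units (the rates here are hypothetical): with an
 * input sample rate of 48000 and an output sample rate of 44100, one timestamp unit is
 * 1/(48000 * 44100) = 1/2,116,800,000 of a second.
 *
 * <pre>{@code
 * // Sketch only; "resampler" is assumed to be an opened MediaAudioResampler.
 * long nextOutputPts = resampler.getNextPts(Long.MIN_VALUE); // INT64_MIN: input pts unknown
 * }</pre>
 *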
* @param pts timestamp for the next input sample, INT64_MIN if unknown
 * @return the output timestamp for the next output sample.
 */
  public long getNextPts(long pts) {
    return VideoJNI.MediaAudioResampler_getNextPts(swigCPtr, this, pts);
  }

/**
 * Activate resampling compensation.
 */
  public void setCompensation(int sample_delta, int compensation_distance) {
    VideoJNI.MediaAudioResampler_setCompensation(swigCPtr, this, sample_delta, compensation_distance);
  }

/**
 * Drops the specified number of output samples.
 * @return # of samples dropped.
 */
  public int dropOutput(int count) {
    return VideoJNI.MediaAudioResampler_dropOutput(swigCPtr, this, count);
  }

/**
 * Injects the specified number of silence samples.
 * @return # of samples injected.
 */
  public int injectSilence(int count) {
    return VideoJNI.MediaAudioResampler_injectSilence(swigCPtr, this, count);
  }

/**
 * Gets the delay the next input sample will experience relative to the next output sample.
*
* The resampler can buffer data if more input has been provided than available
* output space, also converting between sample rates needs a delay.
* This function returns the sum of all such delays.
* The exact delay is not necessarily an integer value in either input or
* output sample rate. Especially when downsampling by a large value, the
* output sample rate may be a poor choice to represent the delay, similarly
* for upsampling and the input sample rate.
*
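 * For example (an editorial sketch; {@code resampler} is assumed to be an opened
 * MediaAudioResampler):
 *
 * <pre>{@code
 * long delayMillis = resampler.getDelay(1000);                                   // delay in milliseconds
 * long delayInInputSamples = resampler.getDelay(resampler.getInputSampleRate()); // delay in input samples
 * }</pre>
 *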
 * @param base the timebase in which the returned delay will be expressed:
 *             if it is set to 1, the returned delay is in seconds;
 *             if it is set to 1000, the returned delay is in milliseconds;
 *             if it is set to the input sample rate, the returned delay is in input samples;
 *             if it is set to the output sample rate, the returned delay is in output samples;
 *             an exact, rounding-free delay can be obtained by using LCM(in_sample_rate, out_sample_rate).
 * @return the delay in 1/base units.
 */
  public long getDelay(long base) {
    return VideoJNI.MediaAudioResampler_getDelay(swigCPtr, this, base);
  }

/**
 * Returns the number of resampled samples (rounded up) that would
* be required when resampling a given number of samples.
* That was a mouthful, yes? So here's the way to think of this. If your input
 * audio is at 48,000 Hz, and you pass in 0.5 seconds of audio, that's 24,000 input samples.
 * But if you're resampling to 22,050 Hz, then 0.5 seconds of audio is 11,025 output samples.
 * So getNumResampledSamples(24000) would return 11025 if the input
 * sample rate was 48,000 and the output was 22,050.
 */
  public int getNumResampledSamples(int numSamples) {
    return VideoJNI.MediaAudioResampler_getNumResampledSamples(swigCPtr, this, numSamples);
  }

/**
 * Get the timebase used when outputting timestamps for audio.
*
* Defaults to 1 / (the lowest common multiple of getInputSampleRate()
* and getOutputSampleRate()) in order to ensure that no rounding
 * of timestamps occurs.
*
* For example, if the input sample rate is 22050 and the output sample
* rate is 44100, then the output time base will be (1/44100). But if the
* input sample rate is 48000 and the output sample rate is 22050, then
* the output time base will be (1/lcm(48000,22050)) which will be 1/7056000
* (trust me). This is done so that timestamp values do not get rounded (and
 * therefore introduce drift).
 */
  public Rational getTimeBase() {
    long cPtr = VideoJNI.MediaAudioResampler_getTimeBase(swigCPtr, this);
    return (cPtr == 0) ? null : new Rational(cPtr, false);
  }

/**
 * Set the timebase to use for timestamps on output audio.
*
 * @throws InvalidArgument if null.
 */
  public void setTimeBase(Rational rational) {
    VideoJNI.MediaAudioResampler_setTimeBase(swigCPtr, this, Rational.getCPtr(rational), rational);
  }

  public MediaResampler.State getState() {
    return MediaResampler.State.swigToEnum(VideoJNI.MediaAudioResampler_getState(swigCPtr, this));
  }

}
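
Usage sketch (editorial addition, not part of the generated file). The snippet below shows the intended call sequence from the class above: make() takes the output parameters first and the input parameters second, open() must be called before converting, and a null input at the end flushes buffered samples. The AudioChannel.Layout and AudioFormat.Type constant names and the MediaAudio.make(...) factory signature are assumptions based on other classes in this artifact; verify them against the MediaAudio, AudioChannel and AudioFormat sources before relying on this.

import io.humble.video.AudioChannel;
import io.humble.video.AudioFormat;
import io.humble.video.MediaAudio;
import io.humble.video.MediaAudioResampler;

public class ResampleSketch {
  public static void main(String[] args) {
    // Convert stereo 48 kHz signed 16-bit audio to stereo 44.1 kHz signed 16-bit audio.
    MediaAudioResampler resampler = MediaAudioResampler.make(
        AudioChannel.Layout.CH_LAYOUT_STEREO, 44100, AudioFormat.Type.SAMPLE_FMT_S16,  // output side
        AudioChannel.Layout.CH_LAYOUT_STEREO, 48000, AudioFormat.Type.SAMPLE_FMT_S16); // input side
    resampler.open(); // do not change options after open()

    // Assumed MediaAudio.make(numSamples, sampleRate, channels, layout, format) signature.
    MediaAudio in = MediaAudio.make(1024, 48000, 2,
        AudioChannel.Layout.CH_LAYOUT_STEREO, AudioFormat.Type.SAMPLE_FMT_S16);
    MediaAudio out = MediaAudio.make(resampler.getNumResampledSamples(1024), 44100, 2,
        AudioChannel.Layout.CH_LAYOUT_STEREO, AudioFormat.Type.SAMPLE_FMT_S16);

    // ... fill `in` with decoded samples, then convert one chunk:
    int samplesPerChannel = resampler.resampleAudio(out, in);
    System.out.println("wrote " + samplesPerChannel + " samples per channel");

    // When no more input remains, flush whatever the resampler buffered internally:
    resampler.resampleAudio(out, null);
  }
}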



