package eu.dindoffer.yin.pda.lib.impl.decode;
import java.io.BufferedInputStream;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.UnsupportedAudioFileException;
/**
* Decodes audio streams into arrays of samples.
*
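* <p>Illustrative sketch only, assuming a hypothetical concrete subclass
* {@code WavStreamDecoder} whose {@code next()} delegates to {@link #nextChunk(int)}:
* <pre>{@code
* try (WavStreamDecoder decoder = new WavStreamDecoder(new FileInputStream("track.wav"), 1024)) {
*     while (decoder.hasNext()) {
*         AudioChunk chunk = decoder.next();
*         // process the decoded mono samples carried by the chunk
*     }
* }
* }</pre>
*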
* @author Martin Dindoffer
*/
public abstract class AudioStreamDecoder implements Iterator<AudioChunk>, Closeable {
private final AudioInputStream audioStream;
private final AudioFormat audioFormat;
private final long numOfBytesInStream;
private final long frameLength;
private final int bytesPerFrame;
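//true until the first chunk has been decoded; used to mark the first AudioChunk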
private final AtomicBoolean pendingFirstRead = new AtomicBoolean(true);
protected final int defaultChunkSize;
private int bytesRead;
/**
* Creates a new decoder for an InputStream.
*
* @param inputStream audio to decode
* @param chunkSize size of chunk to decode in one iteration, in frames
* @throws IOException if an I/O exception occurs
* @throws UnsupportedAudioFileException if the stream does not point
* to valid audio file data recognized by the system.
* For further requirements see {@link #AudioStreamDecoder(AudioInputStream, int)}
*/
public AudioStreamDecoder(InputStream inputStream, int chunkSize) throws IOException, UnsupportedAudioFileException {
this(AudioSystem.getAudioInputStream(new BufferedInputStream(inputStream)), chunkSize);
}
/**
* Creates a new decoder for an AudioInputStream.
*
* @param audioInputStream audio to decode.
* @param defaultChunkSize size of chunk to decode in one iteration, in frames
* @throws UnsupportedAudioFileException if the audio stream:
* A) contains more than 32 channels,
* B) does not have a 16-bit sample depth, or
* C) does not support mark/reset
*/
public AudioStreamDecoder(AudioInputStream audioInputStream, int defaultChunkSize) throws UnsupportedAudioFileException {
AudioFormat format = audioInputStream.getFormat();
if (format.getChannels() > 32) {
throw new UnsupportedAudioFileException("Audio input has more than 32 channels. It's probably damaged.");
}
if (format.getSampleSizeInBits() != 16) {
throw new UnsupportedAudioFileException("The only supported bit depth is 16");
}
this.audioStream = audioInputStream;
this.audioFormat = format;
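//fall back to 1 byte per frame if the format does not specify a frame size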
this.bytesPerFrame = format.getFrameSize() == AudioSystem.NOT_SPECIFIED ? 1 : format.getFrameSize();
this.frameLength = audioInputStream.getFrameLength();
this.numOfBytesInStream = bytesPerFrame * this.frameLength;
this.defaultChunkSize = defaultChunkSize;
}
/**
* Decodes the next chunk of frames from the input audio.
*
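* <p>Illustration only (an assumed pattern, not something this class provides): a subclass's
* {@code next()} could simply delegate here, for example:
* <pre>{@code
* public AudioChunk next() {
*     try {
*         return nextChunk(defaultChunkSize);
*     } catch (IOException e) {
*         throw new UncheckedIOException(e);
*     }
* }
* }</pre>
*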
* @param framesToDecode number of audio frames to decode
* @return decoded chunk of audio samples, or {@code null} if the end of the stream has been reached
* @throws IOException if an i/o error occurs while reading the input stream
*/
protected synchronized AudioChunk nextChunk(int framesToDecode) throws IOException {
int bytesToRead = framesToDecode * bytesPerFrame;
byte[] audioBits = new byte[bytesToRead];
int currentlyReadBytes = audioStream.read(audioBits, 0, bytesToRead);//blocking read
if (currentlyReadBytes == -1) {
//end of stream reached
return null;
}
this.bytesRead += currentlyReadBytes;
return decodeSamples(audioBits, currentlyReadBytes / bytesPerFrame);//decode only the frames actually read
}
/**
* Processes the given frames of audio bytes into an array of samples, averaging all channels into a single mono sample per frame.
*
* @param audioBits audio byte array
* @param framesToDecode number of frames stored in the given audio byte array
* @return decoded samples
*/
private AudioChunk decodeSamples(byte[] audioBits, int framesToDecode) {
ByteBuffer bb = ByteBuffer.wrap(audioBits, 0, framesToDecode * bytesPerFrame);//limit to the bytes that hold actual frames
short[] outputBuffer = new short[framesToDecode];
if (audioFormat.isBigEndian()) {
bb.order(ByteOrder.BIG_ENDIAN);
} else {
bb.order(ByteOrder.LITTLE_ENDIAN);
}
int channels = audioFormat.getChannels();
if (channels == AudioSystem.NOT_SPECIFIED) {
channels = 1;
}
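//downmix: average the samples of all channels in each frame into one mono sample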
int i = 0;
while (bb.remaining() > 1) {
int sumForFrame = 0;
for (int x = 0; x < channels; x++) {
short currentSample = bb.getShort();//16-bit depth: each sample is two bytes
sumForFrame += currentSample;
}
outputBuffer[i++] = (short) Math.round((double) sumForFrame / channels);
}
return new AudioChunk(pendingFirstRead.getAndSet(false), outputBuffer);
}
@Override
public synchronized boolean hasNext() {
if (frameLength != AudioSystem.NOT_SPECIFIED) {
return bytesRead < numOfBytesInStream;
}
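//stream length is unknown, assume more data may follow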
return true;
}
@Override
public void close() throws IOException {
audioStream.close();
}
/**
* Returns the format of the decoded audio stream.
*
* @return format of the audio stream
*/
public AudioFormat getFormat() {
return audioFormat;
}
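/**
* Returns the length of the audio stream in frames,
* or {@link AudioSystem#NOT_SPECIFIED} if unknown.
*
* @return stream length in frames
*/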
public long getFrameLength() {
return frameLength;
}
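/**
* Returns the total number of audio data bytes in the stream,
* computed as the frame length multiplied by the frame size.
*
* @return total number of audio data bytes
*/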
public long getNumOfBytesInStream() {
return numOfBytesInStream;
}
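/**
* Returns the number of audio data bytes read from the stream so far.
*
* @return number of bytes read
*/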
public synchronized int getBytesRead() {
return bytesRead;
}
}