/* ----------------------------------------------------------------------------
 * This file was automatically generated by SWIG (http://www.swig.org).
 * Version 2.0.6
 *
 * Do not make changes to this file unless you know what you are doing--modify
 * the SWIG interface file instead.
 * ----------------------------------------------------------------------------- */

package io.humble.video;
import io.humble.ferry.*;
/**
 * Represents a stream of similar data (e.g. video) in a Container.
 * <p>
 * Streams are really virtual concepts; Container objects really just contain
 * a bunch of Packets. But each Packet usually has a stream
 * id associated with it, and all Packets with that stream id represent
 * the same type of (usually time-based) data. For example, in many FLV
 * video files there is a stream with id "0" that contains all video data and
 * a stream with id "1" that contains all audio data.
 * </p>
 * <p>
 * You use a ContainerStream object to get properly configured Decoders
 * for decoding, and to tell Encoders how to encode Packets when
 * encoding.
 * </p>
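 * <p>
 * A minimal usage sketch. The {@code Demuxer} accessors shown are from the
 * wider Humble Video API and are illustrative only; {@code demuxer} is
 * assumed to have been created and opened elsewhere:
 * </p>
 * <pre>{@code
 * for (int i = 0; i < demuxer.getNumStreams(); i++) {
 *   ContainerStream stream = demuxer.getStream(i);
 *   System.out.println("stream " + stream.getIndex()
 *       + ": time base " + stream.getTimeBase());
 * }
 * }</pre>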
 */
public class ContainerStream extends RefCounted {
  // JNIHelper.swg: Start generated code
  // >>>>>>>>>>>>>>>>>>>>>>>>>>>

  /**
   * This method is only here to use some references and remove
   * an Eclipse compiler warning.
   */
  @SuppressWarnings("unused")
  private void noop() {
    Buffer.make(null, 1);
  }

  private volatile long swigCPtr;

  /**
   * Internal Only.
   */
  protected ContainerStream(long cPtr, boolean cMemoryOwn) {
    super(VideoJNI.ContainerStream_SWIGUpcast(cPtr), cMemoryOwn);
    swigCPtr = cPtr;
  }

  /**
   * Internal Only.
   */
  protected ContainerStream(long cPtr, boolean cMemoryOwn,
      java.util.concurrent.atomic.AtomicLong ref) {
    super(VideoJNI.ContainerStream_SWIGUpcast(cPtr), cMemoryOwn, ref);
    swigCPtr = cPtr;
  }

  /**
   * Internal Only. Not part of public API.
   *
   * Get the raw value of the native object that obj is proxying for.
   *
   * @param obj The Java proxy object for a native object.
   * @return The raw pointer obj is proxying for.
   */
  protected static long getCPtr(ContainerStream obj) {
    if (obj == null) return 0;
    return obj.getMyCPtr();
  }

  /**
   * Internal Only. Not part of public API.
   *
   * Get the raw value of the native object that we're proxying for.
   *
   * @return The raw pointer we're proxying for.
   */
  protected long getMyCPtr() {
    if (swigCPtr == 0)
      throw new IllegalStateException("underlying native object already deleted");
    return swigCPtr;
  }

  /**
   * Create a new ContainerStream object that is actually referring to the
   * exact same underlying native object.
   *
   * @return the new Java object.
   */
  @Override
  public ContainerStream copyReference() {
    if (swigCPtr == 0)
      return null;
    else
      return new ContainerStream(swigCPtr, swigCMemOwn, getJavaRefCount());
  }

  /**
   * Compares two values, returning true if the underlying objects in native
   * code are the same object.
   *
   * That means you can have two different Java objects, but when you do a
   * comparison, you'll find out they are the EXACT same object.
   *
   * @return True if the underlying native object is the same. False otherwise.
   */
  @Override
  public boolean equals(Object obj) {
    boolean equal = false;
    if (obj instanceof ContainerStream)
      equal = (((ContainerStream) obj).swigCPtr == this.swigCPtr);
    return equal;
  }

  /**
   * Get a hashable value for this object.
   *
   * @return the hashable value.
   */
  @Override
  public int hashCode() {
    return (int) swigCPtr;
  }

  // <<<<<<<<<<<<<<<<<<<<<<<<<<<
  // JNIHelper.swg: End generated code

  /**
   * Get an ordered sequence of index entries in this {@link ContainerStream}.
   *
   * @return A list of entries. Will always return a non-null
   *   list, but if there are no entries the list size will be zero.
   */
  public java.util.List<IndexEntry> getIndexEntries() {
    final int numEntries = getNumIndexEntries();
    java.util.List<IndexEntry> retval =
        new java.util.ArrayList<IndexEntry>(Math.max(numEntries, 10));
    for (int i = 0; i < numEntries; i++) {
      final IndexEntry entry = getIndexEntry(i);
      if (entry != null) {
        retval.add(entry);
      }
    }
    return retval;
  }

  /**
   * Get the relative position this stream has in the hosting
   * Container object.
   *
   * @return The index within the Container of this stream.
   */
  public int getIndex() {
    return VideoJNI.ContainerStream_getIndex(swigCPtr, this);
  }

  /**
   * Return a container-format-specific id for this stream.
   *
   * @return The (container format specific) id of this stream.
   */
  public int getId() {
    return VideoJNI.ContainerStream_getId(swigCPtr, this);
  }

  /**
   * Get the (sometimes estimated) average frame rate of this stream.
   * For variable frame-rate containers (they do exist), this is only
   * an approximation; it is better to use #getTimeBase().
   * <p>
   * For constant frame-rate containers, this will be 1 / getTimeBase().
   * </p>
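   * <p>
   * A minimal sketch ({@code Rational#getDouble()} is assumed from the
   * Humble Video Rational class):
   * </p>
   * <pre>{@code
   * Rational rate = stream.getFrameRate();
   * double fps = (rate == null) ? 0.0 : rate.getDouble();
   * }</pre>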
   *
   * @return The frame rate of this stream.
   */
  public Rational getFrameRate() {
    long cPtr = VideoJNI.ContainerStream_getFrameRate(swigCPtr, this);
    return (cPtr == 0) ? null : new Rational(cPtr, false);
  }

  /**
   * The time base in which all timestamps (e.g. Presentation Time Stamp (PTS)
   * and Decompression Time Stamp (DTS)) are represented. For example,
   * if the time base is 1/1000, then the difference between a PTS of 1 and
   * a PTS of 2 is 1 millisecond. If the time base is 1/1, then the difference
   * between a PTS of 1 and a PTS of 2 is 1 second.
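   * <p>
   * A minimal sketch of time-base arithmetic ({@code Rational#getNumerator()}
   * and {@code Rational#getDenominator()} are assumed from the Humble Video
   * Rational class):
   * </p>
   * <pre>{@code
   * Rational tb = stream.getTimeBase();  // e.g. 1/1000
   * long pts = 2500;                     // timestamp in time-base units
   * double seconds = pts * (double) tb.getNumerator() / tb.getDenominator();
   * // with a 1/1000 time base, 2500 * 1 / 1000 = 2.5 seconds
   * }</pre>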
   *
   * @return The time base of this stream.
   */
  public Rational getTimeBase() {
    long cPtr = VideoJNI.ContainerStream_getTimeBase(swigCPtr, this);
    return (cPtr == 0) ? null : new Rational(cPtr, false);
  }

  /**
   * Return the start time, in #getTimeBase() units, when this stream
   * started.
   *
   * @return The start time.
   */
  public long getStartTime() {
    return VideoJNI.ContainerStream_getStartTime(swigCPtr, this);
  }

  /**
   * Return the duration, in #getTimeBase() units, of this stream,
   * or Global#NO_PTS if unknown.
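   * <p>
   * A minimal sketch of reading the duration defensively, using the
   * {@code Global.NO_PTS} sentinel referenced above:
   * </p>
   * <pre>{@code
   * long duration = stream.getDuration();
   * if (duration == Global.NO_PTS) {
   *   // duration is unknown for this stream
   * }
   * }</pre>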
   *
   * @return The duration (in #getTimeBase() units) of this stream, if known.
   */
  public long getDuration() {
    return VideoJNI.ContainerStream_getDuration(swigCPtr, this);
  }

  /**
   * The current Decompression Time Stamp that will be used on this stream,
   * in #getTimeBase() units.
   *
   * @return The current Decompression Time Stamp that will be used on this stream.
   */
  public long getCurrentDts() {
    return VideoJNI.ContainerStream_getCurrentDts(swigCPtr, this);
  }

  /**
   * Get the number of index entries in this stream.
   *
   * @return The number of index entries in this stream.
   * @see #getIndexEntry(int)
   */
  public int getNumIndexEntries() {
    return VideoJNI.ContainerStream_getNumIndexEntries(swigCPtr, this);
  }

  /**
   * Returns the number of encoded frames, if known. Note that a frame here
   * means an encoded frame, which can consist of many encoded audio samples
   * or a single encoded video frame.
   *
   * @return The number of (encoded) frames in this stream.
   */
  public long getNumFrames() {
    return VideoJNI.ContainerStream_getNumFrames(swigCPtr, this);
  }

  /**
   * Gets the sample aspect ratio.
   *
   * @return The sample aspect ratio.
   */
  public Rational getSampleAspectRatio() {
    long cPtr = VideoJNI.ContainerStream_getSampleAspectRatio(swigCPtr, this);
    return (cPtr == 0) ? null : new Rational(cPtr, false);
  }

  /**
   * Get the underlying container for this stream, or null if Humble Video
   * doesn't know.
   *
   * @return the container, or null if we don't know.
   */
  public Container getContainer() {
    long cPtr = VideoJNI.ContainerStream_getContainer(swigCPtr, this);
    return (cPtr == 0) ? null : new Container(cPtr, false);
  }

  /**
   * Get how the decoding codec should parse data from this stream.
   *
   * @return the parse type.
   */
  public ContainerStream.ParseType getParseType() {
    return ContainerStream.ParseType.swigToEnum(
        VideoJNI.ContainerStream_getParseType(swigCPtr, this));
  }

  /**
   * Set the parse type the decoding codec should use. Set to
   * ParseType#PARSE_NONE if you don't want any parsing
   * to be done.
   * <p>
   * Warning: do not set this flag unless you know what you're doing,
   * and do not set it after you've started reading packets.
   * </p>
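   * <p>
   * A minimal sketch, assuming {@code stream} came from a demuxer that has
   * not yet started reading packets:
   * </p>
   * <pre>{@code
   * stream.setParseType(ContainerStream.ParseType.PARSE_NONE);
   * }</pre>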
   *
   * @param type The type to set.
   */
  public void setParseType(ContainerStream.ParseType type) {
    VideoJNI.ContainerStream_setParseType(swigCPtr, this, type.swigValue());
  }

  /**
   * Get the KeyValueBag for this object,
   * or null if none.
   * <p>
   * If the Container or Stream object
   * that this KeyValueBag came from was opened
   * for reading, then changes via KeyValueBag#setValue(String, String)
   * will have no effect on the underlying media.
   * </p>
   * <p>
   * If the Container or Stream object
   * that this KeyValueBag came from was opened
   * for writing, then changes via KeyValueBag#setValue(String, String)
   * will have no effect after Container#writeHeader()
   * is called.
   * </p>
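   * <p>
   * A minimal read-only sketch; {@code getValue} is assumed from the
   * Humble Video KeyValueBag API (check the exact signature in your version):
   * </p>
   * <pre>{@code
   * KeyValueBag meta = stream.getMetaData();
   * String language = (meta == null) ? null : meta.getValue("language");
   * }</pre>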
   *
   * @return the KeyValueBag.
   */
  public KeyValueBag getMetaData() {
    long cPtr = VideoJNI.ContainerStream_getMetaData(swigCPtr, this);
    return (cPtr == 0) ? null : new KeyValueBag(cPtr, false);
  }

  /**
   * Search for the given time stamp in the key-frame index for this Stream.
   * <p>
   * Not all ContainerFormat implementations
   * maintain key-frame indexes, but if they have one,
   * then this method searches the Stream index
   * to quickly find the byte offset of the nearest key frame to
   * the given time stamp.
   * </p>
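   * <p>
   * A minimal sketch of finding the key frame at or before a timestamp,
   * using the {@code Container.SEEK_FLAG_BACKWARDS} flag documented below:
   * </p>
   * <pre>{@code
   * IndexEntry entry =
   *     stream.findTimeStampEntryInIndex(wantedPts, Container.SEEK_FLAG_BACKWARDS);
   * if (entry != null) {
   *   // entry describes the nearest key frame at or before wantedPts
   * }
   * }</pre>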
   *
   * @param wantedTimeStamp the time stamp wanted, in the stream's
   *   time base units.
   * @param flags A bitmask of the SEEK_FLAG_* flags, or 0 to turn
   *   all flags off. If Container#SEEK_FLAG_BACKWARDS is set, the returned
   *   index will correspond to the time stamp which is &lt;=
   *   the requested one (not supported by all demuxers).
   *   If Container#SEEK_FLAG_BACKWARDS is not set, then it will be &gt;=.
   *   If Container#SEEK_FLAG_ANY is set, seek to any frame; otherwise seek
   *   only to key frames (not supported by all demuxers).
   * @return The IndexEntry for the nearest appropriate timestamp
   *   in the index, or null if it can't be found.
   */
  public IndexEntry findTimeStampEntryInIndex(long wantedTimeStamp, int flags) {
    long cPtr = VideoJNI.ContainerStream_findTimeStampEntryInIndex(
        swigCPtr, this, wantedTimeStamp, flags);
    return (cPtr == 0) ? null : new IndexEntry(cPtr, false);
  }

  /**
   * Search for the given time stamp in the key-frame index for this Stream.
   * <p>
   * Not all ContainerFormat implementations
   * maintain key-frame indexes, but if they have one,
   * then this method searches the Stream index
   * to quickly find the index-entry position of the nearest key frame to
   * the given time stamp.
   * </p>
   *
   * @param wantedTimeStamp the time stamp wanted, in the stream's
   *   time base units.
   * @param flags A bitmask of the SEEK_FLAG_* flags, or 0 to turn
   *   all flags off. If Container#SEEK_FLAG_BACKWARDS is set, the returned
   *   index will correspond to the time stamp which is &lt;=
   *   the requested one (not supported by all demuxers).
   *   If Container#SEEK_FLAG_BACKWARDS is not set, then it will be &gt;=.
   *   If Container#SEEK_FLAG_ANY is set, seek to any frame; otherwise seek
   *   only to key frames (not supported by all demuxers).
   * @return The position in this Stream index, or -1 if it cannot
   *   be found or an index is not maintained.
   * @see #getIndexEntry(int)
   */
  public int findTimeStampPositionInIndex(long wantedTimeStamp, int flags) {
    return VideoJNI.ContainerStream_findTimeStampPositionInIndex(
        swigCPtr, this, wantedTimeStamp, flags);
  }

  /**
   * Get the IndexEntry at the given position in this
   * Stream object's index.
   * <p>
   * Not all ContainerFormat types maintain
   * Stream indexes, but if they do,
   * this method can return those entries.
   * </p>
   * <p>
   * Do not modify the Container this stream
   * is from between calls to this method and
   * #getNumIndexEntries(), as indexes may
   * be compacted while processing.
   * </p>
   *
   * @param position The position in the index table.
   * @return The IndexEntry at that position, or null if there is none.
   */
  public IndexEntry getIndexEntry(int position) {
    long cPtr = VideoJNI.ContainerStream_getIndexEntry(swigCPtr, this, position);
    return (cPtr == 0) ? null : new IndexEntry(cPtr, false);
  }

  /**
   * Get the Stream.Disposition of this stream.
   */
  public ContainerStream.Disposition getDisposition() {
    return ContainerStream.Disposition.swigToEnum(
        VideoJNI.ContainerStream_getDisposition(swigCPtr, this));
  }

  /**
   * For streams with Stream.Disposition.DISPOSITION_ATTACHED_PIC,
   * this returns a read-only copy of the packet containing the
   * picture (which needs to be decoded separately).
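   * <p>
   * A minimal sketch of retrieving cover art when the stream's disposition
   * is exactly DISPOSITION_ATTACHED_PIC (dispositions can combine in some
   * containers, so treat this as illustrative):
   * </p>
   * <pre>{@code
   * if (stream.getDisposition() == ContainerStream.Disposition.DISPOSITION_ATTACHED_PIC) {
   *   MediaPacket art = stream.getAttachedPic();
   *   // decode art separately to obtain the image
   * }
   * }</pre>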
   */
  public MediaPacket getAttachedPic() {
    long cPtr = VideoJNI.ContainerStream_getAttachedPic(swigCPtr, this);
    return (cPtr == 0) ? null : new MediaPacket(cPtr, false);
  }

  /**
   * Check if the stream is matched by the stream specifier.
   * <p>
   * See the "stream specifiers" chapter in the FFmpeg documentation for the
   * syntax of the specifier: https://www.ffmpeg.org/ffmpeg.html#Stream-selection
   * </p>
   * <p>
   * Note: a stream specifier can match several streams in a container.
   * </p>
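   * <p>
   * A minimal sketch; per the FFmpeg syntax, {@code "a"} matches audio
   * streams and {@code "v"} matches video streams:
   * </p>
   * <pre>{@code
   * boolean isAudio = stream.matchSpecifier("a");
   * }</pre>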
   *
   * @param specifier the specifier string
   * @return true if this stream is matched by the specifier; false otherwise.
   * @throws InvalidArgument if the specifier is invalid.
   */
  public boolean matchSpecifier(String specifier) {
    return VideoJNI.ContainerStream_matchSpecifier(swigCPtr, this, specifier);
  }

  /**
   * The disposition of this stream. Some streams can have special
   * meanings in some Containers.
   */
  public enum Disposition {
    DISPOSITION_NONE(VideoJNI.ContainerStream_DISPOSITION_NONE_get()),
    DISPOSITION_DEFAULT(VideoJNI.ContainerStream_DISPOSITION_DEFAULT_get()),
    DISPOSITION_DUB(VideoJNI.ContainerStream_DISPOSITION_DUB_get()),
    DISPOSITION_ORIGINAL(VideoJNI.ContainerStream_DISPOSITION_ORIGINAL_get()),
    DISPOSITION_COMMENT(VideoJNI.ContainerStream_DISPOSITION_COMMENT_get()),
    DISPOSITION_LYRICS(VideoJNI.ContainerStream_DISPOSITION_LYRICS_get()),
    DISPOSITION_KARAOKE(VideoJNI.ContainerStream_DISPOSITION_KARAOKE_get()),
    /**
     * Track should be used during playback by default.
     * Useful for a subtitle track that should be displayed
     * even when the user did not explicitly ask for subtitles.
     */
    DISPOSITION_FORCED(VideoJNI.ContainerStream_DISPOSITION_FORCED_get()),
    /**
     * Stream for hearing-impaired audiences.
     */
    DISPOSITION_HEARING_IMPAIRED(VideoJNI.ContainerStream_DISPOSITION_HEARING_IMPAIRED_get()),
    /**
     * Stream for visually impaired audiences; stream without voice.
     */
    DISPOSITION_VISUAL_IMPAIRED(VideoJNI.ContainerStream_DISPOSITION_VISUAL_IMPAIRED_get()),
    DISPOSITION_CLEAN_EFFECTS(VideoJNI.ContainerStream_DISPOSITION_CLEAN_EFFECTS_get()),
    /**
     * The stream is stored in the file as an attached picture/"cover art" (e.g.
     * APIC frame in ID3v2). The single packet associated with it will be returned
     * among the first few packets read from the file unless seeking takes place.
     * It can also be accessed at any time in #getAttachedPic().
     */
    DISPOSITION_ATTACHED_PIC(VideoJNI.ContainerStream_DISPOSITION_ATTACHED_PIC_get()),
    ;

    public final int swigValue() {
      return swigValue;
    }

    public static Disposition swigToEnum(int swigValue) {
      Disposition[] swigValues = Disposition.class.getEnumConstants();
      if (swigValue < swigValues.length && swigValue >= 0
          && swigValues[swigValue].swigValue == swigValue)
        return swigValues[swigValue];
      for (Disposition swigEnum : swigValues)
        if (swigEnum.swigValue == swigValue)
          return swigEnum;
      throw new IllegalArgumentException(
          "No enum " + Disposition.class + " with value " + swigValue);
    }

    @SuppressWarnings("unused")
    private Disposition() {
      this.swigValue = SwigNext.next++;
    }

    @SuppressWarnings("unused")
    private Disposition(int swigValue) {
      this.swigValue = swigValue;
      SwigNext.next = swigValue + 1;
    }

    @SuppressWarnings("unused")
    private Disposition(Disposition swigEnum) {
      this.swigValue = swigEnum.swigValue;
      SwigNext.next = this.swigValue + 1;
    }

    private final int swigValue;

    private static class SwigNext {
      private static int next = 0;
    }
  }

  /**
   * What types of parsing can we do on a call to
   * Source#read(Packet).
   */
  public enum ParseType {
    /** No special instructions. */
    PARSE_NONE(VideoJNI.ContainerStream_PARSE_NONE_get()),
    /** Full parsing and repack. */
    PARSE_FULL(VideoJNI.ContainerStream_PARSE_FULL_get()),
    /** Only parse headers; do not repack. */
    PARSE_HEADERS(VideoJNI.ContainerStream_PARSE_HEADERS_get()),
    /**
     * Full parsing and interpolation of timestamps for frames not starting
     * on a packet boundary.
     */
    PARSE_TIMESTAMPS(VideoJNI.ContainerStream_PARSE_TIMESTAMPS_get()),
    /**
     * Full parsing and repack of the first frame only; currently only
     * implemented for H.264.
     */
    PARSE_FULL_ONCE(VideoJNI.ContainerStream_PARSE_FULL_ONCE_get()),
    /**
     * Full parsing and repack with timestamp and position generation by the
     * parser for raw data. This assumes that each packet in the file contains
     * no demuxer-level headers and just codec-level data; otherwise position
     * generation would fail.
     */
    PARSE_FULL_RAW(VideoJNI.ContainerStream_PARSE_FULL_RAW_get()),
    ;

    public final int swigValue() {
      return swigValue;
    }

    public static ParseType swigToEnum(int swigValue) {
      ParseType[] swigValues = ParseType.class.getEnumConstants();
      if (swigValue < swigValues.length && swigValue >= 0
          && swigValues[swigValue].swigValue == swigValue)
        return swigValues[swigValue];
      for (ParseType swigEnum : swigValues)
        if (swigEnum.swigValue == swigValue)
          return swigEnum;
      throw new IllegalArgumentException(
          "No enum " + ParseType.class + " with value " + swigValue);
    }

    @SuppressWarnings("unused")
    private ParseType() {
      this.swigValue = SwigNext.next++;
    }

    @SuppressWarnings("unused")
    private ParseType(int swigValue) {
      this.swigValue = swigValue;
      SwigNext.next = swigValue + 1;
    }

    @SuppressWarnings("unused")
    private ParseType(ParseType swigEnum) {
      this.swigValue = swigEnum.swigValue;
      SwigNext.next = this.swigValue + 1;
    }

    private final int swigValue;

    private static class SwigNext {
      private static int next = 0;
    }
  }
}



