
io.humble.video.MediaPacket Maven / Gradle / Ivy


This is the main Humble Video Java library. It contains all of the Java runtime code but no native code, so it must be paired with the correct humble-video-arch-*.jar library for your OS. For most users, depending on humble-video-all works better.
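As a sketch, a Maven dependency declaration for the all-in-one artifact would look like the following (the version shown is a placeholder; check the repository for the latest release):

<dependency>
  <groupId>io.humble</groupId>
  <artifactId>humble-video-all</artifactId>
  <version>0.3.0</version>
</dependency>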

/* ----------------------------------------------------------------------------
 * This file was automatically generated by SWIG (http://www.swig.org).
 * Version 2.0.6
 *
 * Do not make changes to this file unless you know what you are doing--modify
 * the SWIG interface file instead.
 * ----------------------------------------------------------------------------- */

package io.humble.video;
import io.humble.ferry.*;
/**
 * A packet of encoded data that was read from a Demuxer or
 * will be written to a Muxer.
 */
public class MediaPacket extends MediaEncoded {
  // JNIHelper.swg: Start generated code
  // >>>>>>>>>>>>>>>>>>>>>>>>>>>

  /**
   * This method is only here to use some references and remove
   * an Eclipse compiler warning.
   */
  @SuppressWarnings("unused")
  private void noop()
  {
    Buffer.make(null, 1);
  }

  private volatile long swigCPtr;

  /**
   * Internal Only.
   */
  protected MediaPacket(long cPtr, boolean cMemoryOwn) {
    super(VideoJNI.MediaPacket_SWIGUpcast(cPtr), cMemoryOwn);
    swigCPtr = cPtr;
  }

  /**
   * Internal Only.
   */
  protected MediaPacket(long cPtr, boolean cMemoryOwn,
      java.util.concurrent.atomic.AtomicLong ref)
  {
    super(VideoJNI.MediaPacket_SWIGUpcast(cPtr), cMemoryOwn, ref);
    swigCPtr = cPtr;
  }

  /**
   * Internal Only. Not part of public API.
   *
   * Get the raw value of the native object that obj is proxying for.
   *
   * @param obj The java proxy object for a native object.
   * @return The raw pointer obj is proxying for.
   */
  protected static long getCPtr(MediaPacket obj) {
    if (obj == null) return 0;
    return obj.getMyCPtr();
  }

  /**
   * Internal Only. Not part of public API.
   *
   * Get the raw value of the native object that we're proxying for.
   *
   * @return The raw pointer we're proxying for.
   */
  protected long getMyCPtr() {
    if (swigCPtr == 0)
      throw new IllegalStateException("underlying native object already deleted");
    return swigCPtr;
  }

  /**
   * Create a new MediaPacket object that is actually referring to the
   * exact same underlying native object.
   *
   * @return the new Java object.
   */
  @Override
  public MediaPacket copyReference() {
    if (swigCPtr == 0)
      return null;
    else
      return new MediaPacket(swigCPtr, swigCMemOwn, getJavaRefCount());
  }

  /**
   * Compares two values, returning true if the underlying objects in native code are the same object.
   *
   * That means you can have two different Java objects, but when you do a comparison, you'll find out
   * they are the EXACT same object.
   *
   * @return True if the underlying native object is the same. False otherwise.
   */
  public boolean equals(Object obj) {
    boolean equal = false;
    if (obj instanceof MediaPacket)
      equal = (((MediaPacket) obj).swigCPtr == this.swigCPtr);
    return equal;
  }

  /**
   * Get a hashable value for this object.
   *
   * @return the hashable value.
   */
  public int hashCode() {
    return (int) swigCPtr;
  }

  // <<<<<<<<<<<<<<<<<<<<<<<<<<<
  // JNIHelper.swg: End generated code

  @Override
  public String toString() {
    final StringBuilder b = new StringBuilder(super.toString());
    b.append("[")
        .append("pts=" + getPts() + ";")
        .append("dts=" + getDts() + ";")
        .append("complete=" + isComplete() + ";")
        .append("size=" + getSize() + ";")
        .append("maxSize=" + getMaxSize() + ";")
        .append("streamIndex=" + getStreamIndex() + ";")
        .append("key=" + isKey() + ";")
        .append("flags=" + getFlags() + ";")
        .append("duration=" + getDuration() + ";")
        .append("position=" + getPosition() + ";")
        .append("convergenceDuration=" + getConvergenceDuration() + ";")
        .append("]");
    return b.toString();
  }

  /**
   * Create a new Packet.
   */
  public static MediaPacket make() {
    long cPtr = VideoJNI.MediaPacket_make__SWIG_0();
    return (cPtr == 0) ? null : new MediaPacket(cPtr, false);
  }

  /**
   * Allocate a new packet that wraps an existing Buffer.
*
* NOTE: At least 16 bytes of the passed in buffer will be used
* for header information, so the resulting Packet.getSize()
 * will be smaller than Buffer.getBufferSize().
*
* @param buffer The Buffer to wrap.
   * @return a new packet or null on error.
   */
  public static MediaPacket make(Buffer buffer) {
    long cPtr = VideoJNI.MediaPacket_make__SWIG_1(Buffer.getCPtr(buffer), buffer);
    return (cPtr == 0) ? null : new MediaPacket(cPtr, false);
  }

  /**
   * Allocate a new packet wrapping the existing contents of
* a passed in packet. Callers can then modify
* #getPts(),
* #getDts() and other get/set methods without
* modifying the original packet.
*
* @param packet Packet to reuse buffer from and to
* copy settings from.
* @param copyData if true copy data from packet
* into our own buffer. If false, share the same
* data buffer that packet uses
*
   * @return a new packet or null on error.
   */
  public static MediaPacket make(MediaPacket packet, boolean copyData) {
    long cPtr = VideoJNI.MediaPacket_make__SWIG_2(MediaPacket.getCPtr(packet), packet, copyData);
    return (cPtr == 0) ? null : new MediaPacket(cPtr, false);
  }

  /**
   * Allocate a new packet.
   *
   * Note that any buffers this packet needs will be
   * lazily allocated (i.e. we won't actually grab all
   * the memory until we need it).
   *
* @param size The maximum size, in bytes, of data you
* want to put in this packet.
*
   * @return a new packet, or null on error.
   */
  public static MediaPacket make(int size) {
    long cPtr = VideoJNI.MediaPacket_make__SWIG_3(size);
    return (cPtr == 0) ? null : new MediaPacket(cPtr, false);
  }

  /**
   * Get any underlying raw data available for this packet.
*
   * @return The raw data, or null if not accessible.
   */
  public Buffer getData() {
    long cPtr = VideoJNI.MediaPacket_getData(swigCPtr, this);
    return (cPtr == 0) ? null : new Buffer(cPtr, false);
  }

  /**
   * Get the number of side data elements in this packet.
   */
  public int getNumSideDataElems() {
    return VideoJNI.MediaPacket_getNumSideDataElems(swigCPtr, this);
  }

  /**
   * Get the n'th item of SideData.
   *
   * WARNING: Callers must ensure that the packet object
   * this is called from is NOT reset or destroyed while using this buffer,
   * as unfortunately we cannot ensure this buffer survives the
   * underlying packet data.
   *
* @param n The n'th item to get.
* @return the data, or null if none found
   * @throws InvalidArgument if n < 0 || n >= #getNumSideDataElems()
   */
  public Buffer getSideData(int n) {
    long cPtr = VideoJNI.MediaPacket_getSideData(swigCPtr, this, n);
    return (cPtr == 0) ? null : new Buffer(cPtr, false);
  }

  /**
   * Get the type of the n'th item of SideData.
*
* @param n The n'th item to get.
* @return the data, or SideDataType.DATA_UNKNOWN if none found
   * @throws InvalidArgument if n < 0 || n >= #getNumSideDataElems()
   */
  public MediaPacket.SideDataType getSideDataType(int n) {
    return MediaPacket.SideDataType.swigToEnum(VideoJNI.MediaPacket_getSideDataType(swigCPtr, this, n));
  }

  /**
   * Get the Presentation Time Stamp (PTS) for this packet.
*
* This is the time at which the payload for this packet should
* be presented to the user, in units of
* #getTimeBase(), relative to the start of stream.
*
   * @return The Presentation Time Stamp for this packet.
   */
  public long getPts() {
    return VideoJNI.MediaPacket_getPts(swigCPtr, this);
  }

  /**
   * Set a new Presentation Time Stamp (PTS) for this packet.
*
* @param aPts a new PTS for this packet.
*
   * @see #getPts()
   */
  public void setPts(long aPts) {
    VideoJNI.MediaPacket_setPts(swigCPtr, this, aPts);
  }

  /**
   * Get the Decompression Time Stamp (DTS) for this packet.
   *
   * This is the time at which the payload for this packet should
   * be decompressed, in units of
   * #getTimeBase(), relative to the start of stream.
   *
* Some media codecs can require packets from the "future" to
 * be decompressed before earlier packets as an additional way to compress
* data. In general you don't need to worry about this, but if you're
* curious start reading about the difference between I-Frames, P-Frames
* and B-Frames (or Bi-Directional Frames). B-Frames can use information
* from future frames when compressed.
   *
   * @return The Decompression Timestamp (i.e. when this was read relative
   * to the start of reading packets).
   */
  public long getDts() {
    return VideoJNI.MediaPacket_getDts(swigCPtr, this);
  }

  /**
   * Set a new Decompression Time Stamp (DTS) for this packet.
* @param aDts a new DTS for this packet.
   * @see #getDts()
   */
  public void setDts(long aDts) {
    VideoJNI.MediaPacket_setDts(swigCPtr, this, aDts);
  }

  /**
   * Get the size in bytes of the payload currently in this packet.
   * @return Size (in bytes) of payload currently in packet.
   */
  public int getSize() {
    return VideoJNI.MediaPacket_getSize(swigCPtr, this);
  }

  /**
   * Get the maximum size (in bytes) of payload this packet can hold.
   * @return Maximum size (in bytes) of payload this packet can hold.
   */
  public int getMaxSize() {
    return VideoJNI.MediaPacket_getMaxSize(swigCPtr, this);
  }

  /**
   * Get the container-specific index for the stream this packet is
* part of.
   * @return Stream in container that this packet has data for, or <0 if unsure.
   */
  public int getStreamIndex() {
    return VideoJNI.MediaPacket_getStreamIndex(swigCPtr, this);
  }

  /**
   * Get any flags set on this packet, as a 4-byte binary-ORed bit-mask.
 * This gives access to the raw FFmpeg
 * flags, but it is easier to use the is* methods below.
   * @return Any flags on the packet.
   */
  public int getFlags() {
    return VideoJNI.MediaPacket_getFlags(swigCPtr, this);
  }

  /**
   * Does this packet contain Key data? i.e. data that needs no other
* frames or samples to decode.
   * @return true if key; false otherwise.
   */
  public boolean isKeyPacket() {
    return VideoJNI.MediaPacket_isKeyPacket(swigCPtr, this);
  }

  /**
   * Return the duration of this packet, in units of #getTimeBase()
   * @return Duration of this packet, in same time-base as the PTS.
   */
  public long getDuration() {
    return VideoJNI.MediaPacket_getDuration(swigCPtr, this);
  }

  /**
   * Return the position (in bytes) of this packet in the stream.
* @return The position of this packet in the stream, or -1 if
   * unknown.
   */
  public long getPosition() {
    return VideoJNI.MediaPacket_getPosition(swigCPtr, this);
  }

  /**
   * Set if this is a key packet.
*
   * @param keyPacket true for yes, false for no.
   */
  public void setKeyPacket(boolean keyPacket) {
    VideoJNI.MediaPacket_setKeyPacket(swigCPtr, this, keyPacket);
  }

  /**
   * Set any internal flags.
*
   * @param flags Flags to set
   */
  public void setFlags(int flags) {
    VideoJNI.MediaPacket_setFlags(swigCPtr, this, flags);
  }

  /**
   * Set the stream index for this packet.
*
   * @param streamIndex The stream index, as determined from the IContainer this packet will be written to.
   */
  public void setStreamIndex(int streamIndex) {
    VideoJNI.MediaPacket_setStreamIndex(swigCPtr, this, streamIndex);
  }

  /**
   * Set the duration.
* @param duration new duration
   * @see #getDuration()
   */
  public void setDuration(long duration) {
    VideoJNI.MediaPacket_setDuration(swigCPtr, this, duration);
  }

  /**
   * Set the position.
* @param position new position
   * @see #getPosition()
   */
  public void setPosition(long position) {
    VideoJNI.MediaPacket_setPosition(swigCPtr, this, position);
  }

  /**
   * Time difference in IStream#getTimeBase() units
* from the presentation time stamp of this
* packet to the point at which the output from the decoder has converged
* independent from the availability of previous frames. That is, the
* frames are virtually identical no matter if decoding started from
* the very first frame or from this keyframe.
* Is Global#NO_PTS if unknown.
* This field is not the display duration of the current packet.
   *
* The purpose of this field is to allow seeking in streams that have no
* keyframes in the conventional sense. It corresponds to the
* recovery point SEI in H.264 and match_time_delta in NUT. It is also
* essential for some types of subtitle streams to ensure that all
* subtitles are correctly displayed after seeking.
   *
* If you didn't follow that, try drinking one to two glasses
* of Absinthe. It won't help, but it'll be more fun.
   *
   * @return the convergence duration
   */
  public long getConvergenceDuration() {
    return VideoJNI.MediaPacket_getConvergenceDuration(swigCPtr, this);
  }

  /**
   * Set the convergence duration.
   * @param duration the new duration
   */
  public void setConvergenceDuration(long duration) {
    VideoJNI.MediaPacket_setConvergenceDuration(swigCPtr, this, duration);
  }

  /**
   * Discard the current payload and allocate a new payload.
   *
   * Note that if anyone still has access to the old payload using
   * getData(), the memory will continue to be available to them
   * until they release their hold of the Buffer.
   *
* When requesting a packet size, the system
* may allocate a larger payloadSize.
   *
* @param payloadSize The (minimum) payloadSize of this packet in bytes. It is ok to
   * pass in 0 here, in which case the packet will later allocate memory if needed.
   */
  public void reset(int payloadSize) {
    VideoJNI.MediaPacket_reset(swigCPtr, this, payloadSize);
  }

  public enum SideDataType {
    DATA_UNKNOWN(VideoJNI.MediaPacket_DATA_UNKNOWN_get()),
    DATA_PALETTE(VideoJNI.MediaPacket_DATA_PALETTE_get()),
    DATA_NEW_EXTRADATA(VideoJNI.MediaPacket_DATA_NEW_EXTRADATA_get()),

    /**
     * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
     * {@code
     * u32le param_flags
     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
     *     s32le channel_count
     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
     *     u64le channel_layout
     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
     *     s32le sample_rate
     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
     *     s32le width
     *     s32le height
     * }
     */
    DATA_PARAM_CHANGE(VideoJNI.MediaPacket_DATA_PARAM_CHANGE_get()),

    /**
     * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
* structures with info about macroblocks relevant to splitting the
* packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
* That is, it does not necessarily contain info about all macroblocks,
* as long as the distance between macroblocks in the info is smaller
* than the target payload size.
* Each MB info structure is 12 bytes, and is laid out as follows:
     * {@code
     * u32le bit offset from the start of the packet
     * u8    current quantizer at the start of the macroblock
     * u8    GOB number
     * u16le macroblock address within the GOB
     * u8    horizontal MV predictor
     * u8    vertical MV predictor
     * u8    horizontal MV predictor for block number 3
     * u8    vertical MV predictor for block number 3
     * }
     */
    DATA_H263_MB_INFO(VideoJNI.MediaPacket_DATA_H263_MB_INFO_get()),

    /**
     * Recommends skipping the specified number of samples
     * {@code
     * u32le number of samples to skip from start of this packet
     * u32le number of samples to skip from end of this packet
     * u8    reason for start skip
     * u8    reason for end skip (0=padding silence, 1=convergence)
     * }
     */
    DATA_SKIP_SAMPLES(VideoJNI.MediaPacket_DATA_SKIP_SAMPLES_get()),

    /**
     * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that
* the packet may contain "dual mono" audio specific to Japanese DTV
* and if it is true, recommends only the selected channel to be used.
     * {@code
     * u8 selected channels (0=main/left, 1=sub/right, 2=both)
     * }
     */
    DATA_JP_DUALMONO(VideoJNI.MediaPacket_DATA_JP_DUALMONO_get()),

    /**
     * A list of zero terminated key/value strings. There is no end marker for
     * the list, so it is required to rely on the side data size to stop.
     */
    DATA_STRINGS_METADATA(VideoJNI.MediaPacket_DATA_STRINGS_METADATA_get()),

    /**
     * Subtitle event position
     * {@code
     * u32le x1
     * u32le y1
     * u32le x2
     * u32le y2
     * }
     */
    DATA_SUBTITLE_POSITION(VideoJNI.MediaPacket_DATA_SUBTITLE_POSITION_get()),

    /**
     * Data found in BlockAdditional element of matroska container. There is
* no end marker for the data, so it is required to rely on the side data
* size to recognize the end. 8 byte id (as found in BlockAddId) followed
     * by data.
     */
    DATA_MATROSKA_BLOCKADDITIONAL(VideoJNI.MediaPacket_DATA_MATROSKA_BLOCKADDITIONAL_get()),

    /**
     * The optional first identifier line of a WebVTT cue.
     */
    DATA_WEBVTT_IDENTIFIER(VideoJNI.MediaPacket_DATA_WEBVTT_IDENTIFIER_get()),

    /**
     * The optional settings (rendering instructions) that immediately
     * follow the timestamp specifier of a WebVTT cue.
     */
    DATA_WEBVTT_SETTINGS(VideoJNI.MediaPacket_DATA_WEBVTT_SETTINGS_get()),
    ;

    public final int swigValue() {
      return swigValue;
    }

    public static SideDataType swigToEnum(int swigValue) {
      SideDataType[] swigValues = SideDataType.class.getEnumConstants();
      if (swigValue < swigValues.length && swigValue >= 0 && swigValues[swigValue].swigValue == swigValue)
        return swigValues[swigValue];
      for (SideDataType swigEnum : swigValues)
        if (swigEnum.swigValue == swigValue)
          return swigEnum;
      throw new IllegalArgumentException("No enum " + SideDataType.class + " with value " + swigValue);
    }

    @SuppressWarnings("unused")
    private SideDataType() {
      this.swigValue = SwigNext.next++;
    }

    @SuppressWarnings("unused")
    private SideDataType(int swigValue) {
      this.swigValue = swigValue;
      SwigNext.next = swigValue+1;
    }

    @SuppressWarnings("unused")
    private SideDataType(SideDataType swigEnum) {
      this.swigValue = swigEnum.swigValue;
      SwigNext.next = this.swigValue+1;
    }

    private final int swigValue;

    private static class SwigNext {
      private static int next = 0;
    }
  }

}
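As a usage sketch (not part of the generated source above): the typical pattern is to allocate a single MediaPacket with make() and repeatedly fill it from the library's Demuxer, which returns >= 0 from read() while packets remain. The file name below is a placeholder, and error handling is pared down to the essentials.

import io.humble.video.Demuxer;
import io.humble.video.MediaPacket;

public class PacketDumper {
  public static void main(String[] args) throws Exception {
    final Demuxer demuxer = Demuxer.make();
    // Open the container; format and options are passed as null so they are auto-detected.
    demuxer.open("input.mp4", null, false, true, null, null);
    try {
      // One packet object is reused for every read.
      final MediaPacket packet = MediaPacket.make();
      while (demuxer.read(packet) >= 0) {
        if (packet.isComplete()) {
          System.out.printf("stream=%d pts=%d dts=%d size=%d key=%b%n",
              packet.getStreamIndex(), packet.getPts(), packet.getDts(),
              packet.getSize(), packet.isKeyPacket());
        }
      }
    } finally {
      demuxer.close();
    }
  }
}

Reusing one packet across reads avoids reallocating the underlying native buffer on every iteration; make(MediaPacket, boolean) can be used instead when a decoded-but-unconsumed packet must be retained across loop iterations.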



