libvdpau-va-gl-0.4.2/000077500000000000000000000000001277566164500143525ustar00rootroot00000000000000libvdpau-va-gl-0.4.2/.gitignore000066400000000000000000000000071277566164500163370ustar00rootroot00000000000000build/ libvdpau-va-gl-0.4.2/3rdparty/000077500000000000000000000000001277566164500161225ustar00rootroot00000000000000libvdpau-va-gl-0.4.2/3rdparty/vdpau/000077500000000000000000000000001277566164500172415ustar00rootroot00000000000000libvdpau-va-gl-0.4.2/3rdparty/vdpau/vdpau.h000066400000000000000000005273531277566164500205500ustar00rootroot00000000000000/* * This source file is documented using Doxygen markup. * See http://www.stack.nl/~dimitri/doxygen/ */ /* * This copyright notice applies to this header file: * * Copyright (c) 2008-2015 NVIDIA Corporation * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ /** * \mainpage Video Decode and Presentation API for Unix * * \section intro Introduction * * The Video Decode and Presentation API for Unix (VDPAU) provides * a complete solution for decoding, post-processing, compositing, * and displaying compressed or uncompressed video streams. These * video streams may be combined (composited) with bitmap content, * to implement OSDs and other application user interfaces. * * \section api_partitioning API Partitioning * * VDPAU is split into two distinct modules: * - \ref api_core * - \ref api_winsys * * The intent is that most VDPAU functionality exists and * operates identically across all possible Windowing Systems. * This functionality is the \ref api_core. * * However, a small amount of functionality must be included that * is tightly coupled to the underlying Windowing System. This * functionality is the \ref api_winsys. Possibly examples * include: * - Creation of the initial VDPAU \ref VdpDevice "VdpDevice" * handle, since this act requires intimate knowledge of the * underlying Window System, such as specific display handle or * driver identification. * - Conversion of VDPAU surfaces to/from underlying Window * System surface types, e.g. to allow manipulation of * VDPAU-generated surfaces via native Window System APIs. * * \section objects Object Types * * VDPAU is roughly object oriented; most functionality is * exposed by creating an object (handle) of a certain class * (type), then executing various functions against that handle. * The set of object classes supported, and their purpose, is * discussed below. * * \subsection device_type Device Type * * A \ref VdpDevice "VdpDevice" is the root object in VDPAU's * object system. The \ref api_winsys allows creation of a \ref * VdpDevice "VdpDevice" object handle, from which all other API * entry points can be retrieved and invoked. * * \subsection surface_types Surface Types * * A surface stores pixel information. 
Various types of surfaces * existing for different purposes: * * - \ref VdpVideoSurface "VdpVideoSurface"s store decompressed * YCbCr video frames in an implementation-defined internal * format. * - \ref VdpOutputSurface "VdpOutputSurface"s store RGB 4:4:4 * data. They are legal render targets for video * post-processing and compositing operations. * - \ref VdpBitmapSurface "VdpBitmapSurface"s store RGB 4:4:4 * data. These surfaces are designed to contain read-only * bitmap data, to be used for OSD or application UI * compositing. * * \subsection transfer_types Transfer Types * * A data transfer object reads data from a surface (or * surfaces), processes it, and writes the result to another * surface. Various types of processing are possible: * * - \ref VdpDecoder "VdpDecoder" objects process compressed video * data, and generate decompressed images. * - \ref VdpOutputSurface "VdpOutputSurface"s have their own \ref * VdpOutputSurfaceRender "rendering functionality". * - \ref VdpVideoMixer "VdpVideoMixer" objects perform video * post-processing, de-interlacing, and compositing. * - \ref VdpPresentationQueue "VdpPresentationQueue" is * responsible for timestamp-based display of surfaces. * * \section data_flow Data Flow * * Compressed video data originates in the application's memory * space. This memory is typically obtained using \c malloc, and * filled via regular file or network read system calls. * Alternatively, the application may \c mmap a file. * * The compressed data is then processed using a \ref VdpDecoder * "VdpDecoder", which will decompress the field or frame, * and write the result into a \ref VdpVideoSurface * "VdpVideoSurface". This action may require reading pixel data * from some number of other \ref VdpVideoSurface "VdpVideoSurface" * objects, depending on the type of compressed data and * field/frame in question. 
* * If the application wishes to display any form of OSD or * user-interface, this must be created in a \ref * VdpOutputSurface "VdpOutputSurface". * * This process begins with the creation of \ref VdpBitmapSurface * "VdpBitmapSurface" objects to contain the OSD/UI's static data, * such as individual glyphs. * * \ref VdpOutputSurface "VdpOutputSurface" \ref * VdpOutputSurfaceRender "rendering functionality" may be used * to composite together various \ref VdpBitmapSurface * "VdpBitmapSurface"s and \ref VdpOutputSurface * "VdpOutputSurface"s, into another VdpOutputSurface * "VdpOutputSurface". * * Once video has been decoded, it must be post-processed. This * involves various steps such as color space conversion, * de-interlacing, and other video adjustments. This step is * performed using an \ref VdpVideoMixer "VdpVideoMixer" object. * This object can not only perform the aforementioned video * post-processing, but also composite the video with a number of * \ref VdpOutputSurface "VdpOutputSurface"s, thus allowing complex * user interfaces to be built. The final result is written into * another \ref VdpOutputSurface "VdpOutputSurface". * * Note that at this point, the resultant \ref VdpOutputSurface * "VdpOutputSurface" may be fed back through the above path, * either using \ref VdpOutputSurface "VdpOutputSurface" \ref * VdpOutputSurfaceRender "rendering functionality", * or as input to the \ref VdpVideoMixer "VdpVideoMixer" object. * * Finally, the resultant \ref VdpOutputSurface * "VdpOutputSurface" must be displayed on screen. This is the job * of the \ref VdpPresentationQueue "VdpPresentationQueue" object. * * \image html vdpau_data_flow.png * * \section entry_point_retrieval Entry Point Retrieval * * VDPAU is designed so that multiple implementations can be * used without application changes. For example, VDPAU could be * hosted on X11, or via direct GPU access. 
* * The key technology behind this is the use of function * pointers and a "get proc address" style API for all entry * points. Put another way, functions are not called directly * via global symbols set up by the linker, but rather through * pointers. * * In practical terms, the \ref api_winsys provides factory * functions which not only create and return \ref VdpDevice * "VdpDevice" objects, but also a function pointer to a \ref * VdpGetProcAddress function, through which all entry point * function pointers will be retrieved. * * \subsection entry_point_philosophy Philosophy * * It is entirely possible to envisage a simpler scheme whereby * such function pointers are hidden. That is, the application * would link against a wrapper library that exposed "real" * functions. The application would then call such functions * directly, by symbol, like any other function. The wrapper * library would handle loading the appropriate back-end, and * implementing a similar "get proc address" scheme internally. * * However, the above scheme does not work well in the context * of separated \ref api_core and \ref api_winsys. In this * scenario, one would require a separate wrapper library per * Window System, since each Window System would have a * different function name and prototype for the main factory * function. If an application then wanted to be Window System * agnostic (making final determination at run-time via some * form of plugin), it may then need to link against two * wrapper libraries, which would cause conflicts for all * symbols other than the main factory function. * * Another disadvantage of the wrapper library approach is the * extra level of function call required; the wrapper library * would internally implement the existing "get proc address" * and "function pointer" style dispatch anyway. Exposing this * directly to the application is slightly more efficient. 
* * \section threading Multi-threading * * All VDPAU functionality is fully thread-safe; any number of * threads may call into any VDPAU functions at any time. VDPAU * may not be called from signal-handlers. * * Note, however, that this simply guarantees that internal * VDPAU state will not be corrupted by thread usage, and that * crashes and deadlocks will not occur. Completely arbitrary * thread usage may not generate the results that an application * desires. In particular, care must be taken when multiple * threads are performing operations on the same VDPAU objects. * * VDPAU implementations guarantee correct flow of surface * content through the rendering pipeline, but only when * function calls that read from or write to a surface return to * the caller prior to any thread calling any other function(s) * that read from or write to the surface. Invoking multiple * reads from a surface in parallel is OK. * * Note that this restriction is placed upon VDPAU function * invocations, and specifically not upon any back-end * hardware's physical rendering operations. VDPAU * implementations are expected to internally synchronize such * hardware operations. * * In a single-threaded application, the above restriction comes * naturally; each function call completes before it is possible * to begin a new function call. * * In a multi-threaded application, threads may need to be * synchronized. For example, consider the situation where: * * - Thread 1 is parsing compressed video data, passing them * through a \ref VdpDecoder "VdpDecoder" object, and filling a * ring-buffer of \ref VdpVideoSurface "VdpVideoSurface"s * - Thread 2 is consuming those \ref VdpVideoSurface * "VdpVideoSurface"s, and using a \ref VdpVideoMixer * "VdpVideoMixer" to process them and composite them with UI. 
* * In this case, the threads must synchronize to ensure that * thread 1's call to \ref VdpDecoderRender has returned prior to * thread 2's call(s) to \ref VdpVideoMixerRender that use that * specific surface. This could be achieved using the following * pseudo-code: * * \code * Queue q_full_surfaces; * Queue q_empty_surfaces; * * thread_1() { * for (;;) { * VdpVideoSurface s = q_empty_surfaces.get(); * // Parse compressed stream here * VdpDecoderRender(s, ...); * q_full_surfaces.put(s); * } * } * * // This would need to be more complex if * // VdpVideoMixerRender were to be provided with more * // than one field/frame at a time. * thread_2() { * for (;;) { * // Possibly, other rendering operations to mixer * // layer surfaces here. * VdpOutputSurface t = ...; * VdpPresentationQueueBlockUntilSurfaceIdle(t); * VdpVideoSurface s = q_full_surfaces.get(); * VdpVideoMixerRender(s, t, ...); * q_empty_surfaces.put(s); * // Possibly, other rendering operations to "t" here * VdpPresentationQueueDisplay(t, ...); * } * } * \endcode * * Finally, note that VDPAU makes no guarantees regarding any * level of parallelism in any given implementation. Put another * way, use of multi-threading is not guaranteed to yield any * performance gain, and in theory could even slightly reduce * performance due to threading/synchronization overhead. * * However, the intent of the threading requirements is to allow * for e.g. video decoding and video mixer operations to proceed * in parallel in hardware. Given a (presumably multi-threaded) * application that kept each portion of the hardware busy, this * would yield a performance increase. * * \section endianness Surface Endianness * * When dealing with surface content, i.e. the input/output of * Put/GetBits functions, applications must take care to access * memory in the correct fashion, so as to avoid endianness * issues. 
* * By established convention in the 3D graphics world, RGBA data * is defined to be an array of 32-bit pixels containing packed * RGBA components, not as an array of bytes or interleaved RGBA * components. VDPAU follows this convention. As such, * applications are expected to access such surfaces as arrays * of 32-bit components (i.e. using a 32-bit pointer), and not * as interleaved arrays of 8-bit components (i.e. using an * 8-bit pointer.) Deviation from this convention will lead to * endianness issues, unless appropriate care is taken. * * The same convention is followed for some packed YCbCr formats * such as \ref VDP_YCBCR_FORMAT_Y8U8V8A8; i.e. they are * considered arrays of 32-bit pixels, and hence should be * accessed as such. * * For YCbCr formats with chroma decimation and/or planar * formats, however, this convention is awkward. Therefore, * formats such as \ref VDP_YCBCR_FORMAT_NV12 are defined as * arrays of (potentially interleaved) byte-sized components. * Hence, applications should manipulate such data 8-bits at a * time, using 8-bit pointers. * * Note that one common usage for the input/output of * Put/GetBits APIs is file I/O. Typical file I/O APIs treat all * memory as a simple array of 8-bit values. This violates the * rule requiring surface data to be accessed in its true native * format. As such, applications may be required to solve * endianness issues. Possible solutions include: * * - Authoring static UI data files according to the endianness * of the target execution platform. * - Conditionally byte-swapping Put/GetBits data buffers at * run-time based on execution platform. * * Note: Complete details regarding each surface format's * precise pixel layout is included with the documentation of * each surface type. For example, see \ref * VDP_RGBA_FORMAT_B8G8R8A8. * * \section video_decoder_usage Video Decoder Usage * * VDPAU is a slice-level API. 
Put another way, VDPAU implementations accept * "slice" data from the bitstream, and perform all required processing of * those slices (e.g VLD decoding, IDCT, motion compensation, in-loop * deblocking, etc.). * * The client application is responsible for: * * - Extracting the slices from the bitstream (e.g. parsing/demultiplexing * container formats, scanning the data to determine slice start positions * and slice sizes). * - Parsing various bitstream headers/structures (e.g. sequence header, * sequence parameter set, picture parameter set, entry point structures, * etc.) Various fields from the parsed header structures needs to be * provided to VDPAU alongside the slice bitstream in a "picture info" * structure. * - Surface management (e.g. H.264 DPB processing, display re-ordering) * * It is recommended that applications pass solely the slice data to VDPAU; * specifically that any header data structures be excluded from the portion * of the bitstream passed to VDPAU. VDPAU implementations must operate * correctly if non-slice data is included, at least for formats employing * start codes to delimit slice data. However, any extra data may need * to be uploaded to hardware for parsing thus lowering performance, and/or, * in the worst case, may even overflow internal buffers that are sized solely * for slice data. * * The exact data that should be passed to VDPAU is detailed below for each * supported format: * * \subsection bitstream_mpeg1_mpeg2 MPEG-1 and MPEG-2 * * Include all slices beginning with start codes 0x00000101 through * 0x000001AF. The slice start code must be included for all slices. * * \subsection bitstream_h264 H.264 * * Include all NALs with nal_unit_type of 1 or 5 (coded slice of non-IDR/IDR * picture respectively). The complete slice start code (including 0x000001 * prefix) must be included for all slices, even when the prefix is not * included in the bitstream. 
* * Note that if desired: * * - The slice start code prefix may be included in a separate bitstream * buffer array entry to the actual slice data extracted from the bitstream. * - Multiple bitstream buffer array entries (e.g. one per slice) may point at * the same physical data storage for the slice start code prefix. * * \subsection bitstream_vc1_sp_mp VC-1 Simple and Main Profile * * VC-1 simple/main profile bitstreams always consist of a single slice per * picture, and do not use start codes to delimit pictures. Instead, the * container format must indicate where each picture begins/ends. * * As such, no slice start codes should be included in the data passed to * VDPAU; simply pass in the exact data from the bitstream. * * Header information contained in the bitstream should be parsed by the * application and passed to VDPAU using the "picture info" data structure; * this header information explicitly must not be included in the bitstream * data passed to VDPAU for this encoding format. * * \subsection bitstream_vc1_ap VC-1 Advanced Profile * * Include all slices beginning with start codes 0x0000010D (frame), * 0x0000010C (field) or 0x0000010B (slice). The slice start code should be * included in all cases. * * Some VC-1 advanced profile streams do not contain slice start codes; again, * the container format must indicate where picture data begins and ends. In * this case, pictures are assumed to be progressive and to contain a single * slice. It is highly recommended that applications detect this condition, * and add the missing start codes to the bitstream passed to VDPAU. However, * VDPAU implementations must allow bitstreams with missing start codes, and * act as if a 0x0000010D (frame) start code had been present. * * Note that pictures containing multiple slices, or interlace streams, must * contain a complete set of slice start codes in the original bitstream; * without them, it is not possible to correctly parse and decode the stream. 
* * The bitstream passed to VDPAU should contain all original emulation * prevention bytes present in the original bitstream; do not remove these * from the bitstream. * * \subsection bitstream_mpeg4part2 MPEG-4 Part 2 and DivX * * Include all slices beginning with start codes 0x000001B6. The slice start * code must be included for all slices. * * \subsection bitstream_hevc H.265/HEVC - High Efficiency Video Codec * * Include all video coding layer (VCL) NAL units, with nal_unit_type values * of 0 (TRAIL_N) through 31 (RSV_VCL31) inclusive. In addition to parsing * and providing NAL units, an H.265/HEVC decoder application using VDPAU * for decoding must parse certain values of the first slice segment header * in a VCL NAL unit and provide it through VdpPictureInfoHEVC. Please see * the documentation for VdpPictureInfoHEVC below for further details. * * The complete slice start code (including the 0x000001 prefix) must be * included for all slices, even when the prefix is not included in the * bitstream. * * Note that if desired: * * - The slice start code prefix may be included in a separate bitstream * buffer array entry to the actual slice data extracted from the bitstream. * - Multiple bitstream buffer array entries (e.g. one per slice) may point at * the same physical data storage for the slice start code prefix. * * \section video_mixer_usage Video Mixer Usage * * \subsection video_surface_content VdpVideoSurface Content * * Each \ref VdpVideoSurface "VdpVideoSurface" is expected to contain an * entire frame's-worth of data, irrespective of whether an interlaced of * progressive sequence is being decoded. * * Depending on the exact encoding structure of the compressed video stream, * the application may need to call \ref VdpDecoderRender twice to fill a * single \ref VdpVideoSurface "VdpVideoSurface". 
When the stream contains an * encoded progressive frame, or a "frame coded" interlaced field-pair, a * single \ref VdpDecoderRender call will fill the entire surface. When the * stream contains separately encoded interlaced fields, two * \ref VdpDecoderRender calls will be required; one for the top field, and * one for the bottom field. * * Implementation note: When \ref VdpDecoderRender renders an interlaced * field, this operation must not disturb the content of the other field in * the surface. * * \subsection vm_surf_list VdpVideoMixer Surface List * * An video stream is logically composed of a sequence of fields. An * example is shown below, in display order, assuming top field first: * *
t0 b0 t1 b1 t2 b2 t3 b3 t4 b4 t5 b5 t6 b6 t7 b7 t8 b8 t9 b9
* * The canonical usage is to call \ref VdpVideoMixerRender once for decoded * field, in display order, to yield one post-processed frame for display. * * For each call to \ref VdpVideoMixerRender, the field to be processed should * be provided as the \b video_surface_current parameter. * * To enable operation of advanced de-interlacing algorithms and/or * post-processing algorithms, some past and/or future surfaces should be * provided as context. These are provided in the \b video_surface_past and * \b video_surface_future lists. In general, these lists may contain any * number of surfaces. Specific implementations may have specific requirements * determining the minimum required number of surfaces for optimal operation, * and the maximum number of useful surfaces, beyond which surfaces are not * used. It is recommended that in all cases other than plain bob/weave, at * least 2 past and 1 future field be provided. * * Note that it is entirely possible, in general, for any of the * \ref VdpVideoMixer "VdpVideoMixer" post-processing steps other than * de-interlacing to require access to multiple input fields/frames. For * example, an motion-sensitive noise-reduction algorithm. * * For example, when processing field t4, the \ref VdpVideoMixerRender * parameters may contain the following values, if the application chose to * provide 3 fields of context for both the past and future: * *
 * current_picture_structure: VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD
 * past:    [b3, t3, b2]
 * current: t4
 * future:  [b4, t5, b5]
 * 
* * Note that for both the past/future lists, array index 0 represents the * field temporally closest to current, in display order. * * The \ref VdpVideoMixerRender parameter \b current_picture_structure applies * to \b video_surface_current. The picture structure for the other surfaces * will be automatically derived from that for the current picture. The * derivation algorithm is extremely simple; the concatenated list * past/current/future is simply assumed to have an alternating top/bottom * pattern throughout. * * Continuing the example above, subsequent calls to \ref VdpVideoMixerRender * would provide the following sets of parameters: * *
 * current_picture_structure: VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD
 * past:    [t4, b3, t3]
 * current: b4
 * future:  [t5, b5, t6]
 * 
* * then: * *
 * current_picture_structure: VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD
 * past:    [b4, t4, b3]
 * current: t5
 * future:  [b5, t6, b7]
 * 
* * In other words, the concatenated list of past/current/future frames simply * forms a window that slides through the sequence of decoded fields. * * It is syntactically legal for an application to choose not to provide a * particular entry in the past or future lists. In this case, the "slot" in * the surface list must be filled with the special value * \ref VDP_INVALID_HANDLE, to explicitly indicate that the picture is * missing; do not simply shuffle other surfaces together to fill in the gap. * Note that entries should only be omitted under special circumstances, such * as failed decode due to bitstream error during picture header parsing, * since missing entries will typically cause advanced de-interlacing * algorithms to experience significantly degraded operation. * * Specific examples for different de-interlacing types are presented below. * * \subsection deint_weave Weave De-interlacing * * Weave de-interlacing is the act of interleaving the lines of two temporally * adjacent fields to form a frame for display. * * To disable de-interlacing for progressive streams, simply specify * \b current_picture_structure as \ref VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME; * no de-interlacing will be applied. * * Weave de-interlacing for interlaced streams is identical to disabling * de-interlacing, as describe immediately above, because each * \ref VdpVideoSurface already contains an entire frame's worth (i.e. two * fields) of picture data. * * Inverse telecine is disabled when using weave de-interlacing. * * Weave de-interlacing produces one output frame for each input frame. The * application should make one \ref VdpVideoMixerRender call per pair of * decoded fields, or per decoded frame. * * Weave de-interlacing requires no entries in the past/future lists. * * All implementations must support weave de-interlacing. * * \subsection deint_bob Bob De-interlacing * * Bob de-interlacing is the act of vertically scaling a single field to the * size of a single frame. 
* * To achieve bob de-interlacing, simply provide a single field as * \b video_surface_current, and set \b current_picture_structure * appropriately, to indicate whether a top or bottom field was provided. * * Inverse telecine is disabled when using bob de-interlacing. * * Bob de-interlacing produces one output frame for each input field. The * application should make one \ref VdpVideoMixerRender call per decoded * field. * * Bob de-interlacing requires no entries in the past/future lists. * * Bob de-interlacing is the default when no advanced method is requested and * enabled. Advanced de-interlacing algorithms may fall back to bob e.g. when * required past/future fields are missing. * * All implementations must support bob de-interlacing. * * \subsection deint_adv Advanced De-interlacing * * Operation of both temporal and temporal-spatial de-interlacing is * identical; the only difference is the internal processing the algorithm * performs in generating the output frame. * * These algorithms use various advanced processing on the pixels of both the * current and various past/future fields in order to determine how best to * de-interlacing individual portions of the image. * * Inverse telecine may be enabled when using advanced de-interlacing. * * Advanced de-interlacing produces one output frame for each input field. The * application should make one \ref VdpVideoMixerRender call per decoded * field. * * Advanced de-interlacing requires entries in the past/future lists. * * Availability of advanced de-interlacing algorithms is implementation * dependent. * * \subsection deint_rate De-interlacing Rate * * For all de-interlacing algorithms except weave, a choice may be made to * call \ref VdpVideoMixerRender for either each decoded field, or every * second decoded field. * * If \ref VdpVideoMixerRender is called for every decoded field, the * generated post-processed frame rate is equal to the decoded field rate. 
* Put another way, the generated post-processed nominal field rate is equal * to 2x the decoded field rate. This is standard practice. * * If \ref VdpVideoMixerRender is called for every second decoded field (say * every top field), the generated post-processed frame rate is half to the * decoded field rate. This mode of operation is thus referred to as * "half-rate". * * Implementations may choose whether to support half-rate de-interlacing * or not. Regular full-rate de-interlacing should be supported by any * supported advanced de-interlacing algorithm. * * The descriptions of de-interlacing algorithms above assume that regular * (not half-rate) operation is being performed, when detailing the number of * VdpVideoMixerRender calls. * * Recall that the concatenation of past/current/future surface lists simply * forms a window into the stream of decoded fields. To achieve standard * de-interlacing, the window is slid through the list of decoded fields one * field at a time, and a call is made to \ref VdpVideoMixerRender for each * movement of the window. To achieve half-rate de-interlacing, the window is * slid through the* list of decoded fields two fields at a time, and a * call is made to \ref VdpVideoMixerRender for each movement of the window. * * \subsection invtc Inverse Telecine * * Assuming the implementation supports it, inverse telecine may be enabled * alongside any advanced de-interlacing algorithm. Inverse telecine is never * active for bob or weave. * * Operation of \ref VdpVideoMixerRender with inverse telecine active is * identical to the basic operation mechanisms describe above in every way; * all inverse telecine processing is performed internally to the * \ref VdpVideoMixer "VdpVideoMixer". * * In particular, there is no provision way for \ref VdpVideoMixerRender to * indicate when identical input fields have been observed, and consequently * identical output frames may have been produced. 
* * De-interlacing (and inverse telecine) may be applied to streams that are * marked as being progressive. This will allow detection of, and correct * de-interlacing of, mixed interlace/progressive streams, bad edits, etc. * To implement de-interlacing/inverse-telecine on progressive material, * simply treat the stream of decoded frames as a stream of decoded fields, * apply any telecine flags (see the next section), and then apply * de-interlacing to those fields as described above. * * Implementations are free to determine whether inverse telecine operates * in conjunction with half-rate de-interlacing or not. It should always * operate with regular de-interlacing, when advertized. * * \subsection tcflags Telecine (Pull-Down) Flags * * Some media delivery formats, e.g. DVD-Video, include flags that are * intended to modify the decoded field sequence before display. This allows * e.g. 24p content to be encoded at 48i, which saves space relative to a 60i * encoded stream, but still displayed at 60i, to match target consumer * display equipment. * * If the inverse telecine option is not activated in the * \ref VdpVideoMixer "VdpVideoMixer", these flags should be ignored, and the * decoded fields passed directly to \ref VdpVideoMixerRender as detailed * above. * * However, to make full use of the inverse telecine feature, these flags * should be applied to the field stream, yielding another field stream with * some repeated fields, before passing the field stream to * \ref VdpVideoMixerRender. In this scenario, the sliding window mentioned * in the descriptions above applies to the field stream after application of * flags. * * \section extending Extending the API * * \subsection extend_enums Enumerations and Other Constants * * VDPAU defines a number of enumeration types. * * When modifying VDPAU, existing enumeration constants must * continue to exist (although they may be deprecated), and do * so in the existing order. 
* * The above discussion naturally applies to "manually" defined * enumerations, using pre-processor macros, too. * * \subsection extend_structs Structures * * In most case, VDPAU includes no provision for modifying existing * structure definitions, although they may be deprecated. * * New structures may be created, together with new API entry * points or feature/attribute/parameter values, to expose new * functionality. * * A few structures are considered plausible candidates for future extension. * Such structures include a version number as the first field, indicating the * exact layout of the client-provided data. When changing such structures, the * old structure must be preserved and a new structure created. This allows * applications built against the old version of the structure to continue to * interoperate. For example, to extend the VdpProcamp structure, define a new * VdpProcamp1 and update VdpGenerateCSCMatrix to take the new structure as an * argument. Document in a comment that the caller must fill the struct_version * field with the value 1. VDPAU implementations should use the struct_version * field to determine which version of the structure the application was built * against. Note that you cannot simply increment the value of * VDP_PROCAMP_VERSION because applications recompiled against a newer version * of vdpau.h but that have not been updated to use the new structure must still * report that they're using version 0. * * Note that the layouts of VdpPictureInfo structures are defined by their * corresponding VdpDecoderProfile numbers, so no struct_version field is * needed for them. This layout includes the size of the structure, so new * profiles that extend existing functionality may incorporate the old * VdpPictureInfo as a substructure, but may not modify existing VdpPictureInfo * structures. * * \subsection extend_functions Functions * * Existing functions may not be modified, although they may be * deprecated. 
* * New functions may be added at will. Note the enumeration * requirements when modifying the enumeration that defines the * list of entry points. * * \section preemption_note Display Preemption * * Please note that the display may be preempted away from * VDPAU at any time. See \ref display_preemption for more * details. * * \subsection trademarks Trademarks * * VDPAU is a trademark of NVIDIA Corporation. You may freely use the * VDPAU trademark, as long as trademark ownership is attributed to * NVIDIA Corporation. */ /** * \file vdpau.h * \brief The Core API * * This file contains the \ref api_core "Core API". */ #ifndef _VDPAU_H #define _VDPAU_H #include #ifdef __cplusplus extern "C" { #endif /** * \defgroup api_core Core API * * The core API encompasses all VDPAU functionality that operates * in the same fashion across all Window Systems. * * @{ */ /** * \defgroup base_types Basic Types * * VDPAU primarily uses ISO C99 types from \c stdint.h. * * @{ */ /** \brief A true \ref VdpBool value */ #define VDP_TRUE 1 /** \brief A false \ref VdpBool value */ #define VDP_FALSE 0 /** * \brief A boolean value, holding \ref VDP_TRUE or \ref * VDP_FALSE. */ typedef int VdpBool; /*@}*/ /** * \defgroup misc_types Miscellaneous Types * * @{ */ /** * \brief An invalid object handle value. * * This value may be used to represent an invalid, or * non-existent, object (\ref VdpDevice "VdpDevice", * \ref VdpVideoSurface "VdpVideoSurface", etc.) * * Note that most APIs require valid object handles in all * cases, and will fail when presented with this value. */ #define VDP_INVALID_HANDLE 0xffffffffU /** * \brief The set of all chroma formats for \ref VdpVideoSurface * "VdpVideoSurface"s. */ typedef uint32_t VdpChromaType; /** \hideinitializer \brief 4:2:0 chroma format. */ #define VDP_CHROMA_TYPE_420 ((VdpChromaType)0) /** \hideinitializer \brief 4:2:2 chroma format. */ #define VDP_CHROMA_TYPE_422 ((VdpChromaType)1) /** \hideinitializer \brief 4:4:4 chroma format. 
*/ #define VDP_CHROMA_TYPE_444 ((VdpChromaType)2) /** * \brief The set of all known YCbCr surface formats. */ typedef uint32_t VdpYCbCrFormat; /** * \hideinitializer * \brief The "NV12" YCbCr surface format. * * This format has a two planes, a Y plane and a UV plane. * * The Y plane is an array of byte-sized Y components. * Applications should access this data via a uint8_t pointer. * * The UV plane is an array of interleaved byte-sized U and V * components, in the order U, V, U, V. Applications should * access this data via a uint8_t pointer. */ #define VDP_YCBCR_FORMAT_NV12 ((VdpYCbCrFormat)0) /** * \hideinitializer * \brief The "YV12" YCbCr surface format. * * This format has a three planes, a Y plane, a V plane, and a U * plane. * * Each of the planes is an array of byte-sized components. * * Applications should access this data via a uint8_t pointer. */ #define VDP_YCBCR_FORMAT_YV12 ((VdpYCbCrFormat)1) /** * \hideinitializer * \brief The "UYVY" YCbCr surface format. * * This format may also be known as Y422, UYNV, HDYC. * * This format has a single plane. * * This plane is an array of interleaved byte-sized Y, U, and V * components, in the order U, Y, V, Y, U, Y, V, Y. * * Applications should access this data via a uint8_t pointer. */ #define VDP_YCBCR_FORMAT_UYVY ((VdpYCbCrFormat)2) /** * \hideinitializer * \brief The "YUYV" YCbCr surface format. * * This format may also be known as YUY2, YUNV, V422. * * This format has a single plane. * * This plane is an array of interleaved byte-sized Y, U, and V * components, in the order Y, U, Y, V, Y, U, Y, V. * * Applications should access this data via a uint8_t pointer. */ #define VDP_YCBCR_FORMAT_YUYV ((VdpYCbCrFormat)3) /** * \hideinitializer * \brief A packed YCbCr format. * * This format has a single plane. * * This plane is an array packed 32-bit pixel data. Within each * 32-bit pixel, bits [31:24] contain A, bits [23:16] contain V, * bits [15:8] contain U, and bits [7:0] contain Y. 
* * Applications should access this data via a uint32_t pointer. */ #define VDP_YCBCR_FORMAT_Y8U8V8A8 ((VdpYCbCrFormat)4) /** * \hideinitializer * \brief A packed YCbCr format. * * This format has a single plane. * * This plane is an array packed 32-bit pixel data. Within each * 32-bit pixel, bits [31:24] contain A, bits [23:16] contain Y, * bits [15:8] contain U, and bits [7:0] contain V. * * Applications should access this data via a uint32_t pointer. */ #define VDP_YCBCR_FORMAT_V8U8Y8A8 ((VdpYCbCrFormat)5) /** * \brief The set of all known RGB surface formats. */ typedef uint32_t VdpRGBAFormat; /** * \hideinitializer * \brief A packed RGB format. * * This format has a single plane. * * This plane is an array packed 32-bit pixel data. Within each * 32-bit pixel, bits [31:24] contain A, bits [23:16] contain R, * bits [15:8] contain G, and bits [7:0] contain B. * * Applications should access this data via a uint32_t pointer. */ #define VDP_RGBA_FORMAT_B8G8R8A8 ((VdpRGBAFormat)0) /** * \hideinitializer * \brief A packed RGB format. * * This format has a single plane. * * This plane is an array packed 32-bit pixel data. Within each * 32-bit pixel, bits [31:24] contain A, bits [23:16] contain B, * bits [15:8] contain G, and bits [7:0] contain R. * * Applications should access this data via a uint32_t pointer. */ #define VDP_RGBA_FORMAT_R8G8B8A8 ((VdpRGBAFormat)1) /** * \hideinitializer * \brief A packed RGB format. * * This format has a single plane. * * This plane is an array packed 32-bit pixel data. Within each * 32-bit pixel, bits [31:30] contain A, bits [29:20] contain B, * bits [19:10] contain G, and bits [9:0] contain R. * * Applications should access this data via a uint32_t pointer. */ #define VDP_RGBA_FORMAT_R10G10B10A2 ((VdpRGBAFormat)2) /** * \hideinitializer * \brief A packed RGB format. * * This format has a single plane. * * This plane is an array packed 32-bit pixel data. 
Within each * 32-bit pixel, bits [31:30] contain A, bits [29:20] contain R, * bits [19:10] contain G, and bits [9:0] contain B. * * Applications should access this data via a uint32_t pointer. */ #define VDP_RGBA_FORMAT_B10G10R10A2 ((VdpRGBAFormat)3) /** * \hideinitializer * \brief An alpha-only surface format. * * This format has a single plane. * * This plane is an array of byte-sized components. * * Applications should access this data via a uint8_t pointer. */ #define VDP_RGBA_FORMAT_A8 ((VdpRGBAFormat)4) /** * \brief The set of all known indexed surface formats. */ typedef uint32_t VdpIndexedFormat; /** * \hideinitializer * \brief A 4-bit indexed format, with alpha. * * This format has a single plane. * * This plane is an array of byte-sized components. Within each * byte, bits [7:4] contain I (index), and bits [3:0] contain A. * * Applications should access this data via a uint8_t pointer. */ #define VDP_INDEXED_FORMAT_A4I4 ((VdpIndexedFormat)0) /** * \hideinitializer * \brief A 4-bit indexed format, with alpha. * * This format has a single plane. * * This plane is an array of byte-sized components. Within each * byte, bits [7:4] contain A, and bits [3:0] contain I (index). * * Applications should access this data via a uint8_t pointer. */ #define VDP_INDEXED_FORMAT_I4A4 ((VdpIndexedFormat)1) /** * \hideinitializer * \brief A 8-bit indexed format, with alpha. * * This format has a single plane. * * This plane is an array of interleaved byte-sized A and I * (index) components, in the order A, I, A, I. * * Applications should access this data via a uint8_t pointer. */ #define VDP_INDEXED_FORMAT_A8I8 ((VdpIndexedFormat)2) /** * \hideinitializer * \brief A 8-bit indexed format, with alpha. * * This format has a single plane. * * This plane is an array of interleaved byte-sized A and I * (index) components, in the order I, A, I, A. * * Applications should access this data via a uint8_t pointer. 
*/ #define VDP_INDEXED_FORMAT_I8A8 ((VdpIndexedFormat)3) /** * \brief A location within a surface. * * The VDPAU co-ordinate system has its origin at the top-left * of a surface, with x and y components increasing right and * down. */ typedef struct { /** X co-ordinate. */ uint32_t x; /** Y co-ordinate. */ uint32_t y; } VdpPoint; /** * \brief A rectangular region of a surface. * * The co-ordinates are top-left inclusive, bottom-right * exclusive. * * The VDPAU co-ordinate system has its origin at the top-left * of a surface, with x and y components increasing right and * down. */ typedef struct { /** Left X co-ordinate. Inclusive. */ uint32_t x0; /** Top Y co-ordinate. Inclusive. */ uint32_t y0; /** Right X co-ordinate. Exclusive. */ uint32_t x1; /** Bottom Y co-ordinate. Exclusive. */ uint32_t y1; } VdpRect; /** * A constant RGBA color. * * Note that the components are stored as float values in the * range 0.0...1.0 rather than format-specific integer values. * This allows VdpColor values to be independent from the exact * surface format(s) in use. */ typedef struct { float red; float green; float blue; float alpha; } VdpColor; /*@}*/ /** * \defgroup error_handling Error Handling * * @{ */ /** * \hideinitializer * \brief The set of all possible error codes. */ typedef enum { /** The operation completed successfully; no error. */ VDP_STATUS_OK = 0, /** * No backend implementation could be loaded. */ VDP_STATUS_NO_IMPLEMENTATION, /** * The display was preempted, or a fatal error occurred. * * The application must re-initialize VDPAU. */ VDP_STATUS_DISPLAY_PREEMPTED, /** * An invalid handle value was provided. * * Either the handle does not exist at all, or refers to an object of an * incorrect type. */ VDP_STATUS_INVALID_HANDLE, /** * An invalid pointer was provided. * * Typically, this means that a NULL pointer was provided for an "output" * parameter. */ VDP_STATUS_INVALID_POINTER, /** * An invalid/unsupported \ref VdpChromaType value was supplied. 
*/ VDP_STATUS_INVALID_CHROMA_TYPE, /** * An invalid/unsupported \ref VdpYCbCrFormat value was supplied. */ VDP_STATUS_INVALID_Y_CB_CR_FORMAT, /** * An invalid/unsupported \ref VdpRGBAFormat value was supplied. */ VDP_STATUS_INVALID_RGBA_FORMAT, /** * An invalid/unsupported \ref VdpIndexedFormat value was supplied. */ VDP_STATUS_INVALID_INDEXED_FORMAT, /** * An invalid/unsupported \ref VdpColorStandard value was supplied. */ VDP_STATUS_INVALID_COLOR_STANDARD, /** * An invalid/unsupported \ref VdpColorTableFormat value was supplied. */ VDP_STATUS_INVALID_COLOR_TABLE_FORMAT, /** * An invalid/unsupported \ref VdpOutputSurfaceRenderBlendFactor value was * supplied. */ VDP_STATUS_INVALID_BLEND_FACTOR, /** * An invalid/unsupported \ref VdpOutputSurfaceRenderBlendEquation value * was supplied. */ VDP_STATUS_INVALID_BLEND_EQUATION, /** * An invalid/unsupported flag value/combination was supplied. */ VDP_STATUS_INVALID_FLAG, /** * An invalid/unsupported \ref VdpDecoderProfile value was supplied. */ VDP_STATUS_INVALID_DECODER_PROFILE, /** * An invalid/unsupported \ref VdpVideoMixerFeature value was supplied. */ VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE, /** * An invalid/unsupported \ref VdpVideoMixerParameter value was supplied. */ VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER, /** * An invalid/unsupported \ref VdpVideoMixerAttribute value was supplied. */ VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE, /** * An invalid/unsupported \ref VdpVideoMixerPictureStructure value was * supplied. */ VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE, /** * An invalid/unsupported \ref VdpFuncId value was supplied. */ VDP_STATUS_INVALID_FUNC_ID, /** * The size of a supplied object does not match the object it is being * used with. * * For example, a \ref VdpVideoMixer "VdpVideoMixer" is configured to * process \ref VdpVideoSurface "VdpVideoSurface" objects of a specific * size. If presented with a \ref VdpVideoSurface "VdpVideoSurface" of a * different size, this error will be raised. 
*/ VDP_STATUS_INVALID_SIZE, /** * An invalid/unsupported value was supplied. * * This is a catch-all error code for values of type other than those * with a specific error code. */ VDP_STATUS_INVALID_VALUE, /** * An invalid/unsupported structure version was specified in a versioned * structure. This implies that the implementation is older than the * header file the application was built against. */ VDP_STATUS_INVALID_STRUCT_VERSION, /** * The system does not have enough resources to complete the requested * operation at this time. */ VDP_STATUS_RESOURCES, /** * The set of handles supplied are not all related to the same VdpDevice. * * When performing operations that operate on multiple surfaces, such as * \ref VdpOutputSurfaceRenderOutputSurface or \ref VdpVideoMixerRender, * all supplied surfaces must have been created within the context of the * same \ref VdpDevice "VdpDevice" object. This error is raised if they were * not. */ VDP_STATUS_HANDLE_DEVICE_MISMATCH, /** * A catch-all error, used when no other error code applies. */ VDP_STATUS_ERROR, } VdpStatus; /** * \brief Retrieve a string describing an error code. * \param[in] status The error code. * \return A pointer to the string. Note that this is a * statically allocated read-only string. As such, the * application must not free the returned pointer. The * pointer is valid as long as the VDPAU implementation is * present within the application's address space. */ typedef char const * VdpGetErrorString( VdpStatus status ); /*@}*/ /** * \defgroup versioning Versioning * * * @{ */ /** * \brief The VDPAU interface version described by this header file. * * This version will only increase if a major incompatible change is made. * For example, if the parameters passed to an existing function are modified, * rather than simply adding new functions/enumerations), or if the mechanism * used to load the backend driver is modified incompatibly. Such changes are * unlikely. 
* * This value also represents the DSO version of VDPAU-related * shared-libraries. * * VDPAU version numbers are simple integers that increase monotonically * (typically by value 1). */ #define VDPAU_INTERFACE_VERSION 1 /** * \brief The VDPAU version described by this header file. * * This version will increase whenever any non-documentation change is made to * vdpau.h, or related header files such as vdpau_x11.h. Such changes * typically involve the addition of new functions, constants, or features. * Such changes are expected to be completely backwards-compatible. * * VDPAU version numbers are simple integers that increase monotonically * (typically by value 1). */ #define VDPAU_VERSION 1 /** * \brief Retrieve the VDPAU version implemented by the backend. * \param[out] api_version The API version. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpGetApiVersion( /* output parameters follow */ uint32_t * api_version ); /** * \brief Retrieve an implementation-specific string description * of the implementation. This typically includes detailed version * information. * \param[out] information_string A pointer to the information * string. Note that this is a statically allocated * read-only string. As such, the application must not * free the returned pointer. The pointer is valid as long * as the implementation is present within the * application's address space. * \return VdpStatus The completion status of the operation. * * Note that the returned string is useful for information * reporting. It is not intended that the application should * parse this string in order to determine any information about * the implementation. */ typedef VdpStatus VdpGetInformationString( /* output parameters follow */ char const * * information_string ); /*@}*/ /** * \defgroup VdpDevice VdpDevice; Primary API object * * The VdpDevice is the root of the VDPAU object system. Using a * VdpDevice object, all other object types may be created. 
See * the sections describing those other object types for details * on object creation. * * Note that VdpDevice objects are created using the \ref * api_winsys. * * @{ */ /** * \brief An opaque handle representing a VdpDevice object. */ typedef uint32_t VdpDevice; /** * \brief Destroy a VdpDevice. * \param[in] device The device to destroy. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpDeviceDestroy( VdpDevice device ); /*@}*/ /** * \defgroup VdpCSCMatrix VdpCSCMatrix; CSC Matrix Manipulation * * When converting from YCbCr to RGB data formats, a color space * conversion operation must be performed. This operation is * parameterized using a "color space conversion matrix". The * VdpCSCMatrix is a data structure representing this * information. * * @{ */ /** * \brief Storage for a color space conversion matrix. * * Note that the application may choose to construct the matrix * content by either: * - Directly filling in the fields of the CSC matrix * - Using the \ref VdpGenerateCSCMatrix helper function. * * The color space conversion equation is as follows: * * \f[ * \left( \begin{array}{c} R \\ G \\ B \end{array} \right) * = * \left( \begin{array}{cccc} * m_{0,0} & m_{0,1} & m_{0,2} & m_{0,3} \\ * m_{1,0} & m_{1,1} & m_{1,2} & m_{1,3} \\ * m_{2,0} & m_{2,1} & m_{2,2} & m_{2,3} * \end{array} * \right) * * * \left( \begin{array}{c} Y \\ Cb \\ Cr \\ 1.0 \end{array} * \right) * \f] */ typedef float VdpCSCMatrix[3][4]; #define VDP_PROCAMP_VERSION 0 /** * \brief Procamp operation parameterization data. * * When performing a color space conversion operation, various * adjustments can be performed at the same time, such as * brightness and contrast. This structure defines the level of * adjustments to make. */ typedef struct { /** * This field must be filled with VDP_PROCAMP_VERSION */ uint32_t struct_version; /** * Brightness adjustment amount. A value clamped between * -1.0 and 1.0. 0.0 represents no modification. 
*/ float brightness; /** * Contrast adjustment amount. A value clamped between * 0.0 and 10.0. 1.0 represents no modification. */ float contrast; /** * Saturation adjustment amount. A value clamped between 0.0 and * 10.0. 1.0 represents no modification. */ float saturation; /** * Hue adjustment amount. A value clamped between * -PI and PI. 0.0 represents no modification. */ float hue; } VdpProcamp; /** * \brief YCbCr color space specification. * * A number of YCbCr color spaces exist. This enumeration * defines the specifications known to VDPAU. */ typedef uint32_t VdpColorStandard; /** \hideinitializer \brief ITU-R BT.601 */ #define VDP_COLOR_STANDARD_ITUR_BT_601 ((VdpColorStandard)0) /** \hideinitializer \brief ITU-R BT.709 */ #define VDP_COLOR_STANDARD_ITUR_BT_709 ((VdpColorStandard)1) /** \hideinitializer \brief SMPTE-240M */ #define VDP_COLOR_STANDARD_SMPTE_240M ((VdpColorStandard)2) /** * \brief Generate a color space conversion matrix * \param[in] procamp The procamp adjustments to make. If NULL, * no adjustments will be made. * \param[in] standard The YCbCr color space to convert from. * \param[out] csc_matrix The CSC matrix to initialize. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpGenerateCSCMatrix( VdpProcamp * procamp, VdpColorStandard standard, /* output parameters follow */ VdpCSCMatrix * csc_matrix ); /*@}*/ /** * \defgroup VdpVideoSurface VdpVideoSurface; Video Surface object * * A VdpVideoSurface stores YCbCr data in an internal format, * with a variety of possible chroma sub-sampling options. * * A VdpVideoSurface may be filled with: * - Data provided by the CPU via \ref * VdpVideoSurfacePutBitsYCbCr (i.e. software decode.) * - The result of applying a \ref VdpDecoder "VdpDecoder" to * compressed video data. 
* * VdpVideoSurface content may be accessed by: * - The application via \ref VdpVideoSurfaceGetBitsYCbCr * - The Hardware that implements \ref VdpOutputSurface * "VdpOutputSurface" \ref VdpOutputSurfaceRender * "rendering functionality". * - The Hardware the implements \ref VdpVideoMixer * "VdpVideoMixer" functionality. * * VdpVideoSurfaces are not directly displayable. They must be * converted into a displayable format using \ref VdpVideoMixer * "VdpVideoMixer" objects. * * See \ref video_mixer_usage for additional information. * * @{ */ /** * \brief Query the implementation's VdpVideoSurface * capabilities. * \param[in] device The device to query. * \param[in] surface_chroma_type The type of chroma type for * which information is requested. * \param[out] is_supported Is this chroma type supported? * \param[out] max_width The maximum supported surface width for * this chroma type. * \param[out] max_height The maximum supported surface height * for this chroma type. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpVideoSurfaceQueryCapabilities( VdpDevice device, VdpChromaType surface_chroma_type, /* output parameters follow */ VdpBool * is_supported, uint32_t * max_width, uint32_t * max_height ); /** * \brief Query the implementation's VdpVideoSurface * GetBits/PutBits capabilities. * \param[in] device The device to query. * \param[in] surface_chroma_type The type of chroma type for * which information is requested. * \param[in] bits_ycbcr_format The format of application "bits" * buffer for which information is requested. * \param[out] is_supported Is this chroma type supported? * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities( VdpDevice device, VdpChromaType surface_chroma_type, VdpYCbCrFormat bits_ycbcr_format, /* output parameters follow */ VdpBool * is_supported ); /** * \brief An opaque handle representing a VdpVideoSurface * object. 
*/ typedef uint32_t VdpVideoSurface; /** * \brief Create a VdpVideoSurface. * \param[in] device The device that will contain the surface. * \param[in] chroma_type The chroma type of the new surface. * \param[in] width The width of the new surface. * \param[in] height The height of the new surface. * \param[out] surface The new surface's handle. * \return VdpStatus The completion status of the operation. * * The memory backing the surface may not be initialized during * creation. Applications are expected to initialize any region * that they use, via \ref VdpDecoderRender or \ref * VdpVideoSurfacePutBitsYCbCr. * * Note that certain widths/heights are impossible for specific values of * chroma_type. For example, the definition of VDP_CHROMA_TYPE_420 implies * that the width must be even, since each single chroma sample covers two * luma samples horizontally. A similar argument applies to surface heights, * although doubly so, since interlaced pictures must be supported; each * field's height must itself be a multiple of 2. Hence the overall surface's * height must be a multiple of 4. * * Similar rules apply to other chroma_type values. * * Implementations may also impose additional restrictions on the surface * sizes they support, potentially requiring additional rounding of actual * surface sizes. * * In most cases, this is not an issue, since: * - Video streams are encoded as an array of macro-blocks, which typically * have larger size alignment requirements than video surfaces do. * - APIs such as \ref VdpVideoMixerRender allow specification of a sub-region * of the surface to read, which allows the padding data to be clipped away. * * However, other APIs such as \ref VdpVideoSurfaceGetBitsYCbCr and * \ref VdpVideoSurfacePutBitsYCbCr do not allow a sub-region to be specified, * and always operate on surface size that was actually allocated, rather * than the surface size that was requested. 
In this case, applications need * to be aware of the actual surface size, in order to allocate appropriately * sized buffers for the get-/put-bits operations. * * For this reason, applications may need to call * \ref VdpVideoSurfaceGetParameters after creation, in order to retrieve the * actual surface size. */ typedef VdpStatus VdpVideoSurfaceCreate( VdpDevice device, VdpChromaType chroma_type, uint32_t width, uint32_t height, /* output parameters follow */ VdpVideoSurface * surface ); /** * \brief Destroy a VdpVideoSurface. * \param[in] surface The surface's handle. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpVideoSurfaceDestroy( VdpVideoSurface surface ); /** * \brief Retrieve the parameters used to create a * VdpVideoSurface. * \param[in] surface The surface's handle. * \param[out] chroma_type The chroma type of the surface. * \param[out] width The width of the surface. * \param[out] height The height of the surface. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpVideoSurfaceGetParameters( VdpVideoSurface surface, /* output parameters follow */ VdpChromaType * chroma_type, uint32_t * width, uint32_t * height ); /** * \brief Copy image data from a VdpVideoSurface to application * memory in a specified YCbCr format. * \param[in] surface The surface's handle. * \param[in] destination_ycbcr_format The format of the * application's data buffers. * \param[in] destination_data Pointers to the application data * buffers into which the image data will be written. Note * that this is an array of pointers, one per plane. The * destination_format parameter will define how many * planes are required. * \param[in] destination_pitches Pointers to the pitch values * for the application data buffers. Note that this is an * array of pointers, one per plane. The * destination_format parameter will define how many * planes are required. * \return VdpStatus The completion status of the operation. 
*/ typedef VdpStatus VdpVideoSurfaceGetBitsYCbCr( VdpVideoSurface surface, VdpYCbCrFormat destination_ycbcr_format, void * const * destination_data, uint32_t const * destination_pitches ); /** * \brief Copy image data from application memory in a specific * YCbCr format to a VdpVideoSurface. * \param[in] surface The surface's handle. * \param[in] source_ycbcr_format The format of the * application's data buffers. * \param[in] source_data Pointers to the application data * buffers from which the image data will be copied. Note * that this is an array of pointers, one per plane. The * source_format parameter will define how many * planes are required. * \param[in] source_pitches Pointers to the pitch values * for the application data buffers. Note that this is an * array of pointers, one per plane. The * source_format parameter will define how many * planes are required. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpVideoSurfacePutBitsYCbCr( VdpVideoSurface surface, VdpYCbCrFormat source_ycbcr_format, void const * const * source_data, uint32_t const * source_pitches ); /*@}*/ /** * \defgroup VdpOutputSurface VdpOutputSurface; Output Surface \ * object * * A VdpOutputSurface stores RGBA data in a defined format. * * A VdpOutputSurface may be filled with: * - Data provided by the CPU via the various * VdpOutputSurfacePutBits functions. * - Using the VdpOutputSurface \ref VdpOutputSurfaceRender * "rendering functionality". * - Using a \ref VdpVideoMixer "VdpVideoMixer" object. * * VdpOutputSurface content may be accessed by: * - The application via the various VdpOutputSurfaceGetBits * functions. * - The Hardware that implements VdpOutputSurface * \ref VdpOutputSurfaceRender "rendering functionality". * - The Hardware the implements \ref VdpVideoMixer * "VdpVideoMixer" functionality. 
* - The Hardware that implements \ref VdpPresentationQueue * "VdpPresentationQueue" functionality, * * VdpVideoSurfaces are directly displayable using a \ref * VdpPresentationQueue "VdpPresentationQueue" object. * * @{ */ /** * \brief The set of all known color table formats, for use with * \ref VdpOutputSurfacePutBitsIndexed. */ typedef uint32_t VdpColorTableFormat; /** * \hideinitializer * \brief 8-bit per component packed into 32-bits * * This format is an array of packed 32-bit RGB color values. * Bits [31:24] are unused, bits [23:16] contain R, bits [15:8] * contain G, and bits [7:0] contain B. Note: The format is * physically an array of uint32_t values, and should be accessed * as such by the application in order to avoid endianness * issues. */ #define VDP_COLOR_TABLE_FORMAT_B8G8R8X8 ((VdpColorTableFormat)0) /** * \brief Query the implementation's VdpOutputSurface * capabilities. * \param[in] device The device to query. * \param[in] surface_rgba_format The surface format for * which information is requested. * \param[out] is_supported Is this surface format supported? * \param[out] max_width The maximum supported surface width for * this chroma type. * \param[out] max_height The maximum supported surface height * for this chroma type. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpOutputSurfaceQueryCapabilities( VdpDevice device, VdpRGBAFormat surface_rgba_format, /* output parameters follow */ VdpBool * is_supported, uint32_t * max_width, uint32_t * max_height ); /** * \brief Query the implementation's capability to perform a * PutBits operation using application data matching the * surface's format. * \param[in] device The device to query. * \param[in] surface_rgba_format The surface format for * which information is requested. * \param[out] is_supported Is this surface format supported? * \return VdpStatus The completion status of the operation. 
*/ typedef VdpStatus VdpOutputSurfaceQueryGetPutBitsNativeCapabilities( VdpDevice device, VdpRGBAFormat surface_rgba_format, /* output parameters follow */ VdpBool * is_supported ); /** * \brief Query the implementation's capability to perform a * PutBits operation using application data in a specific * indexed format. * \param[in] device The device to query. * \param[in] surface_rgba_format The surface format for * which information is requested. * \param[in] bits_indexed_format The format of the application * data buffer. * \param[in] color_table_format The format of the color lookup * table. * \param[out] is_supported Is this surface format supported? * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpOutputSurfaceQueryPutBitsIndexedCapabilities( VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpIndexedFormat bits_indexed_format, VdpColorTableFormat color_table_format, /* output parameters follow */ VdpBool * is_supported ); /** * \brief Query the implementation's capability to perform a * PutBits operation using application data in a specific * YCbCr/YUB format. * \param[in] device The device to query. * \param[in] surface_rgba_format The surface format for which * information is requested. * \param[in] bits_ycbcr_format The format of the application * data buffer. * \param[out] is_supported Is this surface format supported? * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpOutputSurfaceQueryPutBitsYCbCrCapabilities( VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpYCbCrFormat bits_ycbcr_format, /* output parameters follow */ VdpBool * is_supported ); /** * \brief An opaque handle representing a VdpOutputSurface * object. */ typedef uint32_t VdpOutputSurface; /** * \brief Create a VdpOutputSurface. * \param[in] device The device that will contain the surface. * \param[in] rgba_format The format of the new surface. * \param[in] width The width of the new surface. 
* \param[in] height The height of the new surface. * \param[out] surface The new surface's handle. * \return VdpStatus The completion status of the operation. * * The memory backing the surface will be initialized to 0 color * and 0 alpha (i.e. black.) */ typedef VdpStatus VdpOutputSurfaceCreate( VdpDevice device, VdpRGBAFormat rgba_format, uint32_t width, uint32_t height, /* output parameters follow */ VdpOutputSurface * surface ); /** * \brief Destroy a VdpOutputSurface. * \param[in] surface The surface's handle. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpOutputSurfaceDestroy( VdpOutputSurface surface ); /** * \brief Retrieve the parameters used to create a * VdpOutputSurface. * \param[in] surface The surface's handle. * \param[out] rgba_format The format of the surface. * \param[out] width The width of the surface. * \param[out] height The height of the surface. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpOutputSurfaceGetParameters( VdpOutputSurface surface, /* output parameters follow */ VdpRGBAFormat * rgba_format, uint32_t * width, uint32_t * height ); /** * \brief Copy image data from a VdpOutputSurface to application * memory in the surface's native format. * \param[in] surface The surface's handle. * \param[in] source_rect The sub-rectangle of the source * surface to copy. If NULL, the entire surface will be * retrieved. * \param[in] destination_data Pointers to the application data * buffers into which the image data will be written. Note * that this is an array of pointers, one per plane. The * destination_format parameter will define how many * planes are required. * \param[in] destination_pitches Pointers to the pitch values * for the application data buffers. Note that this is an * array of pointers, one per plane. The * destination_format parameter will define how many * planes are required. * \return VdpStatus The completion status of the operation. 
*/ typedef VdpStatus VdpOutputSurfaceGetBitsNative( VdpOutputSurface surface, VdpRect const * source_rect, void * const * destination_data, uint32_t const * destination_pitches ); /** * \brief Copy image data from application memory in the * surface's native format to a VdpOutputSurface. * \param[in] surface The surface's handle. * \param[in] source_data Pointers to the application data * buffers from which the image data will be copied. Note * that this is an array of pointers, one per plane. The * source_format parameter will define how many * planes are required. * \param[in] source_pitches Pointers to the pitch values * for the application data buffers. Note that this is an * array of pointers, one per plane. The * source_format parameter will define how many * planes are required. * \param[in] destination_rect The sub-rectangle of the surface * to fill with application data. If NULL, the entire * surface will be updated. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpOutputSurfacePutBitsNative( VdpOutputSurface surface, void const * const * source_data, uint32_t const * source_pitches, VdpRect const * destination_rect ); /** * \brief Copy image data from application memory in a specific * indexed format to a VdpOutputSurface. * \param[in] surface The surface's handle. * \param[in] source_indexed_format The format of the * application's data buffers. * \param[in] source_data Pointers to the application data * buffers from which the image data will be copied. Note * that this is an array of pointers, one per plane. The * source_indexed_format parameter will define how many * planes are required. * \param[in] source_pitches Pointers to the pitch values * for the application data buffers. Note that this is an * array of pointers, one per plane. The * source_indexed_format parameter will define how many * planes are required. * \param[in] destination_rect The sub-rectangle of the surface * to fill with application data. 
If NULL, the entire * surface will be updated. * \param[in] color_table_format The format of the color_table. * \param[in] color_table A table that maps between source index * and target color data. See \ref VdpColorTableFormat for * details regarding the memory layout. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpOutputSurfacePutBitsIndexed( VdpOutputSurface surface, VdpIndexedFormat source_indexed_format, void const * const * source_data, uint32_t const * source_pitch, VdpRect const * destination_rect, VdpColorTableFormat color_table_format, void const * color_table ); /** * \brief Copy image data from application memory in a specific * YCbCr format to a VdpOutputSurface. * \param[in] surface The surface's handle. * \param[in] source_ycbcr_format The format of the * application's data buffers. * \param[in] source_data Pointers to the application data * buffers from which the image data will be copied. Note * that this is an array of pointers, one per plane. The * source_ycbcr_format parameter will define how many * planes are required. * \param[in] source_pitches Pointers to the pitch values * for the application data buffers. Note that this is an * array of pointers, one per plane. The * source_ycbcr_format parameter will define how many * planes are required. * \param[in] destination_rect The sub-rectangle of the surface * to fill with application data. If NULL, the entire * surface will be updated. * \param[in] csc_matrix The color space conversion matrix used * by the copy operation. If NULL, a default matrix will * be used internally. Th default matrix is equivalent to * ITU-R BT.601 with no procamp changes. * \return VdpStatus The completion status of the operation. 
*/ typedef VdpStatus VdpOutputSurfacePutBitsYCbCr( VdpOutputSurface surface, VdpYCbCrFormat source_ycbcr_format, void const * const * source_data, uint32_t const * source_pitches, VdpRect const * destination_rect, VdpCSCMatrix const * csc_matrix ); /*@}*/ /** * \defgroup VdpBitmapSurface VdpBitmapSurface; Bitmap Surface \ * object * * A VdpBitmapSurface stores RGBA data in a defined format. * * A VdpBitmapSurface may be filled with: * - Data provided by the CPU via the \ref * VdpBitmapSurfacePutBitsNative function. * * VdpBitmapSurface content may be accessed by: * - The Hardware that implements \ref VdpOutputSurface * "VdpOutputSurface" \ref VdpOutputSurfaceRender * "rendering functionality" * * VdpBitmapSurface objects are intended to store static read-only data, such * as font glyphs, and the bitmaps used to compose an applications' * user-interface. * * The primary differences between VdpBitmapSurfaces and * \ref VdpOutputSurface "VdpOutputSurface"s are: * * - You cannot render to a VdpBitmapSurface, just upload native data via * the PutBits API. * * - The read-only nature of a VdpBitmapSurface gives the implementation more * flexibility in its choice of data storage location for the bitmap data. * For example, some implementations may choose to store some/all * VdpBitmapSurface objects in system memory to relieve GPU memory pressure. * * - VdpBitmapSurface and VdpOutputSurface may support different subsets of all * known RGBA formats. * * @{ */ /** * \brief Query the implementation's VdpBitmapSurface * capabilities. * \param[in] device The device to query. * \param[in] surface_rgba_format The surface format for * which information is requested. * \param[out] is_supported Is this surface format supported? * \param[out] max_width The maximum supported surface width for * this chroma type. * \param[out] max_height The maximum supported surface height * for this chroma type. * \return VdpStatus The completion status of the operation. 
*/ typedef VdpStatus VdpBitmapSurfaceQueryCapabilities( VdpDevice device, VdpRGBAFormat surface_rgba_format, /* output parameters follow */ VdpBool * is_supported, uint32_t * max_width, uint32_t * max_height ); /** * \brief An opaque handle representing a VdpBitmapSurface * object. */ typedef uint32_t VdpBitmapSurface; /** * \brief Create a VdpBitmapSurface. * \param[in] device The device that will contain the surface. * \param[in] rgba_format The format of the new surface. * \param[in] width The width of the new surface. * \param[in] height The height of the new surface. * \param[in] frequently_accessed Is this bitmap used * frequently, or infrequently, by compositing options? * Implementations may use this as a hint to determine how * to allocate the underlying storage for the surface. * \param[out] surface The new surface's handle. * \return VdpStatus The completion status of the operation. * * The memory backing the surface may not be initialized * during creation. Applications are expected initialize any * region that they use, via \ref VdpBitmapSurfacePutBitsNative. */ typedef VdpStatus VdpBitmapSurfaceCreate( VdpDevice device, VdpRGBAFormat rgba_format, uint32_t width, uint32_t height, VdpBool frequently_accessed, /* output parameters follow */ VdpBitmapSurface * surface ); /** * \brief Destroy a VdpBitmapSurface. * \param[in] surface The surface's handle. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpBitmapSurfaceDestroy( VdpBitmapSurface surface ); /** * \brief Retrieve the parameters used to create a * VdpBitmapSurface. * \param[in] surface The surface's handle. * \param[out] rgba_format The format of the surface. * \param[out] width The width of the surface. * \param[out] height The height of the surface. * \param[out] frequently_accessed The frequently_accessed state * of the surface. * \return VdpStatus The completion status of the operation. 
*/ typedef VdpStatus VdpBitmapSurfaceGetParameters( VdpBitmapSurface surface, /* output parameters follow */ VdpRGBAFormat * rgba_format, uint32_t * width, uint32_t * height, VdpBool * frequently_accessed ); /** * \brief Copy image data from application memory in the * surface's native format to a VdpBitmapSurface. * \param[in] surface The surface's handle. * \param[in] source_data Pointers to the application data * buffers from which the image data will be copied. Note * that this is an array of pointers, one per plane. The * source_format parameter will define how many * planes are required. * \param[in] source_pitches Pointers to the pitch values * for the application data buffers. Note that this is an * array of pointers, one per plane. The * source_format parameter will define how many * planes are required. * \param[in] destination_rect The sub-rectangle of the surface * to fill with application data. If NULL, the entire * surface will be updated. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpBitmapSurfacePutBitsNative( VdpBitmapSurface surface, void const * const * source_data, uint32_t const * source_pitches, VdpRect const * destination_rect ); /*@}*/ /** * \defgroup VdpOutputSurfaceRender VdpOutputSurface Rendering \ * Functionality * * \ref VdpOutputSurface "VdpOutputSurface" objects * directly provide some rendering/compositing operations. These * are described below. * * @{ */ /** * \hideinitializer * \brief The blending equation factors. 
*/ typedef enum { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO = 0, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE = 1, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_COLOR = 2, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA = 4, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 5, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_ALPHA = 6, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 7, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_COLOR = 8, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 9, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA_SATURATE = 10, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_COLOR = 11, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 12, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_ALPHA = 13, VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 14, } VdpOutputSurfaceRenderBlendFactor; /** * \hideinitializer * \brief The blending equations. */ typedef enum { VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_SUBTRACT = 0, VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_REVERSE_SUBTRACT = 1, VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD = 2, VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MIN = 3, VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MAX = 4, } VdpOutputSurfaceRenderBlendEquation; #define VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION 0 /** * \brief Complete blending operation definition. * * A "blend state" operation controls the math behind certain rendering * operations. * * The blend math is the familiar OpenGL blend math: * \f[ * dst.a = equation(blendFactorDstAlpha*dst.a, * blendFactorSrcAlpha*src.a); * \f] * \f[ * dst.rgb = equation(blendFactorDstColor*dst.rgb, * blendFactorSrcColor*src.rgb); * \f] * * Note that when equation is MIN or MAX, the blend factors and constants * are ignored, and are treated as if they were 1.0. 
*/ typedef struct { /** * This field must be filled with VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSIION */ uint32_t struct_version; VdpOutputSurfaceRenderBlendFactor blend_factor_source_color; VdpOutputSurfaceRenderBlendFactor blend_factor_destination_color; VdpOutputSurfaceRenderBlendFactor blend_factor_source_alpha; VdpOutputSurfaceRenderBlendFactor blend_factor_destination_alpha; VdpOutputSurfaceRenderBlendEquation blend_equation_color; VdpOutputSurfaceRenderBlendEquation blend_equation_alpha; VdpColor blend_constant; } VdpOutputSurfaceRenderBlendState; /** * \hideinitializer * \brief Do not rotate source_surface prior to compositing. */ #define VDP_OUTPUT_SURFACE_RENDER_ROTATE_0 0 /** * \hideinitializer * \brief Rotate source_surface 90 degrees clockwise prior to * compositing. */ #define VDP_OUTPUT_SURFACE_RENDER_ROTATE_90 1 /** * \hideinitializer * \brief Rotate source_surface 180 degrees prior to * compositing. */ #define VDP_OUTPUT_SURFACE_RENDER_ROTATE_180 2 /** * \hideinitializer * \brief Rotate source_surface 270 degrees clockwise prior to * compositing. */ #define VDP_OUTPUT_SURFACE_RENDER_ROTATE_270 3 /** * \hideinitializer * \brief A separate color is used for each vertex of the * smooth-shaded quad. Hence, colors array contains 4 * elements rather than 1. See description of colors * array. */ #define VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX (1 << 2) /** * \brief Composite a sub-rectangle of a \ref VdpOutputSurface * "VdpOutputSurface" into a sub-rectangle of another * \ref VdpOutputSurface VdpOutputSurface. * \param[in] destination_surface The destination surface of the * compositing operation. * \param[in] destination_rect The sub-rectangle of the * destination surface to update. If NULL, the entire * destination surface will be updated. * \param[in] source_surface The source surface for the * compositing operation. The surface is treated as having * four components: red, green, blue and alpha. Any * missing components are treated as 1.0. 
For example, for * an A8 VdpOutputSurface, alpha will come from the surface * but red, green and blue will be treated as 1.0. If * source_surface is VDP_INVALID_HANDLE, all components will * be treated as 1.0. Note that destination_surface and * source_surface must have been allocated via the same * \ref VdpDevice "VdpDevice". * \param[in] source_rect The sub-rectangle of the source * surface to read from. If NULL, the entire * source_surface will be read. Left/right and/or top/bottom * co-ordinates may be swapped to flip the source. Any * flip occurs prior to any requested rotation. Values * from outside the source surface are valid and samples * at those locations will be taken from the nearest edge. * \param[in] colors A pointer to an array of \ref VdpColor * "VdpColor" objects. If the flag * VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX is set, * VDPAU will four entries from the array, and treat them * as the colors corresponding to the upper-left, * upper-right, lower-right and lower-left corners of the * post-rotation source (i.e. indices 0, 1, 2 and 3 run * clockwise from the upper left corner). If the flag * VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX is not * set, VDPAU will use the single VdpColor for all four * corners. If colors is NULL then red, green, blue and * alpha values of 1.0 will be used. * \param[in] blend_state If a blend state is provided, the * blend state will be used for the composite operation. If * NULL, blending is effectively disabled, which is * equivalent to a blend equation of ADD, source blend * factors of ONE and destination blend factors of ZERO. * See \ref VdpOutputSurfaceRenderBlendState for details * regarding the mathematics of the blending operation. * \param[in] flags A set of flags influencing how the * compositing operation works. 
* \arg \ref VDP_OUTPUT_SURFACE_RENDER_ROTATE_0 * \arg \ref VDP_OUTPUT_SURFACE_RENDER_ROTATE_90 * \arg \ref VDP_OUTPUT_SURFACE_RENDER_ROTATE_180 * \arg \ref VDP_OUTPUT_SURFACE_RENDER_ROTATE_270 * \arg \ref VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX * \return VdpStatus The completion status of the operation. * * The general compositing pipeline is as follows. * * -# Extract source_rect from source_surface. * * -# The extracted source is rotated 0, 90, 180 or 270 degrees * according to the flags. * * -# The rotated source is component-wise multiplied by a * smooth-shaded quad with a (potentially) different color at * each vertex. * * -# The resulting rotated, smooth-shaded quad is scaled to the * size of destination_rect and composited with * destination_surface using the provided blend state. * */ typedef VdpStatus VdpOutputSurfaceRenderOutputSurface( VdpOutputSurface destination_surface, VdpRect const * destination_rect, VdpOutputSurface source_surface, VdpRect const * source_rect, VdpColor const * colors, VdpOutputSurfaceRenderBlendState const * blend_state, uint32_t flags ); /** * \brief Composite a sub-rectangle of a \ref VdpBitmapSurface * "VdpBitmapSurface" into a sub-rectangle of a * \ref VdpOutputSurface VdpOutputSurface. * \param[in] destination_surface The destination surface of the * compositing operation. * \param[in] destination_rect The sub-rectangle of the * destination surface to update. If NULL, the entire * destination surface will be updated. * \param[in] source_surface The source surface for the * compositing operation. The surface is treated as having * four components: red, green, blue and alpha. Any * missing components are treated as 1.0. For example, for * an A8 VdpBitmapSurface, alpha will come from the surface * but red, green and blue will be treated as 1.0. If * source_surface is VDP_INVALID_HANDLE, all components will * be treated as 1.0. 
Note that destination_surface and * source_surface must have been allocated via the same * \ref VdpDevice "VdpDevice". * \param[in] source_rect The sub-rectangle of the source * surface to read from. If NULL, the entire * source_surface will be read. Left/right ot top/bottom * co-ordinates may be swapped to flip the source. Any * flip occurs prior to any requested rotation. Values * from outside the source surface are valid and samples * at those locations will be taken from the nearest edge. * \param[in] colors A pointer to an array of \ref VdpColor * "VdpColor" objects. If the flag * VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX is set, * VDPAU will four entries from the array, and treat them * as the colors corresponding to the upper-left, * upper-right, lower-right and lower-left corners of the * post-rotation source (i.e. indices 0, 1, 2 and 3 run * clockwise from the upper left corner). If the flag * VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX is not * set, VDPAU will use the single VdpColor for all four * corners. If colors is NULL then red, green, blue and * alpha values of 1.0 will be used. * \param[in] blend_state If a blend state is provided, the * blend state will be used for the composite operation. If * NULL, blending is effectively disabled, which is * equivalent to a blend equation of ADD, source blend * factors of ONE and destination blend factors of ZERO. * See \ref VdpOutputSurfaceRenderBlendState for details * regarding the mathematics of the blending operation. * \param[in] flags A set of flags influencing how the * compositing operation works. * \arg \ref VDP_OUTPUT_SURFACE_RENDER_ROTATE_0 * \arg \ref VDP_OUTPUT_SURFACE_RENDER_ROTATE_90 * \arg \ref VDP_OUTPUT_SURFACE_RENDER_ROTATE_180 * \arg \ref VDP_OUTPUT_SURFACE_RENDER_ROTATE_270 * \arg \ref VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX * \return VdpStatus The completion status of the operation. * * The general compositing pipeline is as follows. * * -# Extract source_rect from source_surface. 
* * -# The extracted source is rotated 0, 90, 180 or 270 degrees * according to the flags. * * -# The rotated source is component-wise multiplied by a * smooth-shaded quad with a (potentially) different color at * each vertex. * * -# The resulting rotated, smooth-shaded quad is scaled to the * size of destination_rect and composited with * destination_surface using the provided blend state. * */ typedef VdpStatus VdpOutputSurfaceRenderBitmapSurface( VdpOutputSurface destination_surface, VdpRect const * destination_rect, VdpBitmapSurface source_surface, VdpRect const * source_rect, VdpColor const * colors, VdpOutputSurfaceRenderBlendState const * blend_state, uint32_t flags ); /*@}*/ /** * \defgroup VdpDecoder VdpDecoder; Video Decoding object * * The VdpDecoder object decodes compressed video data, writing * the results to a \ref VdpVideoSurface "VdpVideoSurface". * * A specific VDPAU implementation may support decoding multiple * types of compressed video data. However, VdpDecoder objects * are able to decode a specific type of compressed video data. * This type must be specified during creation. * * @{ */ /** * \brief The set of all known compressed video formats, and * associated profiles, that may be decoded. 
*/ typedef uint32_t VdpDecoderProfile; /** \hideinitializer */ #define VDP_DECODER_PROFILE_MPEG1 ((VdpDecoderProfile)0) /** \hideinitializer */ #define VDP_DECODER_PROFILE_MPEG2_SIMPLE ((VdpDecoderProfile)1) /** \hideinitializer */ #define VDP_DECODER_PROFILE_MPEG2_MAIN ((VdpDecoderProfile)2) /** \hideinitializer */ /** \brief MPEG 4 part 10 == H.264 == AVC */ #define VDP_DECODER_PROFILE_H264_BASELINE ((VdpDecoderProfile)6) /** \hideinitializer */ #define VDP_DECODER_PROFILE_H264_MAIN ((VdpDecoderProfile)7) /** \hideinitializer */ #define VDP_DECODER_PROFILE_H264_HIGH ((VdpDecoderProfile)8) /** \hideinitializer */ #define VDP_DECODER_PROFILE_VC1_SIMPLE ((VdpDecoderProfile)9) /** \hideinitializer */ #define VDP_DECODER_PROFILE_VC1_MAIN ((VdpDecoderProfile)10) /** \hideinitializer */ #define VDP_DECODER_PROFILE_VC1_ADVANCED ((VdpDecoderProfile)11) /** \hideinitializer */ #define VDP_DECODER_PROFILE_MPEG4_PART2_SP ((VdpDecoderProfile)12) /** \hideinitializer */ #define VDP_DECODER_PROFILE_MPEG4_PART2_ASP ((VdpDecoderProfile)13) /** \hideinitializer */ #define VDP_DECODER_PROFILE_DIVX4_QMOBILE ((VdpDecoderProfile)14) /** \hideinitializer */ #define VDP_DECODER_PROFILE_DIVX4_MOBILE ((VdpDecoderProfile)15) /** \hideinitializer */ #define VDP_DECODER_PROFILE_DIVX4_HOME_THEATER ((VdpDecoderProfile)16) /** \hideinitializer */ #define VDP_DECODER_PROFILE_DIVX4_HD_1080P ((VdpDecoderProfile)17) /** \hideinitializer */ #define VDP_DECODER_PROFILE_DIVX5_QMOBILE ((VdpDecoderProfile)18) /** \hideinitializer */ #define VDP_DECODER_PROFILE_DIVX5_MOBILE ((VdpDecoderProfile)19) /** \hideinitializer */ #define VDP_DECODER_PROFILE_DIVX5_HOME_THEATER ((VdpDecoderProfile)20) /** \hideinitializer */ #define VDP_DECODER_PROFILE_DIVX5_HD_1080P ((VdpDecoderProfile)21) /** \hideinitializer */ #define VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE ((VdpDecoderProfile)22) /** \hideinitializer */ #define VDP_DECODER_PROFILE_H264_EXTENDED ((VdpDecoderProfile)23) /** \hideinitializer */ #define 
VDP_DECODER_PROFILE_H264_PROGRESSIVE_HIGH ((VdpDecoderProfile)24) /** \hideinitializer */ #define VDP_DECODER_PROFILE_H264_CONSTRAINED_HIGH ((VdpDecoderProfile)25) /** \hideinitializer */ /** \brief Support for 8 bit depth only */ #define VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE ((VdpDecoderProfile)26) /** \hideinitializer */ /** \brief MPEG-H Part 2 == H.265 == HEVC */ #define VDP_DECODER_PROFILE_HEVC_MAIN ((VdpDecoderProfile)100) /** \hideinitializer */ #define VDP_DECODER_PROFILE_HEVC_MAIN_10 ((VdpDecoderProfile)101) /** \hideinitializer */ #define VDP_DECODER_PROFILE_HEVC_MAIN_STILL ((VdpDecoderProfile)102) /** \hideinitializer */ #define VDP_DECODER_PROFILE_HEVC_MAIN_12 ((VdpDecoderProfile)103) /** \hideinitializer */ #define VDP_DECODER_PROFILE_HEVC_MAIN_444 ((VdpDecoderProfile)104) /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG1_NA 0 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG2_LL 0 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG2_ML 1 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG2_HL14 2 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG2_HL 3 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_1 10 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_1b 9 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_1_1 11 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_1_2 12 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_1_3 13 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_2 20 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_2_1 21 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_2_2 22 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_3 30 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_3_1 31 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_3_2 32 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_4 40 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_4_1 41 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_4_2 42 /** \hideinitializer */ 
#define VDP_DECODER_LEVEL_H264_5 50 /** \hideinitializer */ #define VDP_DECODER_LEVEL_H264_5_1 51 /** \hideinitializer */ #define VDP_DECODER_LEVEL_VC1_SIMPLE_LOW 0 /** \hideinitializer */ #define VDP_DECODER_LEVEL_VC1_SIMPLE_MEDIUM 1 /** \hideinitializer */ #define VDP_DECODER_LEVEL_VC1_MAIN_LOW 0 /** \hideinitializer */ #define VDP_DECODER_LEVEL_VC1_MAIN_MEDIUM 1 /** \hideinitializer */ #define VDP_DECODER_LEVEL_VC1_MAIN_HIGH 2 /** \hideinitializer */ #define VDP_DECODER_LEVEL_VC1_ADVANCED_L0 0 /** \hideinitializer */ #define VDP_DECODER_LEVEL_VC1_ADVANCED_L1 1 /** \hideinitializer */ #define VDP_DECODER_LEVEL_VC1_ADVANCED_L2 2 /** \hideinitializer */ #define VDP_DECODER_LEVEL_VC1_ADVANCED_L3 3 /** \hideinitializer */ #define VDP_DECODER_LEVEL_VC1_ADVANCED_L4 4 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG4_PART2_SP_L0 0 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG4_PART2_SP_L1 1 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG4_PART2_SP_L2 2 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG4_PART2_SP_L3 3 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG4_PART2_ASP_L0 0 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG4_PART2_ASP_L1 1 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG4_PART2_ASP_L2 2 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG4_PART2_ASP_L3 3 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG4_PART2_ASP_L4 4 /** \hideinitializer */ #define VDP_DECODER_LEVEL_MPEG4_PART2_ASP_L5 5 /** \hideinitializer */ #define VDP_DECODER_LEVEL_DIVX_NA 0 /** * The VDPAU H.265/HEVC decoder levels correspond to the values of * general_level_idc as described in the H.265 Specification, Annex A, * Table A.1. The enumeration values are equal to thirty times the level * number. 
*/ #define VDP_DECODER_LEVEL_HEVC_1 30 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_2 60 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_2_1 63 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_3 90 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_3_1 93 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_4 120 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_4_1 123 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_5 150 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_5_1 153 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_5_2 156 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_6 180 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_6_1 183 /** \hideinitializer */ #define VDP_DECODER_LEVEL_HEVC_6_2 186 /** * \brief Query the implementation's VdpDecoder capabilities. * \param[in] device The device to query. * \param[in] profile The decoder profile for which information is requested. * \param[out] is_supported Is this profile supported? * \param[out] max_level The maximum specification level supported for this * profile. * \param[out] max_macroblocks The maximum supported surface size in * macroblocks. Note that this could be greater than that dictated by * the maximum level. * \param[out] max_width The maximum supported surface width for this profile. * Note that this could be greater than that dictated by the maximum * level. * \param[out] max_height The maximum supported surface height for this * profile. Note that this could be greater than that dictated by the * maximum level. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpDecoderQueryCapabilities( VdpDevice device, VdpDecoderProfile profile, /* output parameters follow */ VdpBool * is_supported, uint32_t * max_level, uint32_t * max_macroblocks, uint32_t * max_width, uint32_t * max_height ); /** * \brief An opaque handle representing a VdpDecoder object. 
*/ typedef uint32_t VdpDecoder; /** * \brief Create a VdpDecoder. * \param[in] device The device that will contain the surface. * \param[in] profile The video format the decoder will decode. * \param[in] width The width of the new surface. * \param[in] height The height of the new surface. * \param[in] max_references The maximum number of references that may be * used by a single frame in the stream to be decoded. This parameter * exists mainly for formats such as H.264, where different streams * may use a different number of references. Requesting too many * references may waste memory, but decoding should still operate * correctly. Requesting too few references will cause decoding to * fail. * \param[out] decoder The new decoder's handle. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpDecoderCreate( VdpDevice device, VdpDecoderProfile profile, uint32_t width, uint32_t height, uint32_t max_references, /* output parameters follow */ VdpDecoder * decoder ); /** * \brief Destroy a VdpDecoder. * \param[in] surface The decoder's handle. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpDecoderDestroy( VdpDecoder decoder ); /** * \brief Retrieve the parameters used to create a * VdpDecoder. * \param[in] surface The surface's handle. * \param[out] profile The video format used to create the * decoder. * \param[out] width The width of surfaces decode by the * decoder. * \param[out] height The height of surfaces decode by the * decoder * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpDecoderGetParameters( VdpDecoder decoder, /* output parameters follow */ VdpDecoderProfile * profile, uint32_t * width, uint32_t * height ); #define VDP_BITSTREAM_BUFFER_VERSION 0 /** * \brief Application data buffer containing compressed video * data. 
*/ typedef struct { /** * This field must be filled with VDP_BITSTREAM_BUFFER_VERSION */ uint32_t struct_version; /** A pointer to the bitstream data bytes */ void const * bitstream; /** The number of data bytes */ uint32_t bitstream_bytes; } VdpBitstreamBuffer; /** * \brief A generic "picture information" type. * * This type serves solely to document the expected usage of a * generic (void *) function parameter. In actual usage, the * application is expected to physically provide a pointer to an * instance of one of the "real" VdpPictureInfo* structures, * picking the type appropriate for the decoder object in * question. */ typedef void VdpPictureInfo; /** * \brief Picture parameter information for an MPEG 1 or MPEG 2 * picture. * * Note: References to bitstream fields below may refer to data literally parsed * from the bitstream, or derived from the bitstream using a mechanism described * in the specification. */ typedef struct { /** * Reference used by B and P frames. * Set to VDP_INVALID_HANDLE when not used. */ VdpVideoSurface forward_reference; /** * Reference used by B frames. * Set to VDP_INVALID_HANDLE when not used. */ VdpVideoSurface backward_reference; /** Number of slices in the bitstream provided. */ uint32_t slice_count; /** \name MPEG bitstream * * Copies of the MPEG bitstream fields. * @{ */ uint8_t picture_structure; uint8_t picture_coding_type; uint8_t intra_dc_precision; uint8_t frame_pred_frame_dct; uint8_t concealment_motion_vectors; uint8_t intra_vlc_format; uint8_t alternate_scan; uint8_t q_scale_type; uint8_t top_field_first; /** MPEG-1 only. For MPEG-2, set to 0. */ uint8_t full_pel_forward_vector; /** MPEG-1 only. For MPEG-2, set to 0. */ uint8_t full_pel_backward_vector; /** For MPEG-1, fill both horizontal and vertical entries. */ uint8_t f_code[2][2]; /** Convert to raster order. */ uint8_t intra_quantizer_matrix[64]; /** Convert to raster order. 
*/ uint8_t non_intra_quantizer_matrix[64]; /** @} */ } VdpPictureInfoMPEG1Or2; /** * \brief Information about an H.264 reference frame * * Note: References to bitstream fields below may refer to data literally parsed * from the bitstream, or derived from the bitstream using a mechanism described * in the specification. */ typedef struct { /** * The surface that contains the reference image. * Set to VDP_INVALID_HANDLE for unused entries. */ VdpVideoSurface surface; /** Is this a long term reference (else short term). */ VdpBool is_long_term; /** * Is the top field used as a reference. * Set to VDP_FALSE for unused entries. */ VdpBool top_is_reference; /** * Is the bottom field used as a reference. * Set to VDP_FALSE for unused entries. */ VdpBool bottom_is_reference; /** [0]: top, [1]: bottom */ int32_t field_order_cnt[2]; /** * Copy of the H.264 bitstream field: * frame_num from slice_header for short-term references, * LongTermPicNum from decoding algorithm for long-term references. */ uint16_t frame_idx; } VdpReferenceFrameH264; /** * \brief Picture parameter information for an H.264 picture. * * Note: The \ref referenceFrames array must contain the "DPB" as * defined by the H.264 specification. In particular, once a * reference frame has been decoded to a surface, that surface must * continue to appear in the DPB until no longer required to predict * any future frame. Once a surface is removed from the DPB, it can * no longer be used as a reference, unless decoded again. * * Also note that only surfaces previously generated using \ref * VdpDecoderRender may be used as reference frames. In particular, * surfaces filled using any "put bits" API will not work. * * Note: References to bitstream fields below may refer to data literally parsed * from the bitstream, or derived from the bitstream using a mechanism described * in the specification. 
 *
 * Note: VDPAU clients must use VdpPictureInfoH264Predictive to describe the
 * attributes of a frame being decoded with
 * VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE.
 */
typedef struct {
    /** Number of slices in the bitstream provided. */
    uint32_t slice_count;
    /** [0]: top, [1]: bottom */
    int32_t  field_order_cnt[2];
    /** Will the decoded frame be used as a reference later. */
    VdpBool  is_reference;

    /** \name H.264 bitstream
     *
     * Copies of the H.264 bitstream fields.
     * @{ */
    uint16_t frame_num;
    uint8_t  field_pic_flag;
    uint8_t  bottom_field_flag;
    uint8_t  num_ref_frames;
    uint8_t  mb_adaptive_frame_field_flag;
    uint8_t  constrained_intra_pred_flag;
    uint8_t  weighted_pred_flag;
    uint8_t  weighted_bipred_idc;
    uint8_t  frame_mbs_only_flag;
    uint8_t  transform_8x8_mode_flag;
    int8_t   chroma_qp_index_offset;
    int8_t   second_chroma_qp_index_offset;
    int8_t   pic_init_qp_minus26;
    uint8_t  num_ref_idx_l0_active_minus1;
    uint8_t  num_ref_idx_l1_active_minus1;
    uint8_t  log2_max_frame_num_minus4;
    uint8_t  pic_order_cnt_type;
    uint8_t  log2_max_pic_order_cnt_lsb_minus4;
    uint8_t  delta_pic_order_always_zero_flag;
    uint8_t  direct_8x8_inference_flag;
    uint8_t  entropy_coding_mode_flag;
    uint8_t  pic_order_present_flag;
    uint8_t  deblocking_filter_control_present_flag;
    uint8_t  redundant_pic_cnt_present_flag;
    /** Convert to raster order. */
    uint8_t  scaling_lists_4x4[6][16];
    /** Convert to raster order. */
    uint8_t  scaling_lists_8x8[2][64];
    /** @} */

    /** See \ref VdpPictureInfoH264 for instructions regarding this field. */
    VdpReferenceFrameH264 referenceFrames[16];
} VdpPictureInfoH264;

/**
 * \brief Picture parameter information for an H.264 Hi444PP picture.
 *
 * Note: VDPAU clients must use VdpPictureInfoH264Predictive to describe the
 * attributes of a frame being decoded with
 * VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE.
 *
 * Note: software drivers may choose to honor values of
 * qpprime_y_zero_transform_bypass_flag greater than 1 for internal use.
 */
typedef struct {
    /** \ref VdpPictureInfoH264 struct. */
    VdpPictureInfoH264 pictureInfo;

    /** \name H.264 bitstream
     *
     * Copies of the H.264 bitstream fields.
     * @{ */
    /**
     * 0 - lossless disabled
     * 1 - lossless enabled
     */
    uint8_t qpprime_y_zero_transform_bypass_flag;
    /**
     * 0 - disabled
     * 1 - enabled
     */
    uint8_t separate_colour_plane_flag;
    /** @} */
} VdpPictureInfoH264Predictive;

/**
 * \brief Picture parameter information for a VC1 picture.
 *
 * Note: References to bitstream fields below may refer to data literally parsed
 * from the bitstream, or derived from the bitstream using a mechanism described
 * in the specification.
 */
typedef struct {
    /**
     * Reference used by B and P frames.
     * Set to VDP_INVALID_HANDLE when not used.
     */
    VdpVideoSurface forward_reference;
    /**
     * Reference used by B frames.
     * Set to VDP_INVALID_HANDLE when not used.
     */
    VdpVideoSurface backward_reference;

    /** Number of slices in the bitstream provided. */
    uint32_t slice_count;
    /** I=0, P=1, B=3, BI=4 from 7.1.1.4. */
    uint8_t  picture_type;
    /** Progressive=0, Frame-interlace=2, Field-interlace=3; see VC-1 7.1.1.15. */
    uint8_t  frame_coding_mode;

    /** \name VC-1 bitstream
     *
     * Copies of the VC-1 bitstream fields.
     * @{ */
    /** See VC-1 6.1.5. */
    uint8_t postprocflag;
    /** See VC-1 6.1.8. */
    uint8_t pulldown;
    /** See VC-1 6.1.9. */
    uint8_t interlace;
    /** See VC-1 6.1.10. */
    uint8_t tfcntrflag;
    /** See VC-1 6.1.11. */
    uint8_t finterpflag;
    /** See VC-1 6.1.3. */
    uint8_t psf;
    /** See VC-1 6.2.8. */
    uint8_t dquant;
    /** See VC-1 6.2.3. */
    uint8_t panscan_flag;
    /** See VC-1 6.2.4. */
    uint8_t refdist_flag;
    /** See VC-1 6.2.11. */
    uint8_t quantizer;
    /** See VC-1 6.2.7. */
    uint8_t extended_mv;
    /** See VC-1 6.2.14. */
    uint8_t extended_dmv;
    /** See VC-1 6.2.10. */
    uint8_t overlap;
    /** See VC-1 6.2.9. */
    uint8_t vstransform;
    /** See VC-1 6.2.5. */
    uint8_t loopfilter;
    /** See VC-1 6.2.6. */
    uint8_t fastuvmc;
    /** See VC-1 6.12.15. */
    uint8_t range_mapy_flag;
    uint8_t range_mapy;
    /** See VC-1 6.2.16. */
    uint8_t range_mapuv_flag;
    uint8_t range_mapuv;
    /**
     * See VC-1 J.1.10.
     * Only used by simple and main profiles.
     */
    uint8_t multires;
    /**
     * See VC-1 J.1.16.
     * Only used by simple and main profiles.
     */
    uint8_t syncmarker;
    /**
     * VC-1 SP/MP range reduction control.
     * Only used by simple and main profiles.
     * Bit 0: Copy of rangered VC-1 bitstream field; See VC-1 J.1.17.
     * Bit 1: Copy of rangeredfrm VC-1 bitstream field; See VC-1 7.1.13.
     */
    uint8_t rangered;
    /**
     * See VC-1 J.1.17.
     * Only used by simple and main profiles.
     */
    uint8_t maxbframes;
    /** @} */

    /**
     * Out-of-loop deblocking enable.
     * Bit 0 of POSTPROC from VC-1 7.1.1.27
     * Note that bit 1 of POSTPROC (dering enable) should not be included.
     */
    uint8_t deblockEnable;
    /**
     * Parameter used by VC-1 Annex H deblocking algorithm. Note that VDPAU
     * implementations may choose which deblocking algorithm to use.
     * See VC-1 7.1.1.6
     */
    uint8_t pquant;
} VdpPictureInfoVC1;

/**
 * \brief Picture parameter information for an MPEG-4 Part 2 picture.
 *
 * Note: References to bitstream fields below may refer to data literally parsed
 * from the bitstream, or derived from the bitstream using a mechanism described
 * in the specification.
 */
typedef struct {
    /**
     * Reference used by B and P frames.
     * Set to VDP_INVALID_HANDLE when not used.
     */
    VdpVideoSurface forward_reference;
    /**
     * Reference used by B frames.
     * Set to VDP_INVALID_HANDLE when not used.
     */
    VdpVideoSurface backward_reference;

    /** \name MPEG 4 part 2 bitstream
     *
     * Copies of the MPEG 4 part 2 bitstream fields.
     * @{ */
    int32_t  trd[2];
    int32_t  trb[2];
    uint16_t vop_time_increment_resolution;
    uint8_t  vop_coding_type;
    uint8_t  vop_fcode_forward;
    uint8_t  vop_fcode_backward;
    uint8_t  resync_marker_disable;
    uint8_t  interlaced;
    uint8_t  quant_type;
    uint8_t  quarter_sample;
    uint8_t  short_video_header;
    /** Derived from vop_rounding_type bitstream field. */
    uint8_t  rounding_control;
    uint8_t  alternate_vertical_scan_flag;
    uint8_t  top_field_first;
    uint8_t  intra_quantizer_matrix[64];
    uint8_t  non_intra_quantizer_matrix[64];
    /** @} */
} VdpPictureInfoMPEG4Part2;

/**
 * \brief Picture parameter information for a DivX 4 picture.
 *
 * Due to similarities between MPEG-4 Part 2 and DivX 4, the picture
 * parameter structure is re-used.
 */
typedef VdpPictureInfoMPEG4Part2 VdpPictureInfoDivX4;

/**
 * \brief Picture parameter information for a DivX 5 picture.
 *
 * Due to similarities between MPEG-4 Part 2 and DivX 5, the picture
 * parameter structure is re-used.
 */
typedef VdpPictureInfoMPEG4Part2 VdpPictureInfoDivX5;

/**
 * \brief Picture parameter information for an H.265/HEVC picture.
 *
 * References to bitstream fields below may refer to data literally parsed from
 * the bitstream, or derived from the bitstream using a mechanism described in
 * Rec. ITU-T H.265 (04/2013), hereafter referred to as "the H.265/HEVC
 * Specification".
 *
 * VDPAU H.265/HEVC implementations implement the portion of the decoding
 * process described by clauses 8.4, 8.5, 8.6 and 8.7 of the
 * H.265/HEVC Specification. VdpPictureInfoHEVC provides enough data
 * to complete this portion of the decoding process, plus additional
 * information not defined in the H.265/HEVC Specification that may be
 * useful to particular implementations.
 *
 * Client applications must supply every field in this struct.
 */
typedef struct {
    /** \name HEVC Sequence Parameter Set
     *
     * Copies of the HEVC Sequence Parameter Set bitstream fields.
     * @{ */
    uint8_t  chroma_format_idc;
    /** Only valid if chroma_format_idc == 3. Ignored otherwise. */
    uint8_t  separate_colour_plane_flag;
    uint32_t pic_width_in_luma_samples;
    uint32_t pic_height_in_luma_samples;
    uint8_t  bit_depth_luma_minus8;
    uint8_t  bit_depth_chroma_minus8;
    uint8_t  log2_max_pic_order_cnt_lsb_minus4;
    /** Provides the value corresponding to the nuh_temporal_id of the frame
        to be decoded.
 */
    uint8_t  sps_max_dec_pic_buffering_minus1;
    uint8_t  log2_min_luma_coding_block_size_minus3;
    uint8_t  log2_diff_max_min_luma_coding_block_size;
    uint8_t  log2_min_transform_block_size_minus2;
    uint8_t  log2_diff_max_min_transform_block_size;
    uint8_t  max_transform_hierarchy_depth_inter;
    uint8_t  max_transform_hierarchy_depth_intra;
    uint8_t  scaling_list_enabled_flag;
    /** Scaling lists, in diagonal order, to be used for this frame. */
    /** Scaling List for 4x4 quantization matrix,
        indexed as ScalingList4x4[matrixId][i]. */
    uint8_t  ScalingList4x4[6][16];
    /** Scaling List for 8x8 quantization matrix,
        indexed as ScalingList8x8[matrixId][i]. */
    uint8_t  ScalingList8x8[6][64];
    /** Scaling List for 16x16 quantization matrix,
        indexed as ScalingList16x16[matrixId][i]. */
    uint8_t  ScalingList16x16[6][64];
    /** Scaling List for 32x32 quantization matrix,
        indexed as ScalingList32x32[matrixId][i]. */
    uint8_t  ScalingList32x32[2][64];
    /** Scaling List DC Coefficients for 16x16,
        indexed as ScalingListDCCoeff16x16[matrixId]. */
    uint8_t  ScalingListDCCoeff16x16[6];
    /** Scaling List DC Coefficients for 32x32,
        indexed as ScalingListDCCoeff32x32[matrixId]. */
    uint8_t  ScalingListDCCoeff32x32[2];
    uint8_t  amp_enabled_flag;
    uint8_t  sample_adaptive_offset_enabled_flag;
    uint8_t  pcm_enabled_flag;
    /** Only needs to be set if pcm_enabled_flag is set. Ignored otherwise. */
    uint8_t  pcm_sample_bit_depth_luma_minus1;
    /** Only needs to be set if pcm_enabled_flag is set. Ignored otherwise. */
    uint8_t  pcm_sample_bit_depth_chroma_minus1;
    /** Only needs to be set if pcm_enabled_flag is set. Ignored otherwise. */
    uint8_t  log2_min_pcm_luma_coding_block_size_minus3;
    /** Only needs to be set if pcm_enabled_flag is set. Ignored otherwise. */
    uint8_t  log2_diff_max_min_pcm_luma_coding_block_size;
    /** Only needs to be set if pcm_enabled_flag is set. Ignored otherwise. */
    uint8_t  pcm_loop_filter_disabled_flag;
    /** Per spec, when zero, assume short_term_ref_pic_set_sps_flag
        is also zero. */
    uint8_t  num_short_term_ref_pic_sets;
    uint8_t  long_term_ref_pics_present_flag;
    /** Only needed if long_term_ref_pics_present_flag is set. Ignored
        otherwise. */
    uint8_t  num_long_term_ref_pics_sps;
    uint8_t  sps_temporal_mvp_enabled_flag;
    uint8_t  strong_intra_smoothing_enabled_flag;
    /** @} */

    /** \name HEVC Picture Parameter Set
     *
     * Copies of the HEVC Picture Parameter Set bitstream fields.
     * @{ */
    uint8_t  dependent_slice_segments_enabled_flag;
    uint8_t  output_flag_present_flag;
    uint8_t  num_extra_slice_header_bits;
    uint8_t  sign_data_hiding_enabled_flag;
    uint8_t  cabac_init_present_flag;
    uint8_t  num_ref_idx_l0_default_active_minus1;
    uint8_t  num_ref_idx_l1_default_active_minus1;
    int8_t   init_qp_minus26;
    uint8_t  constrained_intra_pred_flag;
    uint8_t  transform_skip_enabled_flag;
    uint8_t  cu_qp_delta_enabled_flag;
    /** Only needed if cu_qp_delta_enabled_flag is set. Ignored otherwise. */
    uint8_t  diff_cu_qp_delta_depth;
    int8_t   pps_cb_qp_offset;
    int8_t   pps_cr_qp_offset;
    uint8_t  pps_slice_chroma_qp_offsets_present_flag;
    uint8_t  weighted_pred_flag;
    uint8_t  weighted_bipred_flag;
    uint8_t  transquant_bypass_enabled_flag;
    uint8_t  tiles_enabled_flag;
    uint8_t  entropy_coding_sync_enabled_flag;
    /** Only valid if tiles_enabled_flag is set. Ignored otherwise. */
    uint8_t  num_tile_columns_minus1;
    /** Only valid if tiles_enabled_flag is set. Ignored otherwise. */
    uint8_t  num_tile_rows_minus1;
    /** Only valid if tiles_enabled_flag is set. Ignored otherwise. */
    uint8_t  uniform_spacing_flag;
    /** Only need to set 0..num_tile_columns_minus1. The struct
        definition reserves up to the maximum of 20. Invalid values are
        ignored. */
    uint16_t column_width_minus1[20];
    /** Only need to set 0..num_tile_rows_minus1. The struct
        definition reserves up to the maximum of 22. Invalid values are
        ignored. */
    uint16_t row_height_minus1[22];
    /** Only needed if tiles_enabled_flag is set. Invalid values are
        ignored. */
    uint8_t  loop_filter_across_tiles_enabled_flag;
    uint8_t  pps_loop_filter_across_slices_enabled_flag;
    uint8_t  deblocking_filter_control_present_flag;
    /** Only valid if deblocking_filter_control_present_flag is set. Ignored
        otherwise. */
    uint8_t  deblocking_filter_override_enabled_flag;
    /** Only valid if deblocking_filter_control_present_flag is set. Ignored
        otherwise. */
    uint8_t  pps_deblocking_filter_disabled_flag;
    /** Only valid if deblocking_filter_control_present_flag is set and
        pps_deblocking_filter_disabled_flag is not set. Ignored otherwise. */
    int8_t   pps_beta_offset_div2;
    /** Only valid if deblocking_filter_control_present_flag is set and
        pps_deblocking_filter_disabled_flag is not set. Ignored otherwise. */
    int8_t   pps_tc_offset_div2;
    uint8_t  lists_modification_present_flag;
    uint8_t  log2_parallel_merge_level_minus2;
    uint8_t  slice_segment_header_extension_present_flag;

    /** \name HEVC Slice Segment Header
     *
     * Copies of the HEVC Slice Segment Header bitstream fields and calculated
     * values detailed in the specification.
     * @{ */
    /** Set to 1 if nal_unit_type is equal to IDR_W_RADL or IDR_N_LP.
        Set to zero otherwise. */
    uint8_t  IDRPicFlag;
    /** Set to 1 if nal_unit_type in the range of BLA_W_LP to
        RSV_IRAP_VCL23, inclusive. Set to zero otherwise. */
    uint8_t  RAPPicFlag;
    /** See section 7.4.7.1 of the specification. */
    uint8_t  CurrRpsIdx;
    /** See section 7.4.7.2 of the specification. */
    uint32_t NumPocTotalCurr;
    /** Corresponds to specification field, NumDeltaPocs[RefRpsIdx].
        Only applicable when short_term_ref_pic_set_sps_flag == 0.
        Implementations will ignore this value in other cases. See 7.4.8. */
    uint32_t NumDeltaPocsOfRefRpsIdx;
    /** Section 7.6.3.1 of the H.265/HEVC Specification defines the syntax of
        the slice_segment_header. This header contains information that
        some VDPAU implementations may choose to skip. The VDPAU API
        requires client applications to track the number of bits used in the
        slice header for structures associated with short term and long term
        reference pictures. First, VDPAU requires the number of bits used by
        the short_term_ref_pic_set array in the slice_segment_header. */
    uint32_t NumShortTermPictureSliceHeaderBits;
    /** Second, VDPAU requires the number of bits used for long term reference
        pictures in the slice_segment_header. This is equal to the number
        of bits used for the contents of the block beginning with
        "if(long_term_ref_pics_present_flag)". */
    uint32_t NumLongTermPictureSliceHeaderBits;
    /** @} */

    /** Slice Decoding Process - Picture Order Count */
    /** The value of PicOrderCntVal of the picture in the access unit
        containing the SEI message. The picture being decoded. */
    int32_t  CurrPicOrderCntVal;

    /** Slice Decoding Process - Reference Picture Sets */
    /** Array of video reference surfaces.
        Set any unused positions to VDP_INVALID_HANDLE. */
    VdpVideoSurface RefPics[16];
    /** Array of picture order counts. These correspond to positions
        in the RefPics array. */
    int32_t  PicOrderCntVal[16];
    /** Array used to specify whether a particular RefPic is
        a long term reference. A value of "1" indicates a long-term
        reference. */
    uint8_t  IsLongTerm[16];
    /** Copy of specification field, see Section 8.3.2 of the
        H.265/HEVC Specification. */
    uint8_t  NumPocStCurrBefore;
    /** Copy of specification field, see Section 8.3.2 of the
        H.265/HEVC Specification. */
    uint8_t  NumPocStCurrAfter;
    /** Copy of specification field, see Section 8.3.2 of the
        H.265/HEVC Specification. */
    uint8_t  NumPocLtCurr;
    /** Reference Picture Set list, one of the short-term RPS. These
        correspond to positions in the RefPics array. */
    uint8_t  RefPicSetStCurrBefore[8];
    /** Reference Picture Set list, one of the short-term RPS. These
        correspond to positions in the RefPics array. */
    uint8_t  RefPicSetStCurrAfter[8];
    /** Reference Picture Set list, one of the long-term RPS. These
        correspond to positions in the RefPics array. */
    uint8_t  RefPicSetLtCurr[8];
} VdpPictureInfoHEVC;

/**
 * \brief Decode a compressed field/frame and render the result
 *        into a \ref VdpVideoSurface "VdpVideoSurface".
 * \param[in] decoder The decoder object that will perform the
 *       decode operation.
 * \param[in] target The video surface to render to.
 * \param[in] picture_info A (pointer to a) structure containing
 *       information about the picture to be decoded. Note that
 *       the appropriate type of VdpPictureInfo* structure must
 *       be provided to match the profile that the decoder was
 *       created for.
 * \param[in] bitstream_buffer_count The number of bitstream
 *       buffers containing compressed data for this picture.
 * \param[in] bitstream_buffers An array of bitstream buffers.
 * \return VdpStatus The completion status of the operation.
 *
 * See \ref video_mixer_usage for additional information.
 */
typedef VdpStatus VdpDecoderRender(
    VdpDecoder                 decoder,
    VdpVideoSurface            target,
    VdpPictureInfo const *     picture_info,
    uint32_t                   bitstream_buffer_count,
    VdpBitstreamBuffer const * bitstream_buffers
);

/*@}*/

/**
 * \defgroup VdpVideoMixer VdpVideoMixer; Video Post-processing \
 *           and Compositing object
 *
 * VdpVideoMixer can perform some subset of the following
 * post-processing steps on video:
 * - De-interlacing
 *   - Various types, with or without inverse telecine
 * - Noise-reduction
 * - Sharpness adjustment
 * - Color space conversion to RGB
 * - Chroma format upscaling to 4:4:4
 *
 * A VdpVideoMixer takes a source \ref VdpVideoSurface
 * "VdpVideoSurface" VdpVideoSurface and performs various video
 * processing steps on it (potentially using information from
 * past or future video surfaces). It scales the video and
 * converts it to RGB, then optionally composites it with
 * multiple auxiliary \ref VdpOutputSurface "VdpOutputSurface"s
 * before writing the result to the destination \ref
 * VdpOutputSurface "VdpOutputSurface".
 *
 * The video mixer compositing model is as follows:
 *
 * - A rectangle will be rendered on an output surface. No
 *   pixels will be rendered outside of this output rectangle.
 *   The contents of this rectangle will be a composite of many
 *   layers.
 *
 * - The first layer is the background color. The background
 *   color will fill the entire rectangle.
 *
 * - The second layer is the processed video which has been
 *   converted to RGB. These pixels will overwrite the
 *   background color of the first layer except where the second
 *   layer's rectangle does not completely cover the output
 *   rectangle. In those regions the background color will
 *   continue to show. If any portion of the second layer's
 *   output rectangle is outside of the output rectangle, those
 *   portions will be clipped.
 *
 * - The third layer contains some number of auxiliary layers
 *   (in the form of \ref VdpOutputSurface "VdpOutputSurface"s)
 *   which will be composited using the alpha value from
 *   those surfaces. The compositing operations are equivalent
 *   to rendering with \ref VdpOutputSurfaceRenderOutputSurface
 *   using a source blend factor of SOURCE_ALPHA, a destination
 *   blend factor of ONE_MINUS_SOURCE_ALPHA and an equation of
 *   ADD.
 *
 * @{
 */

/**
 * \brief A VdpVideoMixer feature that must be requested at
 *        creation time to be used.
 *
 * Certain advanced VdpVideoMixer features are optional, and the
 * ability to use those features at all must be requested when
 * the VdpVideoMixer object is created. Each feature is named via
 * a specific VdpVideoMixerFeature value.
 *
 * Once requested, these features are permanently available
 * within that specific VdpVideoMixer object. All features that
 * are not explicitly requested at creation time default to
 * being permanently unavailable.
 *
 * Even when requested, all features default to being initially
 * disabled. However, applications can subsequently enable and
 * disable features at any time. See \ref
 * VdpVideoMixerSetFeatureEnables.
 *
 * Some features allow configuration of their operation. Each
 * configurable item is an \ref VdpVideoMixerAttribute. These
 * attributes may be manipulated at any time using \ref
 * VdpVideoMixerSetAttributeValues.
 */
typedef uint32_t VdpVideoMixerFeature;

/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * When requested and enabled, motion adaptive temporal
 * deinterlacing will be used on interlaced content.
 *
 * When multiple de-interlacing options are requested and
 * enabled, the back-end implementation chooses the best
 * algorithm to apply.
 */
#define VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL         ((VdpVideoMixerFeature)0)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * When requested and enabled, this enables a more advanced
 * version of temporal de-interlacing, that additionally uses
 * edge-guided spatial interpolation.
 *
 * When multiple de-interlacing options are requested and
 * enabled, the back-end implementation chooses the best
 * algorithm to apply.
 */
#define VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL ((VdpVideoMixerFeature)1)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * When requested and enabled, cadence detection will be enabled
 * on interlaced content and the video mixer will try to extract
 * progressive frames from pull-down material.
 */
#define VDP_VIDEO_MIXER_FEATURE_INVERSE_TELECINE             ((VdpVideoMixerFeature)2)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * When requested and enabled, a noise reduction algorithm will
 * be applied to the video.
 */
#define VDP_VIDEO_MIXER_FEATURE_NOISE_REDUCTION              ((VdpVideoMixerFeature)3)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * When requested and enabled, a sharpening algorithm will be
 * applied to the video.
 */
#define VDP_VIDEO_MIXER_FEATURE_SHARPNESS                    ((VdpVideoMixerFeature)4)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * When requested and enabled, the alpha of the rendered
 * surface, which is normally set to the alpha of the background
 * color, will be forced to 0.0 on pixels corresponding to
 * source video surface luminance values in the range specified
 * by attributes \ref VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MIN_LUMA
 * to \ref VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MAX_LUMA. This
 * keying is performed after scaling and de-interlacing.
 */
#define VDP_VIDEO_MIXER_FEATURE_LUMA_KEY                     ((VdpVideoMixerFeature)5)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * A VDPAU implementation may support multiple scaling algorithms of
 * differing quality, and may potentially support a different subset
 * of algorithms on different hardware.
 *
 * In some cases, higher quality algorithms may require more resources
 * (memory size, memory bandwidth, etc.) to operate. Hence, these high
 * quality algorithms must be explicitly requested and enabled by the client
 * application. This allows applications operating in a resource-constrained
 * environment to have some level of control over resource usage.
 *
 * Basic scaling is always built into any video mixer, and is known as
 * level 0. Scaling quality increases beginning with optional level 1,
 * through optional level 9.
 *
 * If an application requests and enables multiple high quality scaling
 * algorithms, the highest level enabled scaling algorithm will be used.
 */
#define VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1      ((VdpVideoMixerFeature)11)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * See \ref VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1 for details.
 */
#define VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L2      ((VdpVideoMixerFeature)12)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * See \ref VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1 for details.
 */
#define VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L3      ((VdpVideoMixerFeature)13)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * See \ref VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1 for details.
 */
#define VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L4      ((VdpVideoMixerFeature)14)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * See \ref VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1 for details.
 */
#define VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L5      ((VdpVideoMixerFeature)15)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * See \ref VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1 for details.
 */
#define VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L6      ((VdpVideoMixerFeature)16)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * See \ref VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1 for details.
 */
#define VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L7      ((VdpVideoMixerFeature)17)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * See \ref VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1 for details.
 */
#define VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L8      ((VdpVideoMixerFeature)18)
/**
 * \hideinitializer
 * \brief A VdpVideoMixerFeature.
 *
 * See \ref VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1 for details.
 */
#define VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L9      ((VdpVideoMixerFeature)19)

/**
 * \brief A VdpVideoMixer creation parameter.
 *
 * When a VdpVideoMixer is created, certain parameters may be
 * supplied. Each parameter is named via a specific
 * VdpVideoMixerParameter value.
 *
 * Each parameter has a specific type, and specific default
 * value if not specified at VdpVideoMixer creation time. The
 * application may query the legal supported range for some
 * parameters.
 */
typedef uint32_t VdpVideoMixerParameter;

/**
 * \hideinitializer
 * \brief The exact width of input video surfaces.
 *
 * This parameter's type is uint32_t.
 *
 * This parameter defaults to 0 if not specified, which entails
 * that it must be specified.
 *
 * The application may query this parameter's supported
 * range.
 */
#define VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH  ((VdpVideoMixerParameter)0)
/**
 * \hideinitializer
 * \brief The exact height of input video surfaces.
 *
 * This parameter's type is uint32_t.
 *
 * This parameter defaults to 0 if not specified, which entails
 * that it must be specified.
 *
 * The application may query this parameter's supported
 * range.
 */
#define VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT ((VdpVideoMixerParameter)1)
/**
 * \hideinitializer
 * \brief The chroma type of the input video surfaces the mixer will
 *        process.
 *
 * This parameter's type is VdpChromaType.
 *
 * If not specified, this parameter defaults to
 * VDP_CHROMA_TYPE_420.
 *
 * The application may not query this parameter's supported
 * range, since it is a potentially disjoint enumeration.
 */
#define VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE          ((VdpVideoMixerParameter)2)
/**
 * \hideinitializer
 * \brief The number of auxiliary layers in the mixer's
 *        compositing model.
 *
 * Note that this indicates the maximum number of layers that
 * may be processed by a given \ref VdpVideoMixer "VdpVideoMixer"
 * object. Each individual \ref VdpVideoMixerRender invocation
 * may choose to use a different number of actual layers, from 0
 * up to this limit.
 *
 * This attribute's type is uint32_t.
 *
 * If not specified, this parameter defaults to 0.
 *
 * The application may query this parameter's supported
 * range.
 */
#define VDP_VIDEO_MIXER_PARAMETER_LAYERS               ((VdpVideoMixerParameter)3)

/**
 * \brief An adjustable attribute of VdpVideoMixer operation.
 *
 * Various attributes of VdpVideoMixer operation may be adjusted
 * at any time. Each attribute is named via a specific
 * VdpVideoMixerAttribute value.
 *
 * Each attribute has a specific type, and specific default
 * value if not specified at VdpVideoMixer creation time. The
 * application may query the legal supported range for some
 * attributes.
 */
typedef uint32_t VdpVideoMixerAttribute;

/**
 * \hideinitializer
 * \brief The background color in the VdpVideoMixer's compositing
 *        model.
 *
 * This attribute's type is VdpColor.
 *
 * This parameter defaults to black (all color components 0.0
 * and alpha 1.0).
 *
 * The application may not query this parameter's supported
 * range, since the type is not scalar.
 */
#define VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR        ((VdpVideoMixerAttribute)0)
/**
 * \hideinitializer
 * \brief The color-space conversion matrix used by the
 *        VdpVideoMixer.
 *
 * This attribute's type is \ref VdpCSCMatrix.
 *
 * Note: When using \ref VdpVideoMixerGetAttributeValues to retrieve the
 * current CSC matrix, the attribute_values array must contain a pointer to
 * a pointer to a VdpCSCMatrix (VdpCSCMatrix** as a void *). The get function will
 * either initialize the referenced CSC matrix to the current value, *or*
 * clear the supplied pointer to NULL, if the previous set call supplied a
 * value of NULL in parameter_values, to request the default matrix.
 *
 * \code
 * VdpCSCMatrix   matrix;
 * VdpCSCMatrix * matrix_ptr;
 * void * attribute_values[] = {&matrix_ptr};
 * VdpStatus st = vdp_video_mixer_get_attribute_values(..., attribute_values, ...);
 * \endcode
 *
 * This parameter defaults to a matrix suitable for ITU-R BT.601
 * input surfaces, with no procamp adjustments.
 *
 * The application may not query this parameter's supported
 * range, since the type is not scalar.
 */
#define VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX              ((VdpVideoMixerAttribute)1)
/**
 * \hideinitializer
 * \brief The amount of noise reduction algorithm to apply.
 *
 * This attribute's type is float.
 *
 * This parameter defaults to 0.0, which equates to no noise
 * reduction.
 *
 * The application may query this parameter's supported range.
 * However, the range is fixed as 0.0...1.0.
 */
#define VDP_VIDEO_MIXER_ATTRIBUTE_NOISE_REDUCTION_LEVEL   ((VdpVideoMixerAttribute)2)
/**
 * \hideinitializer
 * \brief The amount of sharpening, or blurring, to apply.
 *
 * This attribute's type is float.
 *
 * This parameter defaults to 0.0, which equates to no
 * sharpening.
 *
 * Positive values request sharpening. Negative values request
 * blurring.
 *
 * The application may query this parameter's supported range.
 * However, the range is fixed as -1.0...1.0.
 */
#define VDP_VIDEO_MIXER_ATTRIBUTE_SHARPNESS_LEVEL         ((VdpVideoMixerAttribute)3)
/**
 * \hideinitializer
 * \brief The minimum luma value for the luma key algorithm.
 *
 * This attribute's type is float.
 *
 * This parameter defaults to 0.0.
 *
 * The application may query this parameter's supported range.
 * However, the range is fixed as 0.0...1.0.
 */
#define VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MIN_LUMA       ((VdpVideoMixerAttribute)4)
/**
 * \hideinitializer
 * \brief The maximum luma value for the luma key algorithm.
 *
 * This attribute's type is float.
 *
 * This parameter defaults to 1.0.
 *
 * The application may query this parameter's supported range.
 * However, the range is fixed as 0.0...1.0.
 */
#define VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MAX_LUMA       ((VdpVideoMixerAttribute)5)
/**
 * \hideinitializer
 * \brief Whether de-interlacers should operate solely on luma, and bob chroma.
 *
 * Note: This attribute only affects advanced de-interlacing algorithms, not
 * bob or weave.
 *
 * This attribute's type is uint8_t.
 *
 * This parameter defaults to 0.
 *
 * The application may query this parameter's supported range.
 * However, the range is fixed as 0 (no/off) ... 1 (yes/on).
 */
#define VDP_VIDEO_MIXER_ATTRIBUTE_SKIP_CHROMA_DEINTERLACE ((VdpVideoMixerAttribute)6)

/**
 * \brief Query the implementation's support for a specific
 *        feature.
 * \param[in] device The device to query.
 * \param[in] feature The feature for which support is to be
 *       queried.
 * \param[out] is_supported Is the specified feature supported?
 * \return VdpStatus The completion status of the operation.
*/ typedef VdpStatus VdpVideoMixerQueryFeatureSupport( VdpDevice device, VdpVideoMixerFeature feature, /* output parameters follow */ VdpBool * is_supported ); /** * \brief Query the implementation's support for a specific * parameter. * \param[in] device The device to query. * \param[in] parameter The parameter for which support is to be * queried. * \param[out] is_supported Is the specified parameter * supported? * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpVideoMixerQueryParameterSupport( VdpDevice device, VdpVideoMixerParameter parameter, /* output parameters follow */ VdpBool * is_supported ); /** * \brief Query the implementation's support for a specific * attribute. * \param[in] device The device to query. * \param[in] feature The feature for which support is to be * queried. * \param[out] is_supported Is the specified feature supported? * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpVideoMixerQueryAttributeSupport( VdpDevice device, VdpVideoMixerAttribute attribute, /* output parameters follow */ VdpBool * is_supported ); /** * \brief Query the implementation's supported for a specific * parameter. * \param[in] device The device to query. * \param[in] parameter The parameter for which support is to be * queried. * \param[out] min_value The minimum supported value. * \param[out] max_value The maximum supported value. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpVideoMixerQueryParameterValueRange( VdpDevice device, VdpVideoMixerParameter parameter, /* output parameters follow */ void * min_value, void * max_value ); /** * \brief Query the implementation's supported for a specific * attribute. * \param[in] device The device to query. * \param[in] attribute The attribute for which support is to be * queried. * \param[out] min_value The minimum supported value. * \param[out] max_value The maximum supported value. 
* \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpVideoMixerQueryAttributeValueRange( VdpDevice device, VdpVideoMixerAttribute attribute, /* output parameters follow */ void * min_value, void * max_value ); /** * \brief An opaque handle representing a VdpVideoMixer object. */ typedef uint32_t VdpVideoMixer; /** * \brief Create a VdpVideoMixer. * \param[in] device The device that will contain the mixer. * \param[in] feature_count The number of features to request. * \param[in] features The list of features to request. * \param[in] parameter_count The number of parameters to set. * \param[in] parameters The list of parameters to set. * \param[in] parameter_values The values for the parameters. Note that each * entry in the value array is a pointer to the actual value. In other * words, the values themselves are not cast to "void *" and passed * "inside" the array. * \param[out] mixer The new mixer's handle. * \return VdpStatus The completion status of the operation. * * Initially, all requested features will be disabled. They can * be enabled using \ref VdpVideoMixerSetFeatureEnables. * * Initially, all attributes will have default values. Values * can be changed using \ref VdpVideoMixerSetAttributeValues. */ typedef VdpStatus VdpVideoMixerCreate( VdpDevice device, // The set of features to request uint32_t feature_count, VdpVideoMixerFeature const * features, // The parameters used during creation uint32_t parameter_count, VdpVideoMixerParameter const * parameters, void const * const * parameter_values, /* output parameters follow */ VdpVideoMixer * mixer ); /** * \brief Enable or disable features. * \param[in] mixer The mixer to manipulate. * \param[in] feature_count The number of features to * enable/disable. * \param[in] features The list of features to enable/disable. * \param[in] feature_enables The list of new feature enable * values. * \return VdpStatus The completion status of the operation. 
 */
typedef VdpStatus VdpVideoMixerSetFeatureEnables(
    VdpVideoMixer mixer,
    uint32_t feature_count,
    VdpVideoMixerFeature const * features,
    VdpBool const * feature_enables
);

/**
 * \brief Set attribute values
 * \param[in] mixer The mixer to manipulate.
 * \param[in] attribute_count The number of attributes to set.
 * \param[in] attributes The list of attributes to set.
 * \param[in] attribute_values The values for the attributes. Note that each
 *     entry in the value array is a pointer to the actual value. In other
 *     words, the values themselves are not cast to "void *" and passed
 *     "inside" the array. A NULL pointer requests that the default value be
 *     set for that attribute.
 * \return VdpStatus The completion status of the operation.
 */
typedef VdpStatus VdpVideoMixerSetAttributeValues(
    VdpVideoMixer mixer,
    uint32_t attribute_count,
    VdpVideoMixerAttribute const * attributes,
    void const * const * attribute_values
);

/**
 * \brief Retrieve whether features were requested at creation
 *        time.
 * \param[in] mixer The mixer to query.
 * \param[in] feature_count The number of features to query.
 * \param[in] features The list of features to query.
 * \param[out] feature_supports A list of values indicating
 *        whether the feature was requested, and hence is
 *        available.
 * \return VdpStatus The completion status of the operation.
 */
typedef VdpStatus VdpVideoMixerGetFeatureSupport(
    VdpVideoMixer mixer,
    uint32_t feature_count,
    VdpVideoMixerFeature const * features,
    /* output parameters follow */
    VdpBool * feature_supports
);

/**
 * \brief Retrieve whether features are enabled.
 * \param[in] mixer The mixer to query.
 * \param[in] feature_count The number of features to query.
 * \param[in] features The list of features to query.
 * \param[out] feature_enables A list of values indicating
 *        whether the feature is enabled.
 * \return VdpStatus The completion status of the operation.
 */
typedef VdpStatus VdpVideoMixerGetFeatureEnables(
    VdpVideoMixer mixer,
    uint32_t feature_count,
    VdpVideoMixerFeature const * features,
    /* output parameters follow */
    VdpBool * feature_enables
);

/**
 * \brief Retrieve parameter values given at creation time.
 * \param[in] mixer The mixer to query.
 * \param[in] parameter_count The number of parameters to query.
 * \param[in] parameters The list of parameters to query.
 * \param[out] parameter_values The list of current values for
 *     the parameters. Note that each entry in the value array is a pointer to
 *     storage that will receive the actual value. If the attribute's type is
 *     a pointer itself, please closely read the documentation for that
 *     attribute type for any other data passing requirements.
 * \return VdpStatus The completion status of the operation.
 */
typedef VdpStatus VdpVideoMixerGetParameterValues(
    VdpVideoMixer mixer,
    uint32_t parameter_count,
    VdpVideoMixerParameter const * parameters,
    /* output parameters follow */
    void * const * parameter_values
);

/**
 * \brief Retrieve current attribute values.
 * \param[in] mixer The mixer to query.
 * \param[in] attribute_count The number of attributes to query.
 * \param[in] attributes The list of attributes to query.
 * \param[out] attribute_values The list of current values for
 *     the attributes. Note that each entry in the value array is a pointer to
 *     storage that will receive the actual value. If the attribute's type is
 *     a pointer itself, please closely read the documentation for that
 *     attribute type for any other data passing requirements.
 * \return VdpStatus The completion status of the operation.
 */
typedef VdpStatus VdpVideoMixerGetAttributeValues(
    VdpVideoMixer mixer,
    uint32_t attribute_count,
    VdpVideoMixerAttribute const * attributes,
    /* output parameters follow */
    void * const * attribute_values
);

/**
 * \brief Destroy a VdpVideoMixer.
 * \param[in] mixer The mixer to destroy.
 * \return VdpStatus The completion status of the operation.
 */
typedef VdpStatus VdpVideoMixerDestroy(
    VdpVideoMixer mixer
);

/**
 * \hideinitializer
 * \brief The structure of the picture present in a \ref
 *        VdpVideoSurface "VdpVideoSurface".
 */
typedef enum {
    /**
     * The picture is a field, and is the top field of the surface.
     */
    VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD,
    /**
     * The picture is a field, and is the bottom field of the
     * surface.
     */
    VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD,
    /**
     * The picture is a frame, and hence is the entire surface.
     */
    VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME,
} VdpVideoMixerPictureStructure;

#define VDP_LAYER_VERSION 0

/**
 * \brief Definition of an additional \ref VdpOutputSurface
 *        "VdpOutputSurface" layer in the compositing model.
 */
typedef struct {
    /**
     * This field must be filled with VDP_LAYER_VERSION
     */
    uint32_t struct_version;
    /**
     * The surface to composite from.
     */
    VdpOutputSurface source_surface;
    /**
     * The sub-rectangle of the source surface to use. If NULL, the
     * entire source surface will be used.
     */
    VdpRect const * source_rect;
    /**
     * The sub-rectangle of the destination surface to map
     * this layer into. This rectangle is relative to the entire
     * destination surface. This rectangle will be clipped by \ref
     * VdpVideoMixerRender's \b destination_rect. If NULL, the
     * destination rectangle will be sized to match the source
     * rectangle, and will be located at the origin.
     */
    VdpRect const * destination_rect;
} VdpLayer;

/**
 * \brief Perform a video post-processing and compositing
 *        operation.
 * \param[in] mixer The mixer object that will perform the
 *        mixing/rendering operation.
 * \param[in] background_surface A background image. If set to any value other
 *     than VDP_INVALID_HANDLE, the specific surface will be used instead of
 *     the background color as the first layer in the mixer's compositing
 *     process.
 * \param[in] background_source_rect When background_surface is specified,
 *     this parameter indicates the portion of background_surface that will
 *     be used as the background layer.
The specified region will be * extracted and scaled to match the size of destination_rect. If NULL, * the entire background_surface will be used. * \param[in] current_picture_structure The picture structure of * the field/frame to be processed. This field/frame is * presented in the \b video_surface_current parameter. If * frame, then all \b video_surface_* parameters are * assumed to be frames. If field, then all * video_surface_* parameters are assumed to be fields, * with alternating top/bottom-ness derived from * video_surface_current. * \param[in] video_surfaces_past_count The number of provided * fields/frames prior to the current picture. * \param[in] video_surfaces_past The fields/frames prior to the * current field/frame. Note that array index 0 is the * field/frame temporally nearest to the current * field/frame, with increasing array indices used for * older frames. Unavailable entries may be set to * \ref VDP_INVALID_HANDLE. * \param[in] video_surface_current The field/frame to be * processed. * \param[in] video_surfaces_future_count The number of provided * fields/frames following the current picture. * \param[in] video_surfaces_future The fields/frames that * follow the current field/frame. Note that array index 0 * is the field/frame temporally nearest to the current * field/frame, with increasing array indices used for * newer frames. Unavailable entries may be set to \ref * VDP_INVALID_HANDLE. * \param[in] video_source_rect The sub-rectangle of the source * video surface to extract and process. If NULL, the * entire surface will be used. Left/right and/or top/bottom * co-ordinates may be swapped to flip the source. Values * from outside the video surface are valid and samples * at those locations will be taken from the nearest edge. * \param[in] destination_surface * \param[in] destination_rect The sub-rectangle of the * destination surface to modify. Note that rectangle clips * all other actions. 
* \param[in] destination_video_rect The sub-rectangle of the * destination surface that will contain the processed * video. This rectangle is relative to the entire * destination surface. This rectangle is clipped by \b * destination_rect. If NULL, the destination rectangle * will be sized to match the source rectangle, and will * be located at the origin. * \param[in] layer_count The number of additional layers to * composite above the video. * \param[in] layers The array of additional layers to composite * above the video. * \return VdpStatus The completion status of the operation. * * For a complete discussion of how to use this API, please see * \ref video_mixer_usage. */ typedef VdpStatus VdpVideoMixerRender( VdpVideoMixer mixer, VdpOutputSurface background_surface, VdpRect const * background_source_rect, VdpVideoMixerPictureStructure current_picture_structure, uint32_t video_surface_past_count, VdpVideoSurface const * video_surface_past, VdpVideoSurface video_surface_current, uint32_t video_surface_future_count, VdpVideoSurface const * video_surface_future, VdpRect const * video_source_rect, VdpOutputSurface destination_surface, VdpRect const * destination_rect, VdpRect const * destination_video_rect, uint32_t layer_count, VdpLayer const * layers ); /*@}*/ /** * \defgroup VdpPresentationQueue VdpPresentationQueue; Video \ * presentation (display) object * * The VdpPresentationQueue manages a queue of surfaces and * associated timestamps. For each surface in the queue, once * the associated timestamp is reached, the surface is displayed * to the user. This timestamp-based approach yields high * quality video delivery. * * The exact location of the displayed content is Window System * specific. For this reason, the \ref api_winsys provides an * API to create a \ref VdpPresentationQueueTarget object (e.g. * via \ref VdpPresentationQueueTargetCreateX11) which * encapsulates this information. 
* * Note that the presentation queue performs no scaling of * surfaces to match the display target's size, aspect ratio, * etc. * * Surfaces that are too large to fit into the display target * will be clipped. Surfaces that are too small to fill the * display target will be aligned to the top-left corner of the * display target, with the balance of the display target being * filled with a constant configurable "background" color. * * Note that the presentation queue operates in a manner that is * semantically equivalent to an overlay surface, with any * required color key painting hidden internally. However, * implementations are free to use whatever semantically * equivalent technique they wish. Note that implementations * that actually use color-keyed overlays will typically use * the "background" color as the overlay color key value, so * this color should be chosen with care. * * @{ */ /** * \brief The representation of a point in time. * * VdpTime timestamps are intended to be a high-precision timing * system, potentially independent from any other time domain in * the system. * * Time is represented in units of nanoseconds. The origin * (i.e. the time represented by a value of 0) is implementation * dependent. */ typedef uint64_t VdpTime; /** * \brief An opaque handle representing the location where * video will be presented. * * VdpPresentationQueueTarget are created using a \ref api_winsys * specific API, such as \ref * VdpPresentationQueueTargetCreateX11. */ typedef uint32_t VdpPresentationQueueTarget; /** * \brief Destroy a VdpPresentationQueueTarget. * \param[in] presentation_queue_target The target to destroy. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpPresentationQueueTargetDestroy( VdpPresentationQueueTarget presentation_queue_target ); /** * \brief An opaque handle representing a presentation queue * object. */ typedef uint32_t VdpPresentationQueue; /** * \brief Create a VdpPresentationQueue. 
* \param[in] device The device that will contain the queue. * \param[in] presentation_queue_target The location to display * the content. * \param[out] presentation_queue The new queue's handle. * \return VdpStatus The completion status of the operation. * * Note: The initial value for the background color will be set to * an implementation-defined value. */ typedef VdpStatus VdpPresentationQueueCreate( VdpDevice device, VdpPresentationQueueTarget presentation_queue_target, /* output parameters follow */ VdpPresentationQueue * presentation_queue ); /** * \brief Destroy a VdpPresentationQueue. * \param[in] presentation_queue The queue to destroy. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpPresentationQueueDestroy( VdpPresentationQueue presentation_queue ); /** * \brief Configure the background color setting. * \param[in] presentation_queue The queue to manipulate. * \param[in] background_color The new background color. * * Note: Implementations may choose whether to apply the * new background color value immediately, or defer it until * the next surface is presented. */ typedef VdpStatus VdpPresentationQueueSetBackgroundColor( VdpPresentationQueue presentation_queue, VdpColor * const background_color ); /** * \brief Retrieve the current background color setting. * \param[in] presentation_queue The queue to query. * \param[out] background_color The current background color. */ typedef VdpStatus VdpPresentationQueueGetBackgroundColor( VdpPresentationQueue presentation_queue, VdpColor * background_color ); /** * \brief Retrieve the presentation queue's "current" time. * \param[in] presentation_queue The queue to query. * \param[out] current_time The current time, which may * represent a point between display VSYNC events. * \return VdpStatus The completion status of the operation. 
 */
typedef VdpStatus VdpPresentationQueueGetTime(
    VdpPresentationQueue presentation_queue,
    /* output parameters follow */
    VdpTime * current_time
);

/**
 * \brief Enter a surface into the presentation queue.
 * \param[in] presentation_queue The queue to query.
 * \param[in] surface The surface to enter into the queue.
 * \param[in] clip_width If set to a non-zero value, the presentation queue
 *     will display only clip_width pixels of the surface (anchored to the
 *     top-left corner of the surface).
 * \param[in] clip_height If set to a non-zero value, the presentation queue
 *     will display only clip_height lines of the surface (anchored to the
 *     top-left corner of the surface).
 * \param[in] earliest_presentation_time The timestamp
 *     associated with the surface. The presentation queue
 *     will not display the surface until the presentation
 *     queue's current time is at least this value.
 * \return VdpStatus The completion status of the operation.
 *
 * Applications may choose to allow resizing of the presentation queue target
 * (which may be e.g. a regular Window when using an X11-based
 * implementation).
 *
 * \b clip_width and \b clip_height may be used to limit the size of the
 * displayed region of a surface, in order to match the specific region that
 * was rendered to.
 *
 * In turn, this allows the application to allocate over-sized (e.g.
 * screen-sized) surfaces, but render to a region that matches the current
 * size of the video window.
 *
 * Using this technique, an application's response to window resizing may
 * simply be to render to, and display, a different region of the surface,
 * rather than de-/re-allocation of surfaces to match the updated window size.
 *
 * Implementations may impose an upper bound on the number of entries
 * contained by the presentation queue at a given time. This limit is likely
 * different to the number of \ref VdpOutputSurface "VdpOutputSurface"s that
 * may be allocated at a given time. This limit applies to entries in the
 * QUEUED or VISIBLE state only.
In other words, entries that have * transitioned from a QUEUED or VISIBLE state to an IDLE state do not count * toward this limit. */ typedef VdpStatus VdpPresentationQueueDisplay( VdpPresentationQueue presentation_queue, VdpOutputSurface surface, uint32_t clip_width, uint32_t clip_height, VdpTime earliest_presentation_time ); /** * \brief Wait for a surface to finish being displayed. * \param[in] presentation_queue The queue to query. * \param[in] surface The surface to wait for. * \param[out] first_presentation_time The timestamp of the * VSYNC at which this surface was first displayed. Note * that 0 means the surface was never displayed. * \return VdpStatus The completion status of the operation. * * Note that this API would block forever if queried about the surface most * recently added to a presentation queue. That is because there would be no * other surface that could possibly replace that surface as the currently * displayed surface, and hence that surface would never become idle. For * that reason, this function will return an error in that case. */ typedef VdpStatus VdpPresentationQueueBlockUntilSurfaceIdle( VdpPresentationQueue presentation_queue, VdpOutputSurface surface, /* output parameters follow */ VdpTime * first_presentation_time ); /** * \hideinitializer * \brief The status of a surface within a presentation queue. */ typedef enum { /** The surface is not queued or currently visible. */ VDP_PRESENTATION_QUEUE_STATUS_IDLE, /** The surface is in the queue, and not currently visible. */ VDP_PRESENTATION_QUEUE_STATUS_QUEUED, /** The surface is the currently visible surface. */ VDP_PRESENTATION_QUEUE_STATUS_VISIBLE, } VdpPresentationQueueStatus; /** * \brief Poll the current queue status of a surface. * \param[in] presentation_queue The queue to query. * \param[in] surface The surface to query. * \param[out] status The current status of the surface within * the queue. 
* \param[out] first_presentation_time The timestamp of the * VSYNC at which this surface was first displayed. Note * that 0 means the surface was never displayed. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpPresentationQueueQuerySurfaceStatus( VdpPresentationQueue presentation_queue, VdpOutputSurface surface, /* output parameters follow */ VdpPresentationQueueStatus * status, VdpTime * first_presentation_time ); /*@}*/ /** * \defgroup display_preemption Display Preemption * * The Window System may operate within a frame-work (such as * Linux's VT switching) where the display is shared between the * Window System (e.g. X) and some other output mechanism (e.g. * the VT.) Given this scenario, the Window System's control of * the display could be preempted, and restored, at any time. * * VDPAU does not mandate that implementations hide such * preemptions from VDPAU client applications; doing so may * impose extreme burdens upon VDPAU implementations. Equally, * however, implementations are free to hide such preemptions * from client applications. * * VDPAU allows implementations to inform the client application * when such a preemption has occurred, and then refuse to * continue further operation. * * Similarly, some form of fatal hardware error could prevent further * operation of the VDPAU implementation, without a complete * re-initialization. * * The following discusses the behavior of implementations that * choose not to hide preemption from client applications. * * When preemption occurs, VDPAU internally destroys all * objects; the client application need not do this. However, if * the client application wishes to continue operation, it must * recreate all objects that it uses. It is probable that this * recreation will not succeed until the display ownership is * restored to the Window System. 
* * Once preemption has occurred, all VDPAU entry points will * return the specific error code \ref * VDP_STATUS_DISPLAY_PREEMPTED. * * VDPAU client applications may also be notified of such * preemptions and fatal errors via a callback. See \ref * VdpPreemptionCallbackRegister for more details. * * @{ */ /** * \brief A callback to notify the client application that a * device's display has been preempted. * \param[in] device The device that had its display preempted. * \param[in] context The client-supplied callback context * information. * \return void No return value */ typedef void VdpPreemptionCallback( VdpDevice device, void * context ); /** * \brief Configure the display preemption callback. * \param[in] device The device to be monitored for preemption. * \param[in] callback The client application's callback * function. If NULL, the callback is unregistered. * \param[in] context The client-supplied callback context * information. This information will be passed to the * callback function if/when invoked. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpPreemptionCallbackRegister( VdpDevice device, VdpPreemptionCallback callback, void * context ); /*@}*/ /** * \defgroup get_proc_address Entry Point Retrieval * * In order to facilitate multiple implementations of VDPAU * co-existing within a single process, all functionality is * available via function pointers. The mechanism to retrieve * those function pointers is described below. * * @{ */ /** * \brief A type suitable for \ref VdpGetProcAddress * "VdpGetProcAddress"'s \b function_id parameter. 
*/ typedef uint32_t VdpFuncId; /** \hideinitializer */ #define VDP_FUNC_ID_GET_ERROR_STRING ((VdpFuncId)0) /** \hideinitializer */ #define VDP_FUNC_ID_GET_PROC_ADDRESS ((VdpFuncId)1) /** \hideinitializer */ #define VDP_FUNC_ID_GET_API_VERSION ((VdpFuncId)2) /** \hideinitializer */ #define VDP_FUNC_ID_GET_INFORMATION_STRING ((VdpFuncId)4) /** \hideinitializer */ #define VDP_FUNC_ID_DEVICE_DESTROY ((VdpFuncId)5) /** \hideinitializer */ #define VDP_FUNC_ID_GENERATE_CSC_MATRIX ((VdpFuncId)6) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES ((VdpFuncId)7) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES ((VdpFuncId)8) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_SURFACE_CREATE ((VdpFuncId)9) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_SURFACE_DESTROY ((VdpFuncId)10) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS ((VdpFuncId)11) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR ((VdpFuncId)12) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR ((VdpFuncId)13) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES ((VdpFuncId)14) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES ((VdpFuncId)15) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES ((VdpFuncId)16) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES ((VdpFuncId)17) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_CREATE ((VdpFuncId)18) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY ((VdpFuncId)19) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS ((VdpFuncId)20) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE ((VdpFuncId)21) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE ((VdpFuncId)22) 
/** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED ((VdpFuncId)23) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR ((VdpFuncId)24) /** \hideinitializer */ #define VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES ((VdpFuncId)25) /** \hideinitializer */ #define VDP_FUNC_ID_BITMAP_SURFACE_CREATE ((VdpFuncId)26) /** \hideinitializer */ #define VDP_FUNC_ID_BITMAP_SURFACE_DESTROY ((VdpFuncId)27) /** \hideinitializer */ #define VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS ((VdpFuncId)28) /** \hideinitializer */ #define VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE ((VdpFuncId)29) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE ((VdpFuncId)33) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE ((VdpFuncId)34) /** \hideinitializer */ #define VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_VIDEO_SURFACE_LUMA ((VdpFuncId)35) /** \hideinitializer */ #define VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES ((VdpFuncId)36) /** \hideinitializer */ #define VDP_FUNC_ID_DECODER_CREATE ((VdpFuncId)37) /** \hideinitializer */ #define VDP_FUNC_ID_DECODER_DESTROY ((VdpFuncId)38) /** \hideinitializer */ #define VDP_FUNC_ID_DECODER_GET_PARAMETERS ((VdpFuncId)39) /** \hideinitializer */ #define VDP_FUNC_ID_DECODER_RENDER ((VdpFuncId)40) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT ((VdpFuncId)41) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT ((VdpFuncId)42) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT ((VdpFuncId)43) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE ((VdpFuncId)44) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE ((VdpFuncId)45) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_CREATE ((VdpFuncId)46) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES ((VdpFuncId)47) /** \hideinitializer */ 
#define VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES ((VdpFuncId)48) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT ((VdpFuncId)49) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES ((VdpFuncId)50) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES ((VdpFuncId)51) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES ((VdpFuncId)52) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_DESTROY ((VdpFuncId)53) /** \hideinitializer */ #define VDP_FUNC_ID_VIDEO_MIXER_RENDER ((VdpFuncId)54) /** \hideinitializer */ #define VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY ((VdpFuncId)55) /** \hideinitializer */ #define VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE ((VdpFuncId)56) /** \hideinitializer */ #define VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY ((VdpFuncId)57) /** \hideinitializer */ #define VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR ((VdpFuncId)58) /** \hideinitializer */ #define VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR ((VdpFuncId)59) /** \hideinitializer */ #define VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME ((VdpFuncId)62) /** \hideinitializer */ #define VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY ((VdpFuncId)63) /** \hideinitializer */ #define VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE ((VdpFuncId)64) /** \hideinitializer */ #define VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS ((VdpFuncId)65) /** \hideinitializer */ #define VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER ((VdpFuncId)66) #define VDP_FUNC_ID_BASE_WINSYS 0x1000 /** * \brief Retrieve a VDPAU function pointer. * \param[in] device The device that the function will operate * against. * \param[in] function_id The specific function to retrieve. * \param[out] function_pointer The actual pointer for the * application to call. * \return VdpStatus The completion status of the operation. 
*/ typedef VdpStatus VdpGetProcAddress( VdpDevice device, VdpFuncId function_id, /* output parameters follow */ void * * function_pointer ); /*@}*/ /*@}*/ /** * \defgroup api_winsys Window System Integration Layer * * The set of VDPAU functionality specific to an individual * Windowing System. */ #ifdef __cplusplus } #endif #endif libvdpau-va-gl-0.4.2/3rdparty/vdpau/vdpau_x11.h000066400000000000000000000143611277566164500212270ustar00rootroot00000000000000/* * This source file is documented using Doxygen markup. * See http://www.stack.nl/~dimitri/doxygen/ */ /* * This copyright notice applies to this header file: * * Copyright (c) 2008-2009 NVIDIA Corporation * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ /** * \file vdpau_x11.h * \brief X11 Window System Integration Layer * * This file contains the \ref api_winsys_x11 X11 Window System * Integration Layer. 
*/ #ifndef _VDPAU_X11_H #define _VDPAU_X11_H #include #include "vdpau.h" #ifdef __cplusplus extern "C" { #endif /** * \ingroup api_winsys * @{ */ /** * \defgroup api_winsys_x11 X11 Window System Integration Layer * * The set of VDPAU functionality specific to usage with the X * Window System. * * \section Driver Library Layout * * An X11-oriented VDPAU installation consists of the following * components: * * - Header files. These files are located in the standard * system header file path. * - \c vdpau/vdpau.h * - \c vdpau/vdpau_x11.h * - The VDPAU wrapper library. These files are located in the * standard system (possibly X11-specific) library path. * - \c libvdpau.so.1 (runtime) * - \c libvdpau.so (development) * - Back-end driver files. These files are located in a * system-defined library path, which is configurable at compile * time but is typically /usr/lib/vdpau. Use `pkg-config * --variable=moduledir vdpau` to locate the driver install path. * - \c $moduledir/libvdpau_\%s.so.1 * For example: * - \c /usr/lib/vdpau/libvdpau_nvidia.so.1 * - \c /usr/lib/vdpau/libvdpau_intel.so.1 * - \c /usr/lib/vdpau/libvdpau_ati.so.1 * The library path can be overridden by the VDPAU_DRIVER_PATH * environment variable. * * The VDPAU wrapper library implements just one function; \ref * vdp_device_create_x11. The wrapper implements this function by * dynamically loading the appropriate back-end driver file mentioned * above. When available, the wrapper uses the DRI2 extension's * DRI2Connect request with the driver type 'DRI2DriverVDPAU' to * determine which back-end driver to load. If that fails, the wrapper * library hard-codes the driver name as "nvidia", although this can * be overridden using the environment variable VDPAU_DRIVER. * * The back-end driver is expected to implement a function named * \b vdp_imp_device_create_x11. The wrapper will call this function to * actually implement the \ref vdp_device_create_x11 application call. 
* * Note that it is theoretically possible for an application to * create multiple \ref VdpDevice "VdpDevice" objects. In this * case, the wrapper library may load multiple back-end drivers * into the same application, and/or invoke a specific back-end * driver's \b VdpImpDeviceCreateX11 multiple times. The wrapper * library imposes no policy regarding whether the application * may instantiate multiple \ref VdpDevice "VdpDevice" objects for * the same display and/or screen. However, back-end drivers are * free to limit the number of \ref VdpDevice "VdpDevice" objects * as required by their implementation. * * @{ */ /** * \brief Create a VdpDevice object for use with X11. * \param[in] display The X Display that the VdpDevice VdpDevice * will operate against. * \param[in] screen The X screen that the VdpDevice will operate * against. * \param[out] device The new device's handle. * \param[out] get_proc_address The get_proc_address entry point * to use with this device. * \return VdpStatus The completion status of the operation. */ typedef VdpStatus VdpDeviceCreateX11( Display * display, int screen, /* output parameters follow */ VdpDevice * device, VdpGetProcAddress * * get_proc_address ); /** * \brief Create a VdpDevice object for use with X11. * This is an actual symbol of type \ref VdpDeviceCreateX11 * */ VdpDeviceCreateX11 vdp_device_create_x11; /** * \brief Create a VdpPresentationQueueTarget for use with X11. * \param[in] device The device that will contain the queue * target. * \param[in] drawable The X11 Drawable that the presentation * queue will present into. * \param[out] target The new queue target's handle. * \return VdpStatus The completion status of the operation. * * Note: VDPAU expects to own the entire drawable for the duration of time * that the presentation queue target exists. In particular, * implementations may choose to manipulate client-visible X11 window state * as required. 
As such, it is recommended that applications create a * dedicated window for the presentation queue target, as a child * (grand-child, ...) of their top-level application window. * * Applications may also create child-windows of the presentation queue * target, which will cover any presented video in the normal fashion. VDPAU * implementations will not manipulate such child windows in any fashion. */ typedef VdpStatus VdpPresentationQueueTargetCreateX11( VdpDevice device, Drawable drawable, /* output parameters follow */ VdpPresentationQueueTarget * target ); /** \hideinitializer */ #define VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11 (VdpFuncId)(VDP_FUNC_ID_BASE_WINSYS + 0) /*@}*/ /*@}*/ #ifdef __cplusplus } #endif #endif libvdpau-va-gl-0.4.2/CMakeLists.txt000066400000000000000000000023051277566164500171120ustar00rootroot00000000000000project (libvdpau-va-gl) cmake_minimum_required (VERSION 2.8) add_definitions(-Wall -fvisibility=hidden -fPIC) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread -std=gnu99") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread -std=c++11") find_package(PkgConfig REQUIRED) find_package(X11 REQUIRED) pkg_check_modules(LIBVA libva-x11 REQUIRED) pkg_check_modules(LIBGL gl REQUIRED) set(DRIVER_NAME "vdpau_va_gl" CACHE STRING "driver name") set(LIB_SUFFIX "" CACHE STRING "library path suffix (if needed)") set(LIB_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}/vdpau" CACHE PATH "library installation path") include_directories ( 3rdparty ${X11_INCLUDE_DIRS} ${LIBVA_INCLUDE_DIRS} ${LIBGL_INCLUDE_DIRS} ${GENERATED_INCLUDE_DIRS} ${CMAKE_BINARY_DIR} ) # filter public symbols set(SYMBOLMAP "-Wl,-version-script=\"${CMAKE_SOURCE_DIR}/src/symbolmap\"") add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND}) add_custom_target(build-tests) add_dependencies(check build-tests) add_subdirectory(glsl) enable_testing() add_subdirectory(tests) # put compiled library to the build directory root set(CMAKE_LIBRARY_OUTPUT_DIRECTORY 
${CMAKE_CURRENT_BINARY_DIR}) add_subdirectory(src) libvdpau-va-gl-0.4.2/ChangeLog000066400000000000000000000050551277566164500161310ustar00rootroot000000000000002016-06-19 Rinat Ibragimov * tag: v0.4.0 * deps: convert code to use C++11 * deps: drop libswscale and GLib dependencies * core: include copy of VDPAU headers * core: drop own tracing code in favor of libvdpau's * core: fix VLC video scaling 2016-03-19 Rinat Ibragimov * license: change to the MIT license 2016-03-02 Rinat Ibragimov * tag v0.3.6 * core: handle Constrained Baseline profile for H.264 * misc: bugfixes 2016-02-21 Rinat Ibragimov * tag: v0.3.5 * misc: bugfixes * misc: compatibility with some other ffmpeg versions 2014-01-19 Rinat Ibragimov * tag: v0.3.0 * core: avoid NULL dereference on proprietary drivers * core: minor refactoring 2014-01-13 Rinat Ibragimov * core: mitigate memleaks on glx contexts by using own vaCopySurfaceGLX implementation 2014-01-11 Rinat Ibragimov * doc: add known-issues.md * core: use glsl shaders for colorspace conversion and texture components reordering * core: split large source code file into smaller pieces relevant to various parts of API * tests: use static linking with core code. `make check` now checks current source, not current VDPAU driver. 
* core: render to pixmap with bitblitting afterwards * core: mitigate GLX context leaks from per thread context pool * core: fix some concurrency bugs 2013-11-15 Rinat Ibragimov * tag: v0.2.1 * core: fix some NULL dereferences, memcpy warning, deadlock in presentation thread * core: implement rotation and coloring in surface rendering functions * core: reuse render_target entries (VdpDecoder) 2013-09-21 Rinat Ibragimov * core: start splitting vdpau-soft.c into smaller pieces * core: use separate thread for presentation queue * core: drop global locking, try to use fine grained locking instead 2013-07-06 Rinat Ibragimov * tag: v0.1.0 * core: remove miscellaneous unused code * core: add basic documentation on structures * core: fix build system to simplify further packaging * core: enable compiling against new libva 1.2.1 release 2013-05-14 Rinat Ibragimov * libvdpau-va-gl use common GL context pool across several vdp devices. Shrinks memory overhead from ~100% to ~50% * libvdpau-va-gl add new quirk named AvoidVA, which disables using VA-API even on systems that have it * libvdpau-va-gl start writing changes to ChangeLog libvdpau-va-gl-0.4.2/Doxyfile000066400000000000000000002251111277566164500160620ustar00rootroot00000000000000# Doxyfile 1.8.1.2 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. 
The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = "libvdpau-va-gl" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = "VDPAU driver with OpenGL/VA-API backend" # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. 
# Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. 
# If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. 
This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". 
# For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. 
For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. 
If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. 
This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. SYMBOL_CACHE_SIZE = 0 # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. 
LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. 
# If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. 
HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. 
If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. 
# The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command <command> <input-file>, where <command> is the value of # the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. 
LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. 
The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = # The RECURSIVE tag can be used to specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. 
RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. 
EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain images that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command <filter> <input-file>, where <filter> # is the value of the INPUT_FILTER tag, and <input-file> is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # none of the patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. 
FILTER_SOURCE_PATTERNS = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. 
USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. 
HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # style sheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. 
# Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries 1 will produce a full collapsed tree by # default. 
0 is a special value representing an infinite number of entries # and will result in a full expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. 
GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. 
For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # http://doc.trolltech.com/qthelpproject.html#filter-attributes # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. 
ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to NO if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider to set DISABLE_INDEX to NO when enabling this option. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. 
Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want to formulas look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. 
The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a PHP enabled web server instead of at the web client # using Javascript. Doxygen will generate the search PHP script and index # file to put on the web server. The advantage of the server # based approach is that it scales better to large projects and allows # full text search. The disadvantages are that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = YES # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. 
If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. 
# This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. 
Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. 
XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. 
# On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. 
INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). 
If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. 
HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. 
COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # managable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. 
CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). 
DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. 
DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES libvdpau-va-gl-0.4.2/LICENSE000066400000000000000000000020261277566164500153570ustar00rootroot00000000000000The MIT License (MIT) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. libvdpau-va-gl-0.4.2/README.md000066400000000000000000000050311277566164500156300ustar00rootroot00000000000000About ===== Briefly, this is the [VDPAU](http://en.wikipedia.org/wiki/VDPAU) driver with [VA-API](http://en.wikipedia.org/wiki/Video_Acceleration_API)/OpenGL backend. There are applications exists that can use VDPAU. Amongst them are Adobe Flash Player and Mplayer. 
They both can use VDPAU, but since there is no VDPAU available on Intel chips, they fall back to different drawing techniques. And while Mplayer can use XVideo extension to offload scaling to GPU, Flash Player can not and does all scaling in software. If there was VDPAU available, CPU usage could be significantly lower. VDPAU is not vendor-locked technology. Even official documentation mentions possibility of other drivers. They should be named as `libvdpau_drivername.so.1` and placed where linker could find them. `/usr/lib` usually works fine. Which driver to use is determined by asking X server about current driver name or by using `VDPAU_DRIVER` environment variable. Here is one. Named libvdpau_va_gl.so.1, it uses OpenGL under the hood to accelerate drawing and scaling and VA-API (if available) to accelerate video decoding. For now VA-API is available on some Intel chips, and on some AMD video adapters with help of [xvba-va-driver](http://cgit.freedesktop.org/vaapi/xvba-driver/). OpenGL is available, you know, on systems with OpenGL available. Install ======= 1. `sudo apt-get install cmake libva-dev libgl1-mesa-dev` 2. `mkdir build; cd build` 3. `cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..` 4. `sudo make install` 5. Add `VDPAU_DRIVER=va_gl` to your environment Commands above should work for any Debian-based distro. Fedora names packages in a different way, so package installation step will look like: `sudo yum install cmake libva-devel mesa-libGL-devel`. Run time configuration ====================== Besides `VDPAU_DRIVER` variable which selects which driver to use there are other variables that control runtime behavior of va_gl driver. `VDPAU_QUIRKS` contains comma-separated list of enabled quirks. 
Here is the list: * `XCloseDisplay` Disables calling of XCloseDisplay which may segfault on some video drivers * `ShowWatermark` Enables displaying string "va_gl" in bottom-right corner of window * `AvoidVA` Makes libvdpau-va-gl NOT use VA-API Parameters of VDPAU_QUIRKS are case-insensetive. Copying ======= libvdpau-va-gl is distributed under the terms of the MIT license. See LICENSE file for details. Contact ======= Author can be reached at email `ibragimovrinat-at-mail.ru` or at github: https://github.com/i-rinat/ libvdpau-va-gl-0.4.2/doc/000077500000000000000000000000001277566164500151175ustar00rootroot00000000000000libvdpau-va-gl-0.4.2/doc/flash-wmode.js000066400000000000000000000025701277566164500176670ustar00rootroot00000000000000// ==UserScript== // @name Set wmode to 'direct' // @namespace None // @description Sets embed's and object's wmode parameter to 'direct' to enable hw acceleration // @include * // @grant none // ==/UserScript== (function () { nodeInserted(); })(); document.addEventListener("DOMNodeInserted", nodeInserted, false); function nodeInserted() { for (var objs = document.getElementsByTagName("object"), i = 0, obj; obj = objs[i]; i++) { if (obj.type == 'application/x-shockwave-flash') { var skip = false; for (var params = obj.getElementsByTagName("param"), j = 0, param; param = params[j]; j++) { if (param.getAttribute("name") == "wmode") { param.setAttribute("value", "direct"); skip = true; break; } } if(skip) continue; var param = document.createElement("param"); param.setAttribute("name", "wmode"); param.setAttribute("value", "direct"); obj.appendChild(param); } } for (var embeds = document.getElementsByTagName("embed"), i = 0, embed; embed = embeds[i]; i++) { if (embed.type != 'application/x-shockwave-flash') continue; if ((embed.getAttribute('wmode') && embed.getAttribute('wmode') == 'direct')) continue; embed.setAttribute('wmode', 'direct'); var html = embed.outerHTML; embed.insertAdjacentHTML('beforeBegin', embed.outerHTML); 
embed.parentNode.removeChild(embed); } } libvdpau-va-gl-0.4.2/doc/known-issues.md000066400000000000000000000064101277566164500201070ustar00rootroot00000000000000Known issues ============ Flash Player is slow -------------------- The issue consists of two: hw decoding (D in VDPAU) and hw presentation (P in VDPAU, mostly scaling). See below. No hardware accelerated decoding in Flash Player ------------------------------------------------ Flash Player have hardware accelerated decoding turned off by default. To enable, add line `EnableLinuxHWVideoDecode=1` to file `/etc/adobe/mms.cfg`. Create that file if necessary. You must reload plugin, easiest way to reload plugin is to restart browser. No hardware accelerated presentation in Flash --------------------------------------------- First, you may check whenever application uses VDPAU via libvdpau-va-gl, by adding `ShowWatermark` to `VDPAU_QUIRKS` environment variable. That will display "va_gl" at bottom right corner of video. If you see it, you are fine. Otherwise, you can try user script [doc/flash-wmode.js](flash-wmode.js) which will force wmode parameter value to be 'direct'. Here is how and why it works. Flash Player is an NPAPI plugin. Such plugins are separate binaries which output is embedded in a web page by one of two different ways. Plugin can ask browser either windowed or windowless operation. First way browser creates a window and passes it to a plugin. Then plugin can draw on that window when and how it wants to. Second way plugin does content display only on browser demand by filling data buffer. VDPAU requires an X drawable to display on, so it can be used only in windowed plugin mode. Usually nothing can be displayed over that drawable. VDPAU will overwrite everything else. On the other hand, browser plugins have `wmode` parameter which controls how their content is managed by browser. You can search for exact `wmode` semantics on the Internet. 
But here is the crucial part: if `wmode` set to anything but `direct`, plugin can not use hardware acceleration, since it forces windowless operation which in turn prevents VDPAU usage. Script above forces all plugin instances to have `wmode=direct`. That solves some problems, but has own drawbacks. If web page was desined to have something to be displayed over Flash movie, that will become hidden. That may be subtitles, or video player controls. They may become unusable. If you know any better working solution for this problem, please let me know. Flash is still slow ------------------- Flash movies (.swf) must use StageVideo to make use of hardware acceleration. If author for some reason have not used it, there is nothing can be done on our side. For example, Vimeo player does use hardware decoding, but then it downloads decoded frames back to CPU, where they scaled with their own scaler implemented in ActionScript. Mplayer have higher CPU usage with VDPAU than with Xv ----------------------------------------------------- If you omit `-vc ffh264vdpau`, Mplayer will use software decoder, then then output YCbCr images via VDPAU. At the moment YCbCr to RGB conversion is done with help of libswscale, which can eat decent amount of CPU time. Ensure you have hardware accelerated codecs enabled. Mplayer shows weird errors for 10-bit H.264 --------------------------------------------- VDPAU at the moment has no support for Hi10P, so 10bit videos will fail. There is nothing can be done in libvdpau-va-gl to fix this. 
libvdpau-va-gl-0.4.2/glsl/000077500000000000000000000000001277566164500153135ustar00rootroot00000000000000libvdpau-va-gl-0.4.2/glsl/CMakeLists.txt000066400000000000000000000015661277566164500200630ustar00rootroot00000000000000set(shader_list_no_path NV12_RGBA.glsl YV12_RGBA.glsl red_to_alpha_swizzle.glsl ) set(GENERATED_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR} PARENT_SCOPE) set(shader_list) foreach(item ${shader_list_no_path}) list(APPEND shader_list ${CMAKE_CURRENT_SOURCE_DIR}/${item}) endforeach(item) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/shaders.c ${CMAKE_CURRENT_BINARY_DIR}/shaders.h COMMAND ${CMAKE_CURRENT_BINARY_DIR}/shader-bundle-tool ${CMAKE_CURRENT_BINARY_DIR}/shaders.h ${CMAKE_CURRENT_BINARY_DIR}/shaders.c ${shader_list} DEPENDS ${shader_list} shader-bundle-tool ) add_custom_target(shader-bundle-src DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/shaders.c ${CMAKE_CURRENT_BINARY_DIR}/shaders.h) add_executable(shader-bundle-tool shader-bundle-tool.cc) add_library(shader-bundle STATIC ${CMAKE_CURRENT_BINARY_DIR}/shaders.c) add_dependencies(shader-bundle shader-bundle-src) libvdpau-va-gl-0.4.2/glsl/NV12_RGBA.glsl000066400000000000000000000005621277566164500174620ustar00rootroot00000000000000#version 110 uniform sampler2D tex[2]; void main() { vec2 y_coord = gl_TexCoord[0].xy; float y = texture2D(tex[0], y_coord).r; float cb = texture2D(tex[1], y_coord).r - 0.5; float cr = texture2D(tex[1], y_coord).g - 0.5; gl_FragColor = vec4( y + 1.4021 * cr, y - 0.34482 * cb - 0.71405 * cr, y + 1.7713 * cb, 1.0); } libvdpau-va-gl-0.4.2/glsl/YV12_RGBA.glsl000066400000000000000000000007421277566164500174750ustar00rootroot00000000000000#version 110 uniform sampler2D tex[2]; void main() { vec2 y_coord = gl_TexCoord[0].xy; vec2 cb_coord = vec2(y_coord.x, y_coord.y/2.0); vec2 cr_coord = vec2(y_coord.x, y_coord.y/2.0 + 0.5); float y = texture2D(tex[0], y_coord).r; float cb = texture2D(tex[1], cb_coord).r - 0.5; float cr = texture2D(tex[1], cr_coord).r - 0.5; gl_FragColor = 
vec4( y + 1.4021 * cr, y - 0.34482 * cb - 0.71405 * cr, y + 1.7713 * cb, 1.0); } libvdpau-va-gl-0.4.2/glsl/red_to_alpha_swizzle.glsl000066400000000000000000000002221277566164500224020ustar00rootroot00000000000000#version 110 uniform sampler2D tex_0; void main() { gl_FragColor = gl_Color * vec4(1.0, 1.0, 1.0, texture2D(tex_0, gl_TexCoord[0].xy).r); } libvdpau-va-gl-0.4.2/glsl/shader-bundle-tool.cc000066400000000000000000000051051277566164500213130ustar00rootroot00000000000000#include #include #include using std::cerr; using std::fstream; using std::ios_base; using std::string; using std::stringstream; namespace { string get_basename(const string &path) { size_t pos = path.find_last_of('/'); if (pos == string::npos) return path; return path.substr(pos + 1); } } // anonymous namespace int main(int argc, char *argv[]) { if (argc <= 2) { cerr << "not enough arguments\n"; return 1; } fstream fp_h{argv[1], ios_base::out}; fstream fp_c{argv[2], ios_base::out}; if (!fp_h.is_open()) { cerr << "can't open " << argv[1] << "\n"; return 2; } if (!fp_c.is_open()) { cerr << "can't open " << argv[2] << "\n"; return 2; } // h file fp_h << "// generated file, all changes will be lost\n\n" "#pragma once\n\n\n" "struct shader_s {\n" " const char *body;\n" " int len;\n" "};\n" "\n" "extern struct shader_s glsl_shaders[" << argc - 3 << "];\n\n"; fp_h << "#define SHADER_COUNT " << argc - 3 << "\n\n"; fp_h << "enum {\n"; for (int k = 3; k < argc; k ++) { const string bname = get_basename(argv[k]); size_t pos = bname.find_last_of('.'); if (pos == string::npos) continue; fp_h << " glsl_" << bname.substr(0, pos) << " = " << k - 3 << ",\n"; } fp_h << "};\n"; // c file fp_c << "// generated file, all changes will be lost\n\n"; const string h_name = get_basename(argv[1]); fp_c << "#include \"" << h_name << "\"\n"; fp_c << "\n"; fp_c << "struct shader_s glsl_shaders[" << argc - 3 << "] = {\n"; for (int k = 3; k < argc; k ++) { stringstream ss; fstream fp_tmp{argv[k], ios_base::in}; if 
(!fp_tmp.is_open()) { cerr << "can't open " << argv[k] << "\n"; return 2; } ss << fp_tmp.rdbuf(); fp_c << " {\n"; fp_c << " .body =\n"; fp_c << " \""; size_t len = 0; for (const char c: ss.str()) { switch (c) { case '\n': fp_c << "\\n\"\n \""; len ++; break; case '\r': break; default: fp_c << c; len ++; break; } } fp_c <<"\",\n"; fp_c << " .len = " << len << ",\n"; fp_c << " },\n"; } fp_c << "};\n"; return 0; } libvdpau-va-gl-0.4.2/src/000077500000000000000000000000001277566164500151415ustar00rootroot00000000000000libvdpau-va-gl-0.4.2/src/CMakeLists.txt000066400000000000000000000016121277566164500177010ustar00rootroot00000000000000include_directories( ${GENERATED_INCLUDE_DIRS} ) link_directories ( ${X11_LIBRARY_DIRS} ${LIBVA_LIBRARY_DIRS} ${LIBGL_LIBRARY_DIRS} ) add_library(${DRIVER_NAME} SHARED api-bitmap-surface.cc api-csc-matrix.cc api-decoder.cc api-device.cc api-output-surface.cc api-presentation-queue.cc api-video-mixer.cc api-video-surface.cc entry.cc globals.cc glx-context.cc h264-parse.cc handle-storage.cc reverse-constant.cc trace.cc watermark.cc x-display-ref.cc ) add_dependencies(${DRIVER_NAME} shader-bundle) set(LINK_LIBRARIES ${X11_LIBRARIES} ${LIBVA_LIBRARIES} ${LIBGL_LIBRARIES} -lrt shader-bundle ) target_link_libraries(${DRIVER_NAME} ${LINK_LIBRARIES} ${SYMBOLMAP}) set_target_properties(${DRIVER_NAME} PROPERTIES VERSION 1 ) install(TARGETS ${DRIVER_NAME} DESTINATION ${LIB_INSTALL_DIR}) libvdpau-va-gl-0.4.2/src/api-bitmap-surface.cc000066400000000000000000000252031277566164500211230ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to 
whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "api-bitmap-surface.hh" #include "api-device.hh" #include "glx-context.hh" #include "handle-storage.hh" #include "reverse-constant.hh" #include "trace.hh" #include #include #include #include using std::shared_ptr; namespace vdp { namespace BitmapSurface { Resource::Resource(shared_ptr a_device, VdpRGBAFormat a_rgba_format, uint32_t a_width, uint32_t a_height, VdpBool a_frequently_accessed) : rgba_format{a_rgba_format} , width{a_width} , height{a_height} , frequently_accessed{a_frequently_accessed} { device = a_device; switch (rgba_format) { case VDP_RGBA_FORMAT_B8G8R8A8: gl_internal_format = GL_RGBA; gl_format = GL_BGRA; gl_type = GL_UNSIGNED_BYTE; bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_R8G8B8A8: gl_internal_format = GL_RGBA; gl_format = GL_RGBA; gl_type = GL_UNSIGNED_BYTE; bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_R10G10B10A2: gl_internal_format = GL_RGB10_A2; gl_format = GL_RGBA; gl_type = GL_UNSIGNED_INT_10_10_10_2; bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_B10G10R10A2: gl_internal_format = GL_RGB10_A2; gl_format = GL_BGRA; gl_type = GL_UNSIGNED_INT_10_10_10_2; bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_A8: gl_internal_format = GL_RGBA; gl_format = GL_RED; gl_type = GL_UNSIGNED_BYTE; bytes_per_pixel = 1; break; default: 
traceError("BitmapSurface::Resource::Resource(): %s not implemented\n", reverse_rgba_format(rgba_format)); throw invalid_rgba_format(); } // Frequently accessed bitmaps reside in system memory rather that in GPU texture. dirty = 0; if (frequently_accessed) bitmap_data.reserve(width * height * bytes_per_pixel); GLXThreadLocalContext glc_guard{device}; glGenTextures(1, &tex_id); glBindTexture(GL_TEXTURE_2D, tex_id); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexImage2D(GL_TEXTURE_2D, 0, gl_internal_format, width, height, 0, gl_format, gl_type, nullptr); glFinish(); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { // Requested RGBA format was wrong traceError("BitmapSurface::Resource::Resource(): texture failure, %d\n", gl_error); throw vdp::generic_error(); } } Resource::~Resource() { try { GLXThreadLocalContext glc_guard{device}; glDeleteTextures(1, &tex_id); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) traceError("BitmapSurface::Resource::~Resource(): gl error %d\n", gl_error); } catch (...) 
{ traceError("BitmapSurface::Resource::~Resource(): caught exception\n"); } } VdpStatus CreateImpl(VdpDevice device_id, VdpRGBAFormat rgba_format, uint32_t width, uint32_t height, VdpBool frequently_accessed, VdpBitmapSurface *surface) { if (!surface) return VDP_STATUS_INVALID_HANDLE; vdp::ResourceRef device{device_id}; auto data = std::make_shared(device, rgba_format, width, height, frequently_accessed); *surface = vdp::ResourceStorage::instance().insert(data); return VDP_STATUS_OK; } VdpStatus Create(VdpDevice device_id, VdpRGBAFormat rgba_format, uint32_t width, uint32_t height, VdpBool frequently_accessed, VdpBitmapSurface *surface) { return check_for_exceptions(CreateImpl, device_id, rgba_format, width, height, frequently_accessed, surface); } VdpStatus DestroyImpl(VdpBitmapSurface surface_id) { vdp::ResourceRef surface{surface_id}; ResourceStorage::instance().drop(surface_id); return VDP_STATUS_OK; } VdpStatus Destroy(VdpBitmapSurface surface_id) { return check_for_exceptions(DestroyImpl, surface_id); } VdpStatus GetParametersImpl(VdpBitmapSurface surface_id, VdpRGBAFormat *rgba_format, uint32_t *width, uint32_t *height, VdpBool *frequently_accessed) { if (!rgba_format || !width || !height || !frequently_accessed) return VDP_STATUS_INVALID_POINTER; vdp::ResourceRef src_surf{surface_id}; *rgba_format = src_surf->rgba_format; *width = src_surf->width; *height = src_surf->height; *frequently_accessed = src_surf->frequently_accessed; return VDP_STATUS_OK; } VdpStatus GetParameters(VdpBitmapSurface surface_id, VdpRGBAFormat *rgba_format, uint32_t *width, uint32_t *height, VdpBool *frequently_accessed) { return check_for_exceptions(GetParametersImpl, surface_id, rgba_format, width, height, frequently_accessed); } VdpStatus PutBitsNativeImpl(VdpBitmapSurface surface_id, void const *const *source_data, uint32_t const *source_pitches, VdpRect const *destination_rect) { if (!source_data || !source_pitches) return VDP_STATUS_INVALID_POINTER; vdp::ResourceRef 
dst_surf{surface_id}; VdpRect d_rect = {0, 0, dst_surf->width, dst_surf->height}; if (destination_rect) d_rect = *destination_rect; if (dst_surf->frequently_accessed) { if (d_rect.x0 == 0 && dst_surf->width == d_rect.x1 && source_pitches[0] == d_rect.x1) { // full width, can copy all lines with a single memcpy const size_t bytes_to_copy = (d_rect.x1 - d_rect.x0) * (d_rect.y1 - d_rect.y0) * dst_surf->bytes_per_pixel; memcpy(dst_surf->bitmap_data.data() + d_rect.y0 * dst_surf->width * dst_surf->bytes_per_pixel, source_data[0], bytes_to_copy); } else { // copy line by line const size_t bytes_in_line = (d_rect.x1 - d_rect.x0) * dst_surf->bytes_per_pixel; auto source_data_0 = static_cast(source_data[0]); for (auto y = d_rect.y0; y < d_rect.y1; y ++) { memcpy(dst_surf->bitmap_data.data() + (y * dst_surf->width + d_rect.x0) * dst_surf->bytes_per_pixel, source_data_0 + (y - d_rect.y0) * source_pitches[0], bytes_in_line); } } dst_surf->dirty = true; } else { GLXThreadLocalContext glc_guard{dst_surf->device}; glBindTexture(GL_TEXTURE_2D, dst_surf->tex_id); glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0] / dst_surf->bytes_per_pixel); if (dst_surf->bytes_per_pixel != 4) glPixelStorei(GL_UNPACK_ALIGNMENT, 1); glTexSubImage2D(GL_TEXTURE_2D, 0, d_rect.x0, d_rect.y0, d_rect.x1 - d_rect.x0, d_rect.y1 - d_rect.y0, dst_surf->gl_format, dst_surf->gl_type, source_data[0]); glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); if (dst_surf->bytes_per_pixel != 4) glPixelStorei(GL_UNPACK_ALIGNMENT, 4); glFinish(); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { traceError("BitmapSurface::PutBitsNativeImpl(): gl error %d\n", gl_error); return VDP_STATUS_ERROR; } } return VDP_STATUS_OK; } VdpStatus PutBitsNative(VdpBitmapSurface surface_id, void const *const *source_data, uint32_t const *source_pitches, VdpRect const *destination_rect) { return check_for_exceptions(PutBitsNativeImpl, surface_id, source_data, source_pitches, destination_rect); } VdpStatus 
QueryCapabilitiesImpl(VdpDevice device_id, VdpRGBAFormat surface_rgba_format, VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height) { if (!is_supported || !max_width || !max_height) return VDP_STATUS_INVALID_POINTER; vdp::ResourceRef device{device_id}; switch (surface_rgba_format) { case VDP_RGBA_FORMAT_B8G8R8A8: case VDP_RGBA_FORMAT_R8G8B8A8: case VDP_RGBA_FORMAT_R10G10B10A2: case VDP_RGBA_FORMAT_B10G10R10A2: case VDP_RGBA_FORMAT_A8: *is_supported = 1; // all these formats are supported by every OpenGL break; // implementation default: *is_supported = 0; break; } GLXThreadLocalContext glc_guard{device}; GLint max_texture_size; glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { traceError("BitmapSurface::QueryCapabilitiesImpl(): gl error %d\n", gl_error); return VDP_STATUS_ERROR; } *max_width = max_texture_size; *max_height = max_texture_size; return VDP_STATUS_OK; } VdpStatus QueryCapabilities(VdpDevice device_id, VdpRGBAFormat surface_rgba_format, VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height) { return check_for_exceptions(QueryCapabilitiesImpl, device_id, surface_rgba_format, is_supported, max_width, max_height); } } } // namespace vdp::BitmapSurface libvdpau-va-gl-0.4.2/src/api-bitmap-surface.hh000066400000000000000000000052611277566164500211370ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be 
included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #include "api-device.hh" #include "api.hh" #include #include #include namespace vdp { namespace BitmapSurface { struct Resource: public vdp::GenericResource { Resource(std::shared_ptr a_device, VdpRGBAFormat a_rgba_format, uint32_t a_width, uint32_t a_height, VdpBool a_frequently_accessed); ~Resource(); VdpRGBAFormat rgba_format; ///< RGBA format of data stored GLuint tex_id; ///< GL texture id uint32_t width; uint32_t height; VdpBool frequently_accessed;///< 1 if surface should be optimized for frequent access unsigned int bytes_per_pixel; ///< number of bytes per bitmap pixel GLuint gl_internal_format; ///< GL texture format: internal format GLuint gl_format; ///< GL texture format: preferred external format GLuint gl_type; ///< GL texture format: pixel type std::vector bitmap_data; ///< system-memory buffer for frequently accessed bitmaps bool dirty; ///< dirty flag. 
True if system-memory buffer contains data ///< newer than GPU texture contents }; VdpBitmapSurfaceQueryCapabilities QueryCapabilities; VdpBitmapSurfaceCreate Create; VdpBitmapSurfaceDestroy Destroy; VdpBitmapSurfaceGetParameters GetParameters; VdpBitmapSurfacePutBitsNative PutBitsNative; } } // namespace vdp::BitmapSurface libvdpau-va-gl-0.4.2/src/api-csc-matrix.cc000066400000000000000000000052761277566164500203030ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "api-csc-matrix.hh" #include namespace vdp { VdpStatus GenerateCSCMatrix(VdpProcamp *procamp, VdpColorStandard standard, VdpCSCMatrix *csc_matrix) { if (!csc_matrix) return VDP_STATUS_INVALID_POINTER; if (procamp && VDP_PROCAMP_VERSION != procamp->struct_version) return VDP_STATUS_INVALID_VALUE; // TODO: do correct matricies calculation VdpCSCMatrix *m = csc_matrix; switch (standard) { case VDP_COLOR_STANDARD_ITUR_BT_601: (*m)[0][0] = 1.164f; (*m)[0][1] = 0.0f; (*m)[0][2] = 1.596f; (*m)[0][3] = -222.9f; (*m)[1][0] = 1.164f; (*m)[1][1] = -0.392f; (*m)[1][2] = -0.813f; (*m)[1][3] = 135.6f; (*m)[2][0] = 1.164f; (*m)[2][1] = 2.017f; (*m)[2][2] = 0.0f; (*m)[2][3] = -276.8f; break; case VDP_COLOR_STANDARD_ITUR_BT_709: (*m)[0][0] = 1.0f; (*m)[0][1] = 0.0f; (*m)[0][2] = 1.402f; (*m)[0][3] = -179.4f; (*m)[1][0] = 1.0f; (*m)[1][1] = -0.344f; (*m)[1][2] = -0.714f; (*m)[1][3] = 135.5f; (*m)[2][0] = 1.0f; (*m)[2][1] = 1.772f; (*m)[2][2] = 0.0f; (*m)[2][3] = -226.8f; break; case VDP_COLOR_STANDARD_SMPTE_240M: (*m)[0][0] = 0.581f; (*m)[0][1] = -0.764f; (*m)[0][2] = 1.576f; (*m)[0][3] = 0.0f; (*m)[1][0] = 0.581f; (*m)[1][1] = -0.991f; (*m)[1][2] = -0.477f; (*m)[1][3] = 0.0f; (*m)[2][0] = 0.581f; (*m)[2][1] = 1.062f; (*m)[2][2] = 0.000f; (*m)[2][3] = 0.0f; break; default: return VDP_STATUS_INVALID_COLOR_STANDARD; } return VDP_STATUS_OK; } } // namespace vdp libvdpau-va-gl-0.4.2/src/api-csc-matrix.hh000066400000000000000000000024031277566164500203020ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to 
the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #include namespace vdp { VdpGenerateCSCMatrix GenerateCSCMatrix; } // namespace vdp libvdpau-va-gl-0.4.2/src/api-decoder.cc000066400000000000000000000645461277566164500176430ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "api-decoder.hh" #include "api-video-surface.hh" #include "glx-context.hh" #include "h264-parse.hh" #include "handle-storage.hh" #include "reverse-constant.hh" #include "trace.hh" #include #include using std::make_shared; using std::shared_ptr; using std::vector; namespace vdp { namespace Decoder { Resource::Resource(shared_ptr a_device, VdpDecoderProfile a_profile, uint32_t a_width, uint32_t a_height, uint32_t n_max_references) : profile{a_profile} , width{a_width} , height{a_height} , max_references{n_max_references} { device = a_device; VADisplay va_dpy = device->va_dpy; if (!device->va_available) throw vdp::invalid_decoder_profile(); // initialize free_list. Initially they all free for (int k = 0; k < vdp::kMaxRenderTargets; k ++) free_list.push_back(k); VAProfile va_profile; VAStatus status; bool final_try = false; VdpDecoderProfile next_profile = profile; // Try to create decoder for asked profile. On failure try to create more advanced one while (not final_try) { profile = next_profile; switch (profile) { case VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE: va_profile = VAProfileH264ConstrainedBaseline; render_targets.resize(vdp::kNumRenderTargets); next_profile = VDP_DECODER_PROFILE_H264_BASELINE; break; case VDP_DECODER_PROFILE_H264_BASELINE: va_profile = VAProfileH264Baseline; render_targets.resize(vdp::kNumRenderTargets); next_profile = VDP_DECODER_PROFILE_H264_MAIN; break; case VDP_DECODER_PROFILE_H264_MAIN: va_profile = VAProfileH264Main; render_targets.resize(vdp::kNumRenderTargets); next_profile = VDP_DECODER_PROFILE_H264_HIGH; break; case VDP_DECODER_PROFILE_H264_HIGH: va_profile = VAProfileH264High; render_targets.resize(vdp::kNumRenderTargets); // there is no more advanced profile, so it's final try final_try = true; break; default: traceError("Decoder::Resource::Resource(): decoder %s not implemented\n", reverse_decoder_profile(profile)); throw vdp::invalid_decoder_profile(); } status = vaCreateConfig(va_dpy, va_profile, 
VAEntrypointVLD, nullptr, 0, &config_id); if (status == VA_STATUS_SUCCESS) // break loop if decoder created break; } if (status != VA_STATUS_SUCCESS) throw vdp::generic_error(); // Create surfaces. All video surfaces created here, rather than in VdpVideoSurfaceCreate. // VAAPI requires surfaces to be bound with context on its creation time, while VDPAU allows // to do it later. So here is a trick: VDP video surfaces get their va_surf dynamically in // DecoderRender. // TODO: check format of surfaces created #if VA_CHECK_VERSION(0, 34, 0) status = vaCreateSurfaces(va_dpy, VA_RT_FORMAT_YUV420, width, height, render_targets.data(), render_targets.size(), nullptr, 0); #else status = vaCreateSurfaces(va_dpy, width, height, VA_RT_FORMAT_YUV420, render_targets.size(), render_targets.data()); #endif if (status != VA_STATUS_SUCCESS) throw vdp::generic_error(); status = vaCreateContext(va_dpy, config_id, width, height, VA_PROGRESSIVE, render_targets.data(), render_targets.size(), &context_id); if (status != VA_STATUS_SUCCESS) throw vdp::generic_error(); } Resource::~Resource() { try { if (device->va_available) { const VADisplay va_dpy = device->va_dpy; vaDestroySurfaces(va_dpy, render_targets.data(), render_targets.size()); vaDestroyContext(va_dpy, context_id); vaDestroyConfig(va_dpy, config_id); } } catch (...) 
{ traceError("Decoder::Resource::~Resource(): caught exception\n"); } } VdpStatus CreateImpl(VdpDevice device_id, VdpDecoderProfile profile, uint32_t width, uint32_t height, uint32_t max_references, VdpDecoder *decoder) { if (!decoder) return VDP_STATUS_INVALID_POINTER; ResourceRef device{device_id}; auto data = make_shared(device, profile, width, height, max_references); *decoder = ResourceStorage::instance().insert(data); return VDP_STATUS_OK; } VdpStatus Create(VdpDevice device_id, VdpDecoderProfile profile, uint32_t width, uint32_t height, uint32_t max_references, VdpDecoder *decoder) { return check_for_exceptions(CreateImpl, device_id, profile, width, height, max_references, decoder); } VdpStatus DestroyImpl(VdpDecoder decoder_id) { ResourceRef decoder{decoder_id}; ResourceStorage::instance().drop(decoder_id); return VDP_STATUS_OK; } VdpStatus Destroy(VdpDecoder decoder_id) { return check_for_exceptions(DestroyImpl, decoder_id); } VdpStatus GetParametersImpl(VdpDecoder decoder_id, VdpDecoderProfile *profile, uint32_t *width, uint32_t *height) { ResourceRef decoder{decoder_id}; if (profile) *profile = decoder->profile; if (width) *width = decoder->width; if (height) *height = decoder->height; return VDP_STATUS_OK; } VdpStatus GetParameters(VdpDecoder decoder_id, VdpDecoderProfile *profile, uint32_t *width, uint32_t *height) { return check_for_exceptions(GetParametersImpl, decoder_id, profile, width, height); } VdpStatus h264_translate_reference_frames(shared_ptr &dst_surf, shared_ptr &decoder, VAPictureParameterBufferH264 *pic_param, const VdpPictureInfoH264 *vdppi) { // take new VA surface from buffer if needed if (dst_surf->va_surf == VA_INVALID_SURFACE) { if (decoder->free_list.size() == 0) return VDP_STATUS_RESOURCES; auto idx = decoder->free_list.back(); decoder->free_list.pop_back(); dst_surf->decoder = decoder; dst_surf->va_surf = decoder->render_targets[idx]; dst_surf->rt_idx = idx; } // current frame pic_param->CurrPic.picture_id = dst_surf->va_surf; 
pic_param->CurrPic.frame_idx = vdppi->frame_num; pic_param->CurrPic.flags = vdppi->is_reference ? VA_PICTURE_H264_SHORT_TERM_REFERENCE : 0; if (vdppi->field_pic_flag) { pic_param->CurrPic.flags |= vdppi->bottom_field_flag ? VA_PICTURE_H264_BOTTOM_FIELD : VA_PICTURE_H264_TOP_FIELD; } pic_param->CurrPic.TopFieldOrderCnt = vdppi->field_order_cnt[0]; pic_param->CurrPic.BottomFieldOrderCnt = vdppi->field_order_cnt[1]; // mark all pictures invalid preliminary for (int k = 0; k < 16; k ++) reset_va_picture_h264(&pic_param->ReferenceFrames[k]); // reference frames for (int k = 0; k < vdppi->num_ref_frames; k ++) { if (vdppi->referenceFrames[k].surface == VDP_INVALID_HANDLE) { reset_va_picture_h264(&pic_param->ReferenceFrames[k]); continue; } VdpReferenceFrameH264 const *vdp_ref = &vdppi->referenceFrames[k]; ResourceRef video_surf{vdp_ref->surface}; VAPictureH264 *va_ref = &pic_param->ReferenceFrames[k]; // take new VA surface from buffer if needed if (video_surf->va_surf == VA_INVALID_SURFACE) { if (decoder->free_list.size() == 0) return VDP_STATUS_RESOURCES; const auto idx = decoder->free_list.back(); decoder->free_list.pop_back(); dst_surf->decoder = decoder; dst_surf->va_surf = decoder->render_targets[idx]; dst_surf->rt_idx = idx; } va_ref->picture_id = video_surf->va_surf; va_ref->frame_idx = vdp_ref->frame_idx; va_ref->flags = vdp_ref->is_long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE : VA_PICTURE_H264_SHORT_TERM_REFERENCE; if (vdp_ref->top_is_reference && vdp_ref->bottom_is_reference) { // Full frame. This block intentionally left blank. No flags set. 
} else { if (vdp_ref->top_is_reference) va_ref->flags |= VA_PICTURE_H264_TOP_FIELD; else va_ref->flags |= VA_PICTURE_H264_BOTTOM_FIELD; } va_ref->TopFieldOrderCnt = vdp_ref->field_order_cnt[0]; va_ref->BottomFieldOrderCnt = vdp_ref->field_order_cnt[1]; } return VDP_STATUS_OK; } template inline void set_ptr_val(T1 *ptr, T2 val) { if (ptr) *ptr = val; } VdpStatus QueryCapabilitiesImpl(VdpDevice device_id, VdpDecoderProfile profile, VdpBool *is_supported, uint32_t *max_level, uint32_t *max_macroblocks, uint32_t *max_width, uint32_t *max_height) { if (!is_supported || !max_level || !max_macroblocks || !max_width || !max_height) return VDP_STATUS_INVALID_POINTER; ResourceRef device{device_id}; set_ptr_val(max_level, 0); set_ptr_val(max_macroblocks, 0); set_ptr_val(max_width, 0); set_ptr_val(max_height, 0); set_ptr_val(is_supported, 0); if (!device->va_available) return VDP_STATUS_OK; vector va_profile_list(vaMaxNumProfiles(device->va_dpy)); int num_profiles; VAStatus status = vaQueryConfigProfiles(device->va_dpy, va_profile_list.data(), &num_profiles); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; struct { int mpeg2_simple; int mpeg2_main; int h264_baseline; int h264_main; int h264_high; int vc1_simple; int vc1_main; int vc1_advanced; } available_profiles = {}; for (int k = 0; k < num_profiles; k ++) { switch (va_profile_list[k]) { case VAProfileMPEG2Main: available_profiles.mpeg2_main = 0; // fall through case VAProfileMPEG2Simple: available_profiles.mpeg2_simple = 0; break; case VAProfileH264High: available_profiles.h264_high = 1; // fall through case VAProfileH264Main: available_profiles.h264_main = 1; // fall through case VAProfileH264Baseline: available_profiles.h264_baseline = 1; // fall though case VAProfileH264ConstrainedBaseline: break; case VAProfileVC1Advanced: available_profiles.vc1_advanced = 0; // fall though case VAProfileVC1Main: available_profiles.vc1_main = 0; // fall though case VAProfileVC1Simple: available_profiles.vc1_simple = 0; break; 
// unhandled profiles case VAProfileH263Baseline: case VAProfileJPEGBaseline: default: // do nothing break; } } // TODO: How to determine max width and height width libva? set_ptr_val(max_width, 2048); set_ptr_val(max_height, 2048); set_ptr_val(max_macroblocks, 16384); switch (profile) { case VDP_DECODER_PROFILE_MPEG2_SIMPLE: set_ptr_val(is_supported, available_profiles.mpeg2_simple); set_ptr_val(max_level, VDP_DECODER_LEVEL_MPEG2_HL); break; case VDP_DECODER_PROFILE_MPEG2_MAIN: set_ptr_val(is_supported, available_profiles.mpeg2_main); set_ptr_val(max_level, VDP_DECODER_LEVEL_MPEG2_HL); break; case VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE: set_ptr_val(is_supported, available_profiles.h264_baseline || available_profiles.h264_main); set_ptr_val(max_level, VDP_DECODER_LEVEL_H264_5_1); break; case VDP_DECODER_PROFILE_H264_BASELINE: set_ptr_val(is_supported, available_profiles.h264_baseline); // TODO: Does underlying libva really support 5.1? set_ptr_val(max_level, VDP_DECODER_LEVEL_H264_5_1); break; case VDP_DECODER_PROFILE_H264_MAIN: set_ptr_val(is_supported, available_profiles.h264_main); set_ptr_val(max_level, VDP_DECODER_LEVEL_H264_5_1); break; case VDP_DECODER_PROFILE_H264_HIGH: set_ptr_val(is_supported, available_profiles.h264_high); set_ptr_val(max_level, VDP_DECODER_LEVEL_H264_5_1); break; case VDP_DECODER_PROFILE_VC1_SIMPLE: set_ptr_val(is_supported, available_profiles.vc1_simple); set_ptr_val(max_level, VDP_DECODER_LEVEL_VC1_SIMPLE_MEDIUM); break; case VDP_DECODER_PROFILE_VC1_MAIN: set_ptr_val(is_supported, available_profiles.vc1_main); set_ptr_val(max_level, VDP_DECODER_LEVEL_VC1_MAIN_HIGH); break; case VDP_DECODER_PROFILE_VC1_ADVANCED: set_ptr_val(is_supported, available_profiles.vc1_advanced); set_ptr_val(max_level, VDP_DECODER_LEVEL_VC1_ADVANCED_L4); break; // unsupported case VDP_DECODER_PROFILE_MPEG1: case VDP_DECODER_PROFILE_MPEG4_PART2_SP: case VDP_DECODER_PROFILE_MPEG4_PART2_ASP: case VDP_DECODER_PROFILE_DIVX4_QMOBILE: case 
VDP_DECODER_PROFILE_DIVX4_MOBILE: case VDP_DECODER_PROFILE_DIVX4_HOME_THEATER: case VDP_DECODER_PROFILE_DIVX4_HD_1080P: case VDP_DECODER_PROFILE_DIVX5_QMOBILE: case VDP_DECODER_PROFILE_DIVX5_MOBILE: case VDP_DECODER_PROFILE_DIVX5_HOME_THEATER: case VDP_DECODER_PROFILE_DIVX5_HD_1080P: default: break; } return VDP_STATUS_OK; } VdpStatus QueryCapabilities(VdpDevice device_id, VdpDecoderProfile profile, VdpBool *is_supported, uint32_t *max_level, uint32_t *max_macroblocks, uint32_t *max_width, uint32_t *max_height) { return check_for_exceptions(QueryCapabilitiesImpl, device_id, profile, is_supported, max_level, max_macroblocks, max_width, max_height); } void h264_translate_pic_param(VAPictureParameterBufferH264 *pic_param, uint32_t width, uint32_t height, const VdpPictureInfoH264 *vdppi, uint32_t level) { pic_param->picture_width_in_mbs_minus1 = (width - 1) / 16; pic_param->picture_height_in_mbs_minus1 = (height - 1) / 16; pic_param->bit_depth_luma_minus8 = 0; // TODO: deal with more than 8 bits pic_param->bit_depth_chroma_minus8 = 0; // same for luma pic_param->num_ref_frames = vdppi->num_ref_frames; #define SEQ_FIELDS(fieldname) pic_param->seq_fields.bits.fieldname #define PIC_FIELDS(fieldname) pic_param->pic_fields.bits.fieldname SEQ_FIELDS(chroma_format_idc) = 1; // TODO: not only YUV420 SEQ_FIELDS(residual_colour_transform_flag) = 0; SEQ_FIELDS(gaps_in_frame_num_value_allowed_flag)= 0; SEQ_FIELDS(frame_mbs_only_flag) = vdppi->frame_mbs_only_flag; SEQ_FIELDS(mb_adaptive_frame_field_flag) = vdppi->mb_adaptive_frame_field_flag; SEQ_FIELDS(direct_8x8_inference_flag) = vdppi->direct_8x8_inference_flag; SEQ_FIELDS(MinLumaBiPredSize8x8) = (level >= 31); SEQ_FIELDS(log2_max_frame_num_minus4) = vdppi->log2_max_frame_num_minus4; SEQ_FIELDS(pic_order_cnt_type) = vdppi->pic_order_cnt_type; SEQ_FIELDS(log2_max_pic_order_cnt_lsb_minus4) = vdppi->log2_max_pic_order_cnt_lsb_minus4; SEQ_FIELDS(delta_pic_order_always_zero_flag) = vdppi->delta_pic_order_always_zero_flag; 
pic_param->num_slice_groups_minus1 = 0; // TODO: vdppi->slice_count - 1; ??? pic_param->slice_group_map_type = 0; // ??? pic_param->slice_group_change_rate_minus1 = 0; // ??? pic_param->pic_init_qp_minus26 = vdppi->pic_init_qp_minus26; pic_param->pic_init_qs_minus26 = 0; // ??? pic_param->chroma_qp_index_offset = vdppi->chroma_qp_index_offset; pic_param->second_chroma_qp_index_offset = vdppi->second_chroma_qp_index_offset; PIC_FIELDS(entropy_coding_mode_flag) = vdppi->entropy_coding_mode_flag; PIC_FIELDS(weighted_pred_flag) = vdppi->weighted_pred_flag; PIC_FIELDS(weighted_bipred_idc) = vdppi->weighted_bipred_idc; PIC_FIELDS(transform_8x8_mode_flag) = vdppi->transform_8x8_mode_flag; PIC_FIELDS(field_pic_flag) = vdppi->field_pic_flag; PIC_FIELDS(constrained_intra_pred_flag) = vdppi->constrained_intra_pred_flag; PIC_FIELDS(pic_order_present_flag) = vdppi->pic_order_present_flag; PIC_FIELDS(deblocking_filter_control_present_flag) = vdppi->deblocking_filter_control_present_flag; PIC_FIELDS(redundant_pic_cnt_present_flag) = vdppi->redundant_pic_cnt_present_flag; PIC_FIELDS(reference_pic_flag) = vdppi->is_reference; pic_param->frame_num = vdppi->frame_num; #undef SEQ_FIELDS #undef PIC_FIELDS } void h264_translate_iq_matrix(VAIQMatrixBufferH264 *iq_matrix, const VdpPictureInfoH264 *vdppi) { for (int j = 0; j < 6; j ++) for (int k = 0; k < 16; k ++) iq_matrix->ScalingList4x4[j][k] = vdppi->scaling_lists_4x4[j][k]; for (int j = 0; j < 2; j ++) for (int k = 0; k < 64; k ++) iq_matrix->ScalingList8x8[j][k] = vdppi->scaling_lists_8x8[j][k]; } VdpStatus Render_h264(shared_ptr decoder, shared_ptr dst_surf, VdpPictureInfo const *picture_info, uint32_t bitstream_buffer_count, VdpBitstreamBuffer const *bitstream_buffers) { VADisplay va_dpy = decoder->device->va_dpy; VAStatus status; const auto *vdppi = static_cast(picture_info); // TODO: figure out where to get level const uint32_t level = 41; // preparing picture parameters and IQ matrix VAPictureParameterBufferH264 pic_param = {}; 
VAIQMatrixBufferH264 iq_matrix; const auto vs = h264_translate_reference_frames(dst_surf, decoder, &pic_param, vdppi); if (vs != VDP_STATUS_OK) { if (vs == VDP_STATUS_RESOURCES) { traceError("Decoder::Render_h264(): no surfaces left in buffer\n"); return VDP_STATUS_RESOURCES; } return VDP_STATUS_ERROR; } h264_translate_pic_param(&pic_param, decoder->width, decoder->height, vdppi, level); h264_translate_iq_matrix(&iq_matrix, vdppi); { GLXLockGuard guard; VABufferID pic_param_buf, iq_matrix_buf; status = vaCreateBuffer(va_dpy, decoder->context_id, VAPictureParameterBufferType, sizeof(VAPictureParameterBufferH264), 1, &pic_param, &pic_param_buf); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; status = vaCreateBuffer(va_dpy, decoder->context_id, VAIQMatrixBufferType, sizeof(VAIQMatrixBufferH264), 1, &iq_matrix, &iq_matrix_buf); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; // send data to decoding hardware status = vaBeginPicture(va_dpy, decoder->context_id, dst_surf->va_surf); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; status = vaRenderPicture(va_dpy, decoder->context_id, &pic_param_buf, 1); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; status = vaRenderPicture(va_dpy, decoder->context_id, &iq_matrix_buf, 1); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; vaDestroyBuffer(va_dpy, pic_param_buf); vaDestroyBuffer(va_dpy, iq_matrix_buf); } // merge bitstream buffers vector merged_bitstream; for (uint32_t k = 0; k < bitstream_buffer_count; k ++) { const auto *buf = static_cast(bitstream_buffers[k].bitstream); const auto buf_len = bitstream_buffers[k].bitstream_bytes; merged_bitstream.insert(merged_bitstream.end(), buf, buf + buf_len); } // Slice parameters // All slice data have been merged into one continuous buffer. But we must supply // slices one by one to the hardware decoder, so we need to delimit them. VDPAU // requires bitstream buffers to include slice start code (0x00 0x00 0x01). 
Those // will be used to calculate offsets and sizes of slice data in code below. RBSPState st_g{merged_bitstream}; // reference, global state int64_t nal_offset; try { nal_offset = st_g.navigate_to_nal_unit(); } catch (const RBSPState::error &) { traceError("Decoder::Render_h264(): no NAL header\n"); return VDP_STATUS_ERROR; } do { VASliceParameterBufferH264 sp_h264 = {}; // make a copy of global rbsp state for using in slice header parser RBSPState st{st_g}; int64_t nal_offset_next; st.reset_bit_counter(); try { nal_offset_next = st_g.navigate_to_nal_unit(); } catch (const RBSPState::error &) { nal_offset_next = -1; } // calculate end of current slice. Note (-3). It's slice start code length. const unsigned int end_pos = (nal_offset_next > 0) ? (nal_offset_next - 3) : merged_bitstream.size(); sp_h264.slice_data_size = end_pos - nal_offset; sp_h264.slice_data_offset = 0; sp_h264.slice_data_flag = VA_SLICE_DATA_FLAG_ALL; // TODO: this may be not entirely true for YUV444 // but if we limiting to YUV420, that's ok int ChromaArrayType = pic_param.seq_fields.bits.chroma_format_idc; // parse slice header and use its data to fill slice parameter buffer parse_slice_header(st, &pic_param, ChromaArrayType, vdppi->num_ref_idx_l0_active_minus1, vdppi->num_ref_idx_l1_active_minus1, &sp_h264); VABufferID slice_parameters_buf; GLXLockGuard guard; status = vaCreateBuffer(va_dpy, decoder->context_id, VASliceParameterBufferType, sizeof(VASliceParameterBufferH264), 1, &sp_h264, &slice_parameters_buf); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; status = vaRenderPicture(va_dpy, decoder->context_id, &slice_parameters_buf, 1); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; VABufferID slice_buf; status = vaCreateBuffer(va_dpy, decoder->context_id, VASliceDataBufferType, sp_h264.slice_data_size, 1, merged_bitstream.data() + nal_offset, &slice_buf); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; status = vaRenderPicture(va_dpy, decoder->context_id, 
&slice_buf, 1); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; vaDestroyBuffer(va_dpy, slice_parameters_buf); vaDestroyBuffer(va_dpy, slice_buf); if (nal_offset_next < 0) // nal_offset_next equals -1 when there is no slice break; // start code found. Thus that was the final slice. nal_offset = nal_offset_next; } while (1); { GLXLockGuard guard; status = vaEndPicture(va_dpy, decoder->context_id); if (status != VA_STATUS_SUCCESS) return VDP_STATUS_ERROR; } dst_surf->sync_va_to_glx = true; return VDP_STATUS_OK; } VdpStatus RenderImpl(VdpDecoder decoder_id, VdpVideoSurface target, VdpPictureInfo const *picture_info, uint32_t bitstream_buffer_count, VdpBitstreamBuffer const *bitstream_buffers) { if (not picture_info || not bitstream_buffers) return VDP_STATUS_INVALID_POINTER; ResourceRef decoder{decoder_id}; ResourceRef dst_surf{target}; if (decoder->profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE || decoder->profile == VDP_DECODER_PROFILE_H264_BASELINE || decoder->profile == VDP_DECODER_PROFILE_H264_MAIN || decoder->profile == VDP_DECODER_PROFILE_H264_HIGH) { // TODO: check exit code Render_h264(decoder, dst_surf, picture_info, bitstream_buffer_count, bitstream_buffers); } else { traceError("Decoder::RenderImpl(): no implementation for profile %s\n", reverse_decoder_profile(decoder->profile)); return VDP_STATUS_NO_IMPLEMENTATION; } return VDP_STATUS_OK; } VdpStatus Render(VdpDecoder decoder_id, VdpVideoSurface target, VdpPictureInfo const *picture_info, uint32_t bitstream_buffer_count, VdpBitstreamBuffer const *bitstream_buffers) { return check_for_exceptions(RenderImpl, decoder_id, target, picture_info, bitstream_buffer_count, bitstream_buffers); } } } // namespace vdp::Decoder libvdpau-va-gl-0.4.2/src/api-decoder.hh000066400000000000000000000043101277566164500176340ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a 
copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #include "api.hh" #include #include #include #include #include namespace vdp { namespace Decoder { struct Resource: public vdp::GenericResource { Resource(std::shared_ptr a_device, VdpDecoderProfile a_profile, uint32_t a_width, uint32_t a_height, uint32_t n_max_references); ~Resource(); VdpDecoderProfile profile; ///< decoder profile uint32_t width; uint32_t height; uint32_t max_references; ///< maximum count of reference frames VAConfigID config_id; ///< VA-API config id VAContextID context_id; ///< VA-API context id std::vector render_targets; ///< spare VA surfaces std::vector free_list; }; VdpDecoderQueryCapabilities QueryCapabilities; VdpDecoderCreate Create; VdpDecoderDestroy Destroy; VdpDecoderGetParameters GetParameters; VdpDecoderRender Render; } } // namespace vdp::Decoder libvdpau-va-gl-0.4.2/src/api-device.cc000066400000000000000000000467541277566164500174760ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * 
Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #define GL_GLEXT_PROTOTYPES #include "api-bitmap-surface.hh" #include "api-csc-matrix.hh" #include "api-decoder.hh" #include "api-device.hh" #include "api-output-surface.hh" #include "api-presentation-queue.hh" #include "api-video-mixer.hh" #include "api-video-surface.hh" #include "globals.hh" #include "glx-context.hh" #include "handle-storage.hh" #include "reverse-constant.hh" #include "trace.hh" #include "watermark.hh" #include #include #include #include #include #include #include #include using std::map; using std::mutex; using std::pair; using std::string; namespace vdp { class shader_compilation_failed: public std::exception { }; const string kImplemetationDescriptionString{"OpenGL/VAAPI backend for VDPAU"}; } // namespace vdp namespace vdp { VdpStatus GetApiVersion(uint32_t *api_version) { if (api_version) *api_version = VDPAU_VERSION; return VDP_STATUS_OK; } const char * GetErrorString(VdpStatus status) { return reverse_status(status); } VdpStatus GetInformationString(char const **information_string) { if (information_string) *information_string = kImplemetationDescriptionString.c_str(); return VDP_STATUS_OK; } VdpStatus GetProcAddress(VdpDevice, VdpFuncId function_id, void **function_pointer) { if (!function_pointer) return VDP_STATUS_INVALID_POINTER; switch (function_id) { case VDP_FUNC_ID_GET_ERROR_STRING: *function_pointer = reinterpret_cast(&vdp::GetErrorString); break; case VDP_FUNC_ID_GET_PROC_ADDRESS: *function_pointer = reinterpret_cast(&vdp::GetProcAddress); break; case VDP_FUNC_ID_GET_API_VERSION: *function_pointer = reinterpret_cast(&vdp::GetApiVersion); break; case VDP_FUNC_ID_GET_INFORMATION_STRING: *function_pointer = reinterpret_cast(&vdp::GetInformationString); break; case VDP_FUNC_ID_DEVICE_DESTROY: *function_pointer = reinterpret_cast(&vdp::Device::Destroy); break; case VDP_FUNC_ID_GENERATE_CSC_MATRIX: *function_pointer = reinterpret_cast(&vdp::GenerateCSCMatrix); break; case VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES: 
*function_pointer = reinterpret_cast(&vdp::VideoSurface::QueryCapabilities); break; case VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES: *function_pointer = reinterpret_cast( &vdp::VideoSurface::QueryGetPutBitsYCbCrCapabilities); break; case VDP_FUNC_ID_VIDEO_SURFACE_CREATE: *function_pointer = reinterpret_cast(&vdp::VideoSurface::Create); break; case VDP_FUNC_ID_VIDEO_SURFACE_DESTROY: *function_pointer = reinterpret_cast(&vdp::VideoSurface::Destroy); break; case VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS: *function_pointer = reinterpret_cast(&vdp::VideoSurface::GetParameters); break; case VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR: *function_pointer = reinterpret_cast(&vdp::VideoSurface::GetBitsYCbCr); break; case VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR: *function_pointer = reinterpret_cast(&vdp::VideoSurface::PutBitsYCbCr); break; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES: *function_pointer = reinterpret_cast(&vdp::OutputSurface::QueryCapabilities); break; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES: *function_pointer = reinterpret_cast(& vdp::OutputSurface::QueryGetPutBitsNativeCapabilities); break; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES: *function_pointer = reinterpret_cast( &vdp::OutputSurface::QueryPutBitsIndexedCapabilities); break; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES: *function_pointer = reinterpret_cast( &vdp::OutputSurface::QueryPutBitsYCbCrCapabilities); break; case VDP_FUNC_ID_OUTPUT_SURFACE_CREATE: *function_pointer = reinterpret_cast(&vdp::OutputSurface::Create); break; case VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY: *function_pointer = reinterpret_cast(&vdp::OutputSurface::Destroy); break; case VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS: *function_pointer = reinterpret_cast(&vdp::OutputSurface::GetParameters); break; case VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE: *function_pointer = reinterpret_cast(&vdp::OutputSurface::GetBitsNative); 
break; case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE: *function_pointer = reinterpret_cast(&vdp::OutputSurface::PutBitsNative); break; case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED: *function_pointer = reinterpret_cast(&vdp::OutputSurface::PutBitsIndexed); break; case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR: *function_pointer = reinterpret_cast(&vdp::OutputSurface::PutBitsYCbCr); break; case VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES: *function_pointer = reinterpret_cast(&vdp::BitmapSurface::QueryCapabilities); break; case VDP_FUNC_ID_BITMAP_SURFACE_CREATE: *function_pointer = reinterpret_cast(&vdp::BitmapSurface::Create); break; case VDP_FUNC_ID_BITMAP_SURFACE_DESTROY: *function_pointer = reinterpret_cast(&vdp::BitmapSurface::Destroy); break; case VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS: *function_pointer = reinterpret_cast(&vdp::BitmapSurface::GetParameters); break; case VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE: *function_pointer = reinterpret_cast(&vdp::BitmapSurface::PutBitsNative); break; case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE: *function_pointer = reinterpret_cast(&vdp::OutputSurface::RenderOutputSurface); break; case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE: *function_pointer = reinterpret_cast(&vdp::OutputSurface::RenderBitmapSurface); break; case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_VIDEO_SURFACE_LUMA: // deprecated by the spec *function_pointer = nullptr; break; case VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES: *function_pointer = reinterpret_cast(&vdp::Decoder::QueryCapabilities); break; case VDP_FUNC_ID_DECODER_CREATE: *function_pointer = reinterpret_cast(&vdp::Decoder::Create); break; case VDP_FUNC_ID_DECODER_DESTROY: *function_pointer = reinterpret_cast(&vdp::Decoder::Destroy); break; case VDP_FUNC_ID_DECODER_GET_PARAMETERS: *function_pointer = reinterpret_cast(&vdp::Decoder::GetParameters); break; case VDP_FUNC_ID_DECODER_RENDER: *function_pointer = reinterpret_cast(&vdp::Decoder::Render); break; case 
VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT: *function_pointer = reinterpret_cast(&vdp::VideoMixer::QueryFeatureSupport); break; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT: *function_pointer = reinterpret_cast(&vdp::VideoMixer::QueryParameterSupport); break; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT: *function_pointer = reinterpret_cast(&vdp::VideoMixer::QueryAttributeSupport); break; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE: *function_pointer = reinterpret_cast(&vdp::VideoMixer::QueryParameterValueRange); break; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE: *function_pointer = reinterpret_cast(&vdp::VideoMixer::QueryAttributeValueRange); break; case VDP_FUNC_ID_VIDEO_MIXER_CREATE: *function_pointer = reinterpret_cast(&vdp::VideoMixer::Create); break; case VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES: *function_pointer = reinterpret_cast(&vdp::VideoMixer::SetFeatureEnables); break; case VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES: *function_pointer = reinterpret_cast(&vdp::VideoMixer::SetAttributeValues); break; case VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT: *function_pointer = reinterpret_cast(&vdp::VideoMixer::GetFeatureSupport); break; case VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES: *function_pointer = reinterpret_cast(&vdp::VideoMixer::GetFeatureEnables); break; case VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES: *function_pointer = reinterpret_cast(&vdp::VideoMixer::GetParameterValues); break; case VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES: *function_pointer = reinterpret_cast(&vdp::VideoMixer::GetAttributeValues); break; case VDP_FUNC_ID_VIDEO_MIXER_DESTROY: *function_pointer = reinterpret_cast(&vdp::VideoMixer::Destroy); break; case VDP_FUNC_ID_VIDEO_MIXER_RENDER: *function_pointer = reinterpret_cast(&vdp::VideoMixer::Render); break; case VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY: *function_pointer = reinterpret_cast(&vdp::PresentationQueue::TargetDestroy); break; case 
VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE: *function_pointer = reinterpret_cast(&vdp::PresentationQueue::Create); break; case VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY: *function_pointer = reinterpret_cast(&vdp::PresentationQueue::Destroy); break; case VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR: *function_pointer = reinterpret_cast(&vdp::PresentationQueue::SetBackgroundColor); break; case VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR: *function_pointer = reinterpret_cast(&vdp::PresentationQueue::GetBackgroundColor); break; case VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME: *function_pointer = reinterpret_cast(&vdp::PresentationQueue::GetTime); break; case VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY: *function_pointer = reinterpret_cast(&vdp::PresentationQueue::Display); break; case VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE: *function_pointer = reinterpret_cast( &vdp::PresentationQueue::BlockUntilSurfaceIdle); break; case VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS: *function_pointer = reinterpret_cast(&vdp::PresentationQueue::QuerySurfaceStatus); break; case VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER: *function_pointer = reinterpret_cast(&vdp::PreemptionCallbackRegister); break; case VDP_FUNC_ID_BASE_WINSYS: *function_pointer = reinterpret_cast(&vdp::PresentationQueue::TargetCreateX11); break; default: *function_pointer = nullptr; break; } // switch if (*function_pointer == nullptr) return VDP_STATUS_INVALID_FUNC_ID; return VDP_STATUS_OK; } VdpStatus PreemptionCallbackRegister(VdpDevice device, VdpPreemptionCallback callback, void *context) { std::ignore = device; std::ignore = callback; std::ignore = context; return VDP_STATUS_OK; } namespace Device { Resource::Resource(Display *a_display, int a_screen) : dpy{not not global.quirks.buggy_XCloseDisplay} , screen{a_screen} , glc{dpy.get(), screen} { { GLXLockGuard glx_lock_guard; root = DefaultRootWindow(dpy.get()); XWindowAttributes wnd_attrs; XGetWindowAttributes(dpy.get(), root, &wnd_attrs); 
color_depth = wnd_attrs.depth; fn.glXBindTexImageEXT = (PFNGLXBINDTEXIMAGEEXTPROC)glXGetProcAddress((GLubyte *)"glXBindTexImageEXT"); fn.glXReleaseTexImageEXT = (PFNGLXRELEASETEXIMAGEEXTPROC)glXGetProcAddress((GLubyte *)"glXReleaseTexImageEXT"); } if (!fn.glXBindTexImageEXT || !fn.glXReleaseTexImageEXT) { traceError("error (%s): can't get glXBindTexImageEXT address\n"); throw std::bad_alloc(); } GLXThreadLocalContext glc_guard{root}; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); // initialize VAAPI va_available = 0; if (global.quirks.avoid_va) { // pretend there is no VA-API available } else { va_dpy = vaGetDisplay(dpy.get()); VAStatus status = vaInitialize(va_dpy, &va_version_major, &va_version_minor); if (status == VA_STATUS_SUCCESS) va_available = 1; } compile_shaders(); glGenTextures(1, &watermark_tex_id); glBindTexture(GL_TEXTURE_2D, watermark_tex_id); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, watermark_width, watermark_height, 0, GL_BGRA, GL_UNSIGNED_BYTE, watermark_data); glFinish(); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { traceError("Device::Resource::Resource(): gl error %d\n", gl_error); throw vdp::generic_error(); } } template void destroy_orphaned_resources(VdpDevice device_id) { for (const auto res_id: ResourceStorage::instance().enumerate()) { try { ResourceRef res{res_id}; if (res->device->id == device_id) ResourceStorage::instance().drop(res_id); } catch (const vdp::resource_not_found &) { // ignore missing resources } } } Resource::~Resource() { try { // cleaup libva if (va_available) vaTerminate(va_dpy); { GLXThreadLocalContext guard{root}; glDeleteTextures(1, 
&watermark_tex_id); glBindFramebuffer(GL_FRAMEBUFFER, 0); destroy_shaders(); } { GLXLockGuard guard; glXMakeCurrent(dpy.get(), None, nullptr); } const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) traceError("Device::Resource::~Resource(): gl error %d\n", gl_error); } catch (...) { traceError("Device::Resource::~Resource(): caught exception\n"); } } void Resource::compile_shaders() { for (int k = 0; k < SHADER_COUNT; k ++) { struct shader_s *s = &glsl_shaders[k]; int ok; const GLuint f_shader = glCreateShader(GL_FRAGMENT_SHADER); glShaderSource(f_shader, 1, &s->body, &s->len); glCompileShader(f_shader); glGetShaderiv(f_shader, GL_COMPILE_STATUS, &ok); if (!ok) { GLint errmsg_len; glGetShaderiv(f_shader, GL_INFO_LOG_LENGTH, &errmsg_len); std::vector errmsg(errmsg_len); glGetShaderInfoLog(f_shader, errmsg.size(), nullptr, errmsg.data()); traceError("Device::Resource::compile_shaders(): compilation of shader #%d failed with " "'%s'\n", k, errmsg.data()); glDeleteShader(f_shader); throw shader_compilation_failed(); } const GLuint program = glCreateProgram(); glAttachShader(program, f_shader); glLinkProgram(program); glGetProgramiv(program, GL_LINK_STATUS, &ok); if (!ok) { GLint errmsg_len; glGetProgramiv(program, GL_INFO_LOG_LENGTH, &errmsg_len); std::vector errmsg(errmsg_len); glGetProgramInfoLog(program, errmsg.size(), nullptr, errmsg.data()); traceError("Device::Resource::compile_shaders(): linking of shader #%d failed with " "'%s'\n", k, errmsg.data()); glDeleteProgram(program); glDeleteShader(f_shader); throw shader_compilation_failed(); } shaders[k].f_shader = f_shader; shaders[k].program = program; switch (k) { case glsl_YV12_RGBA: case glsl_NV12_RGBA: shaders[k].uniform.tex_0 = glGetUniformLocation(program, "tex[0]"); shaders[k].uniform.tex_1 = glGetUniformLocation(program, "tex[1]"); break; case glsl_red_to_alpha_swizzle: shaders[k].uniform.tex_0 = glGetUniformLocation(program, "tex_0"); break; } } } void Resource::destroy_shaders() { for (int k = 
0; k < SHADER_COUNT; k ++) { glDeleteProgram(shaders[k].program); glDeleteShader(shaders[k].f_shader); } } VdpStatus CreateX11Impl(Display *display_orig, int screen, VdpDevice *device, VdpGetProcAddress **get_proc_address) { if (!display_orig || !device) return VDP_STATUS_INVALID_POINTER; auto data = std::make_shared(display_orig, screen); *device = vdp::ResourceStorage::instance().insert(data); if (get_proc_address) *get_proc_address = &vdp::GetProcAddress; return VDP_STATUS_OK; } VdpStatus CreateX11(Display *display_orig, int screen, VdpDevice *device, VdpGetProcAddress **get_proc_address) { return check_for_exceptions(CreateX11Impl, display_orig, screen, device, get_proc_address); } VdpStatus DestroyImpl(VdpDevice device_id) { ResourceRef device{device_id}; ResourceStorage::instance().drop(device_id); destroy_orphaned_resources(device_id); destroy_orphaned_resources(device_id); destroy_orphaned_resources(device_id); destroy_orphaned_resources(device_id); destroy_orphaned_resources(device_id); destroy_orphaned_resources(device_id); destroy_orphaned_resources(device_id); return VDP_STATUS_OK; } VdpStatus Destroy(VdpDevice device_id) { return check_for_exceptions(DestroyImpl, device_id); } } } // namespace vdp::Device libvdpau-va-gl-0.4.2/src/api-device.hh000066400000000000000000000057141277566164500174770ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions 
of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #include "api.hh" #include "glx-context.hh" #include "shaders.h" #include "x-display-ref.hh" #include #include #include #include #include namespace vdp { namespace Device { struct Resource: public vdp::GenericResource { Resource(Display *a_dpy, int a_screen); ~Resource(); vdp::XDisplayRef dpy; ///< own X display connection int screen; ///< X screen int color_depth; ///< screen color depth GLXGlobalContext glc; ///< master GL context Window root; ///< X drawable (root window) used for offscreen drawing VADisplay va_dpy; ///< VA display int va_available; ///< 1 if VA-API available int va_version_major; int va_version_minor; GLuint watermark_tex_id; ///< GL texture id for watermark struct { GLuint f_shader; GLuint program; struct { int tex_0; int tex_1; } uniform; } shaders[SHADER_COUNT]; struct { PFNGLXBINDTEXIMAGEEXTPROC glXBindTexImageEXT; PFNGLXRELEASETEXIMAGEEXTPROC glXReleaseTexImageEXT; } fn; private: void compile_shaders(); void destroy_shaders(); }; VdpStatus CreateX11(Display *display, int screen, VdpDevice *device, VdpGetProcAddress **get_proc_address); VdpDeviceDestroy Destroy; } // namespace Device VdpGetApiVersion GetApiVersion; VdpGetErrorString GetErrorString; VdpGetInformationString GetInformationString; VdpGetProcAddress GetProcAddress; VdpPreemptionCallbackRegister PreemptionCallbackRegister; } // namespace vdp libvdpau-va-gl-0.4.2/src/api-output-surface.cc000066400000000000000000000743661277566164500212250ustar00rootroot00000000000000/* * Copyright 
2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
 */

#define GL_GLEXT_PROTOTYPES
#include "api-bitmap-surface.hh"
#include "api-device.hh"
#include "api-output-surface.hh"
#include "glx-context.hh"
#include "handle-storage.hh"
#include "reverse-constant.hh"
#include "trace.hh"
#include
#include
#include
#include

using std::shared_ptr;
using std::make_shared;

namespace vdp { namespace OutputSurface {

// GL blend configuration translated from a VdpOutputSurfaceRenderBlendState.
// invalid_func / invalid_eq are set to 1 when a VDPAU constant had no GL
// counterpart, so callers can reject the render call with the proper status.
struct blend_state_struct {
    GLuint srcFuncRGB;
    GLuint srcFuncAlpha;
    GLuint dstFuncRGB;
    GLuint dstFuncAlpha;
    GLuint modeRGB;
    GLuint modeAlpha;
    int invalid_func;
    int invalid_eq;
};

// Map a VDPAU blend factor onto the equivalent GL blend factor.
// Returns GL_INVALID_VALUE for values outside the known set.
static GLuint
vdpBlendFuncToGLBlendFunc(VdpOutputSurfaceRenderBlendFactor blend_factor)
{
    switch (blend_factor) {
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO:
        return GL_ZERO;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE:
        return GL_ONE;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_COLOR:
        return GL_SRC_COLOR;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
        return GL_ONE_MINUS_SRC_COLOR;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA:
        return GL_SRC_ALPHA;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
        return GL_ONE_MINUS_SRC_ALPHA;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_ALPHA:
        return GL_DST_ALPHA;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
        return GL_ONE_MINUS_DST_ALPHA;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_COLOR:
        return GL_DST_COLOR;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
        return GL_ONE_MINUS_DST_COLOR;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA_SATURATE:
        return GL_SRC_ALPHA_SATURATE;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_COLOR:
        return GL_CONSTANT_COLOR;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
        return GL_ONE_MINUS_CONSTANT_COLOR;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_ALPHA:
        return GL_CONSTANT_ALPHA;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
        return GL_ONE_MINUS_CONSTANT_ALPHA;
    default:
        return GL_INVALID_VALUE;
    }
}

static GLenum
vdpBlendEquationToGLEquation(VdpOutputSurfaceRenderBlendEquation blend_equation)
{
    // Map a VDPAU blend equation onto the equivalent GL blend equation;
    // GL_INVALID_VALUE marks unknown input.
    switch (blend_equation) {
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_SUBTRACT:
        return GL_FUNC_SUBTRACT;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_REVERSE_SUBTRACT:
        return GL_FUNC_REVERSE_SUBTRACT;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD:
        return GL_FUNC_ADD;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MIN:
        return GL_MIN;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MAX:
        return GL_MAX;
    default:
        return GL_INVALID_VALUE;
    }
}

// Draw one textured/colored quad at dstRect with the given blend state.
// The low two bits of flags select the rotation: texture coordinates of the
// four corners are permuted accordingly.  When has_src_surf is false no
// texture coordinates are emitted (pure fill).  colors supplies either one
// color for the whole quad or, with VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX,
// one color per corner.  Caller must have a current GL context and matrices
// already set up.
static void
compose_surfaces(struct blend_state_struct bs, VdpRect srcRect, VdpRect dstRect,
                 VdpColor const *colors, int flags, bool has_src_surf)
{
    glBlendFuncSeparate(bs.srcFuncRGB, bs.dstFuncRGB, bs.srcFuncAlpha, bs.dstFuncAlpha);
    glBlendEquationSeparate(bs.modeRGB, bs.modeAlpha);

    glColor4f(1, 1, 1, 1);
    glBegin(GL_QUADS);

    // corner 0: (x0, y0)
    if (has_src_surf) {
        switch (flags & 3) {
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_0:
            glTexCoord2i(srcRect.x0, srcRect.y0); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_90:
            glTexCoord2i(srcRect.x0, srcRect.y1); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_180:
            glTexCoord2i(srcRect.x1, srcRect.y1); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_270:
            glTexCoord2i(srcRect.x1, srcRect.y0); break;
        }
    }
    if (colors)
        glColor4f(colors[0].red, colors[0].green, colors[0].blue, colors[0].alpha);
    glVertex2f(dstRect.x0, dstRect.y0);

    // corner 1: (x1, y0)
    if (has_src_surf) {
        switch (flags & 3) {
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_0:
            glTexCoord2i(srcRect.x1, srcRect.y0); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_90:
            glTexCoord2i(srcRect.x0, srcRect.y0); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_180:
            glTexCoord2i(srcRect.x0, srcRect.y1); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_270:
            glTexCoord2i(srcRect.x1, srcRect.y1); break;
        }
    }
    if (colors && (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX))
        glColor4f(colors[1].red, colors[1].green, colors[1].blue, colors[1].alpha);
    glVertex2f(dstRect.x1, dstRect.y0);

    // corner 2: (x1, y1)
    if (has_src_surf) {
        switch (flags & 3) {
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_0:
            glTexCoord2i(srcRect.x1, srcRect.y1); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_90:
            glTexCoord2i(srcRect.x1, srcRect.y0); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_180:
            glTexCoord2i(srcRect.x0, srcRect.y0); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_270:
            glTexCoord2i(srcRect.x0, srcRect.y1); break;
        }
    }
    if (colors && (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX))
        glColor4f(colors[2].red, colors[2].green, colors[2].blue, colors[2].alpha);
    glVertex2f(dstRect.x1, dstRect.y1);

    // corner 3: (x0, y1)
    if (has_src_surf) {
        switch (flags & 3) {
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_0:
            glTexCoord2i(srcRect.x0, srcRect.y1); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_90:
            glTexCoord2i(srcRect.x1, srcRect.y1); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_180:
            glTexCoord2i(srcRect.x1, srcRect.y0); break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_270:
            glTexCoord2i(srcRect.x0, srcRect.y0); break;
        }
    }
    if (colors && (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX))
        glColor4f(colors[3].red, colors[3].green, colors[3].blue, colors[3].alpha);
    glVertex2f(dstRect.x0, dstRect.y1);

    glEnd();
    glColor4f(1, 1, 1, 1);
}

// Translate a (possibly NULL) VdpOutputSurfaceRenderBlendState into GL terms.
// NULL selects the VDPAU defaults: src = ONE, dst = ZERO, equation = ADD.
static struct blend_state_struct
vdpBlendStateToGLBlendState(VdpOutputSurfaceRenderBlendState const *blend_state)
{
    struct blend_state_struct bs;
    bs.invalid_func = 0;
    bs.invalid_eq = 0;

    // it's ok to pass NULL as blend_state
    if (blend_state) {
        bs.srcFuncRGB = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_source_color);
        bs.srcFuncAlpha = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_source_alpha);
        bs.dstFuncRGB = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_destination_color);
        bs.dstFuncAlpha = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_destination_alpha);
    } else {
        bs.srcFuncRGB = bs.srcFuncAlpha = GL_ONE;
        bs.dstFuncRGB = bs.dstFuncAlpha = GL_ZERO;
    }

    if (bs.srcFuncRGB == GL_INVALID_VALUE || bs.srcFuncAlpha == GL_INVALID_VALUE ||
        bs.dstFuncRGB == GL_INVALID_VALUE || bs.dstFuncAlpha == GL_INVALID_VALUE)
    {
        bs.invalid_func = 1;
    }

    if (blend_state) {
        bs.modeRGB = vdpBlendEquationToGLEquation(blend_state->blend_equation_color);
        bs.modeAlpha = vdpBlendEquationToGLEquation(blend_state->blend_equation_alpha);
    } else {
        bs.modeRGB = bs.modeAlpha = GL_FUNC_ADD;
    }

    if (bs.modeRGB == GL_INVALID_VALUE || bs.modeAlpha == GL_INVALID_VALUE)
        bs.invalid_eq = 1;

    return bs;
}

// Allocates the GL texture + FBO backing an output surface and clears it
// to transparent black.  Throws on oversized dimensions, unsupported RGBA
// format, incomplete framebuffer, or any GL error.
Resource::Resource(shared_ptr a_device, VdpRGBAFormat a_rgba_format, uint32_t a_width,
                   uint32_t a_height)
    : rgba_format{a_rgba_format}
    , width{a_width}
    , height{a_height}
    , first_presentation_time{0}
    , status{VDP_PRESENTATION_QUEUE_STATUS_IDLE}
{
    // TODO: figure out reasonable limits
    if (width > 4096 || height > 4096)
        throw vdp::invalid_size();

    device = a_device;

    // pick GL internal format / external format / pixel type matching the
    // requested VDPAU RGBA format
    switch (rgba_format) {
    case VDP_RGBA_FORMAT_B8G8R8A8:
        gl_internal_format = GL_RGBA;
        gl_format = GL_BGRA;
        gl_type = GL_UNSIGNED_BYTE;
        bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_R8G8B8A8:
        gl_internal_format = GL_RGBA;
        gl_format = GL_RGBA;
        gl_type = GL_UNSIGNED_BYTE;
        bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_R10G10B10A2:
        gl_internal_format = GL_RGB10_A2;
        gl_format = GL_RGBA;
        gl_type = GL_UNSIGNED_INT_10_10_10_2;
        bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_B10G10R10A2:
        gl_internal_format = GL_RGB10_A2;
        gl_format = GL_BGRA;
        gl_type = GL_UNSIGNED_INT_10_10_10_2;
        bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_A8:
        // stored as single-channel red; a swizzling shader is applied at
        // render time (see RenderBitmapSurfaceImpl)
        gl_internal_format = GL_RGBA;
        gl_format = GL_RED;
        gl_type = GL_UNSIGNED_BYTE;
        bytes_per_pixel = 1;
        break;
    default:
        traceError("OutputSurface::Resource::Resource(): %s is not implemented\n",
                   reverse_rgba_format(rgba_format));
        throw vdp::invalid_rgba_format();
    }

    GLXThreadLocalContext guard{device};

    glGenTextures(1, &tex_id);
    glBindTexture(GL_TEXTURE_2D, tex_id);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    // reserve texture data
    glTexImage2D(GL_TEXTURE_2D, 0, gl_internal_format, width, height, 0, gl_format,
                 gl_type, nullptr);

    glGenFramebuffers(1, &fbo_id);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo_id);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex_id, 0);

    const auto gl_status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (gl_status != GL_FRAMEBUFFER_COMPLETE) {
        traceError("OutputSurface::Resource::Resource(): framebuffer not ready, %d\n", gl_status);
        throw vdp::generic_error();
    }

    // start from a fully transparent surface
    glClearColor(0.0, 0.0, 0.0, 0.0);
    glClear(GL_COLOR_BUFFER_BIT);
    glFinish();

    const auto gl_error = glGetError();
    if (gl_error != GL_NO_ERROR) {
        traceError("OutputSurface::Resource::Resource(): gl error %d\n", gl_error);
        throw vdp::generic_error();
    }
}

// Releases the GL texture and framebuffer.  Never lets exceptions escape
// (destructor), only logs them.
Resource::~Resource()
{
    try {
        GLXThreadLocalContext guard{device};

        glDeleteTextures(1, &tex_id);
        glDeleteFramebuffers(1, &fbo_id);

        const auto gl_error = glGetError();
        if (gl_error != GL_NO_ERROR)
            traceError("OutputSurface::Resource::~Resource(): gl error %d\n", gl_error);

    } catch (...)
{ traceError("OutputSurface::Resource::~Resource(): caught exception\n"); } } VdpStatus CreateImpl(VdpDevice device_id, VdpRGBAFormat rgba_format, uint32_t width, uint32_t height, VdpOutputSurface *surface) { if (!surface) return VDP_STATUS_INVALID_POINTER; ResourceRef device{device_id}; auto data = std::make_shared(device, rgba_format, width, height); *surface = ResourceStorage::instance().insert(data); return VDP_STATUS_OK; } VdpStatus Create(VdpDevice device_id, VdpRGBAFormat rgba_format, uint32_t width, uint32_t height, VdpOutputSurface *surface) { return check_for_exceptions(CreateImpl, device_id, rgba_format, width, height, surface); } VdpStatus DestroyImpl(VdpOutputSurface surface_id) { ResourceRef surface{surface_id}; ResourceStorage::instance().drop(surface_id); return VDP_STATUS_OK; } VdpStatus Destroy(VdpOutputSurface surface_id) { return check_for_exceptions(DestroyImpl, surface_id); } VdpStatus GetBitsNativeImpl(VdpOutputSurface surface_id, VdpRect const *source_rect, void *const *destination_data, uint32_t const *destination_pitches) { if (!destination_data || !destination_pitches) return VDP_STATUS_INVALID_POINTER; ResourceRef surface{surface_id}; VdpRect src_rect = {0, 0, surface->width, surface->height}; if (source_rect) src_rect = *source_rect; GLXThreadLocalContext guard{surface->device}; glBindFramebuffer(GL_FRAMEBUFFER, surface->fbo_id); glReadBuffer(GL_COLOR_ATTACHMENT0); glPixelStorei(GL_UNPACK_ROW_LENGTH, destination_pitches[0] / surface->bytes_per_pixel); if (surface->bytes_per_pixel != 4) glPixelStorei(GL_PACK_ALIGNMENT, 1); glReadPixels(src_rect.x0, src_rect.y0, src_rect.x1 - src_rect.x0, src_rect.y1 - src_rect.y0, surface->gl_format, surface->gl_type, destination_data[0]); glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); if (surface->bytes_per_pixel != 4) glPixelStorei(GL_PACK_ALIGNMENT, 4); glFinish(); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { traceError("OutputSurface::GetBitsNativeImpl(): gl error %d\n", gl_error); 
        return VDP_STATUS_ERROR;
    }

    return VDP_STATUS_OK;
}

VdpStatus
GetBitsNative(VdpOutputSurface surface_id, VdpRect const *source_rect,
              void *const *destination_data, uint32_t const *destination_pitches)
{
    return check_for_exceptions(GetBitsNativeImpl, surface_id, source_rect, destination_data,
                                destination_pitches);
}

// Reports the format and dimensions the surface was created with.
VdpStatus
GetParametersImpl(VdpOutputSurface surface_id, VdpRGBAFormat *rgba_format, uint32_t *width,
                  uint32_t *height)
{
    if (!rgba_format || !width || !height)
        return VDP_STATUS_INVALID_POINTER;

    ResourceRef surface{surface_id};

    *rgba_format = surface->rgba_format;
    *width = surface->width;
    *height = surface->height;

    return VDP_STATUS_OK;
}

VdpStatus
GetParameters(VdpOutputSurface surface_id, VdpRGBAFormat *rgba_format, uint32_t *width,
              uint32_t *height)
{
    return check_for_exceptions(GetParametersImpl, surface_id, rgba_format, width, height);
}

// Uploads palette-indexed pixels: each index is expanded through color_table
// into BGRA on the CPU, then the result is uploaded into the surface texture.
// Only the I8A8 source format and the B8G8R8X8 color table format are handled.
VdpStatus
PutBitsIndexedImpl(VdpOutputSurface surface_id, VdpIndexedFormat source_indexed_format,
                   void const *const *source_data, uint32_t const *source_pitch,
                   VdpRect const *destination_rect, VdpColorTableFormat color_table_format,
                   void const *color_table)
{
    if (!source_data || !source_pitch || !color_table)
        return VDP_STATUS_INVALID_POINTER;

    ResourceRef surface{surface_id};

    VdpRect dst_rect = {0, 0, surface->width, surface->height};
    if (destination_rect)
        dst_rect = *destination_rect;

    // there are no other formats anyway
    if (color_table_format != VDP_COLOR_TABLE_FORMAT_B8G8R8X8) {
        // TODO: investigate
        return VDP_STATUS_INVALID_COLOR_TABLE_FORMAT;
    }

    const auto color_table32 = static_cast(color_table);

    GLXThreadLocalContext guard{surface->device};

    switch (source_indexed_format) {
    case VDP_INDEXED_FORMAT_I8A8:
        // TODO: use shader?
        do {
            const uint32_t dstRectWidth = dst_rect.x1 - dst_rect.x0;
            const uint32_t dstRectHeight = dst_rect.y1 - dst_rect.y0;

            // expand (index, alpha) pairs into 32-bit BGRA: color from the
            // table, alpha from the source byte
            std::vector unpacked_buf(dstRectWidth * dstRectHeight);

            for (uint32_t y = 0; y < dstRectHeight; y ++) {
                auto src_ptr = static_cast(source_data[0]);
                src_ptr += y * source_pitch[0];
                uint32_t *dst_ptr = unpacked_buf.data() + y * dstRectWidth;

                for (uint32_t x = 0; x < dstRectWidth; x ++) {
                    const uint8_t i = *src_ptr++;
                    const uint32_t a = (*src_ptr++) << 24;
                    dst_ptr[x] = (color_table32[i] & 0x00ffffff) + a;
                }
            }

            glBindTexture(GL_TEXTURE_2D, surface->tex_id);
            glTexSubImage2D(GL_TEXTURE_2D, 0, dst_rect.x0, dst_rect.y0,
                            dst_rect.x1 - dst_rect.x0, dst_rect.y1 - dst_rect.y0,
                            GL_BGRA, GL_UNSIGNED_BYTE, unpacked_buf.data());
            glFinish();

            const auto gl_error = glGetError();
            if (gl_error != GL_NO_ERROR) {
                traceError("OutputSurface::PutBitsIndexedImpl(): gl error %d\n", gl_error);
                return VDP_STATUS_ERROR;
            }
        } while (0);
        break;

    default:
        traceError("OutputSurface::PutBitsIndexedImpl(): unsupported indexed format %s\n",
                   reverse_indexed_format(source_indexed_format));
        return VDP_STATUS_INVALID_INDEXED_FORMAT;
    }

    return VDP_STATUS_OK;
}

VdpStatus
PutBitsIndexed(VdpOutputSurface surface_id, VdpIndexedFormat source_indexed_format,
               void const *const *source_data, uint32_t const *source_pitch,
               VdpRect const *destination_rect, VdpColorTableFormat color_table_format,
               void const *color_table)
{
    return check_for_exceptions(PutBitsIndexedImpl, surface_id, source_indexed_format,
                                source_data, source_pitch, destination_rect,
                                color_table_format, color_table);
}

// Uploads client pixels (surface's native format) into a rectangle of the
// surface texture.  source_pitches[0] is the source row pitch in bytes.
VdpStatus
PutBitsNativeImpl(VdpOutputSurface surface_id, void const *const *source_data,
                  uint32_t const *source_pitches, VdpRect const *destination_rect)
{
    if (!source_data || !source_pitches)
        return VDP_STATUS_INVALID_POINTER;

    ResourceRef surface{surface_id};

    VdpRect dst_rect = {0, 0, surface->width, surface->height};
    if (destination_rect)
        dst_rect = *destination_rect;

    GLXThreadLocalContext guard{surface->device};

    glBindTexture(GL_TEXTURE_2D, surface->tex_id);

    // texture upload is governed by the unpack store state
    glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0] / surface->bytes_per_pixel);
    if (surface->bytes_per_pixel != 4)
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

    glTexSubImage2D(GL_TEXTURE_2D, 0, dst_rect.x0, dst_rect.y0, dst_rect.x1 - dst_rect.x0,
                    dst_rect.y1 - dst_rect.y0, surface->gl_format, surface->gl_type,
                    source_data[0]);

    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    if (surface->bytes_per_pixel != 4)
        glPixelStorei(GL_UNPACK_ALIGNMENT, 4);

    glFinish();

    const auto gl_error = glGetError();
    if (gl_error != GL_NO_ERROR) {
        traceError("OutputSurface::PutBitsNativeImpl(): gl error %d\n", gl_error);
        return VDP_STATUS_ERROR;
    }

    return VDP_STATUS_OK;
}

VdpStatus
PutBitsNative(VdpOutputSurface surface_id, void const *const *source_data,
              uint32_t const *source_pitches, VdpRect const *destination_rect)
{
    return check_for_exceptions(PutBitsNativeImpl, surface_id, source_data, source_pitches,
                                destination_rect);
}

// Not implemented: YCbCr upload into an output surface.
VdpStatus
PutBitsYCbCrImpl(VdpOutputSurface surface, VdpYCbCrFormat source_ycbcr_format,
                 void const *const *source_data, uint32_t const *source_pitches,
                 VdpRect const *destination_rect, VdpCSCMatrix const *csc_matrix)
{
    std::ignore = surface;
    std::ignore = source_ycbcr_format;
    std::ignore = source_data;
    std::ignore = source_pitches;
    std::ignore = destination_rect;
    std::ignore = csc_matrix;

    return VDP_STATUS_NO_IMPLEMENTATION;
}

VdpStatus
PutBitsYCbCr(VdpOutputSurface surface, VdpYCbCrFormat source_ycbcr_format,
             void const *const *source_data, uint32_t const *source_pitches,
             VdpRect const *destination_rect, VdpCSCMatrix const *csc_matrix)
{
    return check_for_exceptions(PutBitsYCbCrImpl, surface, source_ycbcr_format, source_data,
                                source_pitches, destination_rect, csc_matrix);
}

// Reports which RGBA formats are supported and the maximum surface size
// (taken from GL_MAX_TEXTURE_SIZE of the device's GL context).
VdpStatus
QueryCapabilitiesImpl(VdpDevice device_id, VdpRGBAFormat surface_rgba_format,
                      VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height)
{
    if (!is_supported || !max_width || !max_height)
        return VDP_STATUS_INVALID_POINTER;

    ResourceRef
device{device_id};

    switch (surface_rgba_format) {
    case VDP_RGBA_FORMAT_B8G8R8A8:
    case VDP_RGBA_FORMAT_R8G8B8A8:
    case VDP_RGBA_FORMAT_R10G10B10A2:
    case VDP_RGBA_FORMAT_B10G10R10A2:
    case VDP_RGBA_FORMAT_A8:
        // all of these formats should be supported by any OpenGL implementation
        *is_supported = 1;
        break;
    default:
        *is_supported = 0;
        break;
    }

    GLXThreadLocalContext guard{device};

    GLint max_texture_size;
    glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size);

    const auto gl_error = glGetError();
    if (gl_error != GL_NO_ERROR) {
        traceError("OutputSurface::QueryCapabilitiesImpl(): gl error %d\n", gl_error);
        return VDP_STATUS_ERROR;
    }

    *max_width = max_texture_size;
    *max_height = max_texture_size;

    return VDP_STATUS_OK;
}

VdpStatus
QueryCapabilities(VdpDevice device_id, VdpRGBAFormat surface_rgba_format,
                  VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height)
{
    return check_for_exceptions(QueryCapabilitiesImpl, device_id, surface_rgba_format,
                                is_supported, max_width, max_height);
}

// Not implemented.
VdpStatus
QueryGetPutBitsNativeCapabilitiesImpl(VdpDevice device, VdpRGBAFormat surface_rgba_format,
                                      VdpBool *is_supported)
{
    std::ignore = device;
    std::ignore = surface_rgba_format;
    std::ignore = is_supported;

    return VDP_STATUS_NO_IMPLEMENTATION;
}

VdpStatus
QueryGetPutBitsNativeCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
                                  VdpBool *is_supported)
{
    return check_for_exceptions(QueryGetPutBitsNativeCapabilitiesImpl, device,
                                surface_rgba_format, is_supported);
}

// Not implemented.
VdpStatus
QueryPutBitsIndexedCapabilitiesImpl(VdpDevice device, VdpRGBAFormat surface_rgba_format,
                                    VdpIndexedFormat bits_indexed_format,
                                    VdpColorTableFormat color_table_format,
                                    VdpBool *is_supported)
{
    std::ignore = device;
    std::ignore = surface_rgba_format;
    std::ignore = bits_indexed_format;
    std::ignore = color_table_format;
    std::ignore = is_supported;

    return VDP_STATUS_NO_IMPLEMENTATION;
}

VdpStatus
QueryPutBitsIndexedCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
                                VdpIndexedFormat bits_indexed_format,
                                VdpColorTableFormat color_table_format,
                                VdpBool *is_supported)
{
    return check_for_exceptions(QueryPutBitsIndexedCapabilitiesImpl, device,
                                surface_rgba_format, bits_indexed_format,
                                color_table_format, is_supported);
}

// Not implemented.
VdpStatus
QueryPutBitsYCbCrCapabilitiesImpl(VdpDevice device, VdpRGBAFormat surface_rgba_format,
                                  VdpYCbCrFormat bits_ycbcr_format, VdpBool *is_supported)
{
    std::ignore = device;
    std::ignore = surface_rgba_format;
    std::ignore = bits_ycbcr_format;
    std::ignore = is_supported;

    return VDP_STATUS_NO_IMPLEMENTATION;
}

VdpStatus
QueryPutBitsYCbCrCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
                              VdpYCbCrFormat bits_ycbcr_format, VdpBool *is_supported)
{
    return check_for_exceptions(QueryPutBitsYCbCrCapabilitiesImpl, device,
                                surface_rgba_format, bits_ycbcr_format, is_supported);
}

// Blends a bitmap surface (or a solid fill when source_surface is
// VDP_INVALID_HANDLE) into a rectangle of the destination output surface.
VdpStatus
RenderBitmapSurfaceImpl(VdpOutputSurface destination_surface, VdpRect const *destination_rect,
                        VdpBitmapSurface source_surface, VdpRect const *source_rect,
                        VdpColor const *colors,
                        VdpOutputSurfaceRenderBlendState const *blend_state, uint32_t flags)
{
    if (blend_state) {
        if (blend_state->struct_version != VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION)
            return VDP_STATUS_INVALID_VALUE;
    }

    ResourceRef dst_surf{destination_surface};

    // select blend functions
    struct blend_state_struct bs = vdpBlendStateToGLBlendState(blend_state);
    if (bs.invalid_func)
        return VDP_STATUS_INVALID_BLEND_FACTOR;
    if (bs.invalid_eq)
        return VDP_STATUS_INVALID_BLEND_EQUATION;

    GLXThreadLocalContext guard{dst_surf->device};

    // render into the destination surface's FBO with a 1:1 pixel projection
    glBindFramebuffer(GL_FRAMEBUFFER, dst_surf->fbo_id);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, dst_surf->width, 0, dst_surf->height, -1.0f, 1.0f);
    glViewport(0, 0, dst_surf->width, dst_surf->height);

    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    VdpRect s_rect = {0, 0, 1, 1};
    if (source_surface != VDP_INVALID_HANDLE) {
        ResourceRef src_surf{source_surface};

        if (dst_surf->device->id !=
src_surf->device->id)
            return VDP_STATUS_HANDLE_DEVICE_MISMATCH;

        s_rect.x1 = src_surf->width;
        s_rect.y1 = src_surf->height;

        glBindTexture(GL_TEXTURE_2D, src_surf->tex_id);

        // bitmap surfaces keep their pixels in client memory; push pending
        // changes into the GL texture before sampling from it
        if (src_surf->dirty) {
            if (src_surf->bytes_per_pixel != 4)
                glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, src_surf->width, src_surf->height,
                            src_surf->gl_format, src_surf->gl_type,
                            src_surf->bitmap_data.data());

            if (src_surf->bytes_per_pixel != 4)
                glPixelStorei(GL_UNPACK_ALIGNMENT, 4);

            src_surf->dirty = false;
        }

        // texture matrix normalizes integer texel coordinates to [0, 1]
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glScalef(1.0f / src_surf->width, 1.0f / src_surf->height, 1.0f);

        // A8 bitmaps are stored in the red channel; use the swizzle shader to
        // present them as alpha
        if (src_surf->rgba_format == VDP_RGBA_FORMAT_A8) {
            glUseProgram(src_surf->device->shaders[glsl_red_to_alpha_swizzle].program);
            glUniform1i(src_surf->device->shaders[glsl_red_to_alpha_swizzle].uniform.tex_0, 0);
        }
    }

    VdpRect d_rect = {0, 0, dst_surf->width, dst_surf->height};
    if (destination_rect)
        d_rect = *destination_rect;

    if (source_rect)
        s_rect = *source_rect;

    compose_surfaces(bs, s_rect, d_rect, colors, flags, source_surface != VDP_INVALID_HANDLE);
    glUseProgram(0);
    glFinish();

    const auto gl_error = glGetError();
    if (gl_error != GL_NO_ERROR) {
        traceError("OutputSurface::RenderBitmapSurfaceImpl(): gl error %d\n", gl_error);
        return VDP_STATUS_ERROR;
    }

    return VDP_STATUS_OK;
}

VdpStatus
RenderBitmapSurface(VdpOutputSurface destination_surface, VdpRect const *destination_rect,
                    VdpBitmapSurface source_surface, VdpRect const *source_rect,
                    VdpColor const *colors,
                    VdpOutputSurfaceRenderBlendState const *blend_state, uint32_t flags)
{
    return check_for_exceptions(RenderBitmapSurfaceImpl, destination_surface, destination_rect,
                                source_surface, source_rect, colors, blend_state, flags);
}

// Blends one output surface (or a solid fill when source_surface is
// VDP_INVALID_HANDLE) into a rectangle of another output surface.
VdpStatus
RenderOutputSurfaceImpl(VdpOutputSurface destination_surface, VdpRect const *destination_rect,
                        VdpOutputSurface source_surface, VdpRect const *source_rect,
                        VdpColor const *colors,
                        VdpOutputSurfaceRenderBlendState const *blend_state, uint32_t flags)
{
    if (blend_state) {
        if (blend_state->struct_version != VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION)
            return VDP_STATUS_INVALID_VALUE;
    }

    ResourceRef dst_surf{destination_surface};

    // select blend functions
    struct blend_state_struct bs = vdpBlendStateToGLBlendState(blend_state);
    if (bs.invalid_func)
        return VDP_STATUS_INVALID_BLEND_FACTOR;
    if (bs.invalid_eq)
        return VDP_STATUS_INVALID_BLEND_EQUATION;

    GLXThreadLocalContext guard{dst_surf->device};

    // render into the destination surface's FBO with a 1:1 pixel projection
    glBindFramebuffer(GL_FRAMEBUFFER, dst_surf->fbo_id);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, dst_surf->width, 0, dst_surf->height, -1.0f, 1.0f);
    glViewport(0, 0, dst_surf->width, dst_surf->height);

    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    VdpRect s_rect = {0, 0, 1, 1};
    if (source_surface != VDP_INVALID_HANDLE) {
        ResourceRef src_surf{source_surface};

        if (dst_surf->device->id != src_surf->device->id)
            return VDP_STATUS_HANDLE_DEVICE_MISMATCH;

        s_rect.x1 = src_surf->width;
        s_rect.y1 = src_surf->height;

        glBindTexture(GL_TEXTURE_2D, src_surf->tex_id);

        // texture matrix normalizes integer texel coordinates to [0, 1]
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glScalef(1.0f / src_surf->width, 1.0f / src_surf->height, 1.0f);
    }

    VdpRect d_rect = {0, 0, dst_surf->width, dst_surf->height};
    if (destination_rect)
        d_rect = *destination_rect;

    if (source_rect)
        s_rect = *source_rect;

    compose_surfaces(bs, s_rect, d_rect, colors, flags, source_surface != VDP_INVALID_HANDLE);
    glFinish();

    const auto gl_error = glGetError();
    if (gl_error != GL_NO_ERROR) {
        traceError("OutputSurface::RenderOutputSurfaceImpl(): gl error %d\n", gl_error);
        return VDP_STATUS_ERROR;
    }

    return VDP_STATUS_OK;
}

VdpStatus
RenderOutputSurface(VdpOutputSurface destination_surface, VdpRect const *destination_rect,
                    VdpOutputSurface source_surface, VdpRect const *source_rect,
                    VdpColor const *colors,
                    VdpOutputSurfaceRenderBlendState const *blend_state, uint32_t flags)
{
    return check_for_exceptions(RenderOutputSurfaceImpl, destination_surface, destination_rect,
                                source_surface, source_rect, colors, blend_state, flags);
} } } // namespace vdp::OutputSurface
libvdpau-va-gl-0.4.2/src/api-output-surface.hh000066400000000000000000000057461277566164500212300ustar00rootroot00000000000000/*
 * Copyright 2013-2016 Rinat Ibragimov
 *
 * This file is part of libvdpau-va-gl
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "api.hh"
#include
#include
#include

namespace vdp { namespace OutputSurface {

// GL-texture-backed output surface.  Pixels live in a GL texture rendered
// through an FBO; the presentation queue fields track the surface's display
// state.
struct Resource: public vdp::GenericResource
{
    Resource(std::shared_ptr a_device, VdpRGBAFormat a_rgba_format, uint32_t a_width,
             uint32_t a_height);

    ~Resource();

    VdpRGBAFormat   rgba_format;    ///< RGBA format of data stored
    GLuint          tex_id;         ///< associated GL texture id
    GLuint          fbo_id;         ///< framebuffer object id
    uint32_t        width;
    uint32_t        height;
    GLuint          gl_internal_format; ///< GL texture format: internal format
    GLuint          gl_format;      ///< GL texture format: preferred external format
    GLuint          gl_type;        ///< GL texture format: pixel type
    unsigned int    bytes_per_pixel;    ///< number of bytes per pixel
    VdpTime         first_presentation_time;    ///< first displayed time in queue
    VdpPresentationQueueStatus  status; ///< status in presentation queue
};

VdpOutputSurfaceQueryCapabilities                   QueryCapabilities;
VdpOutputSurfaceQueryGetPutBitsNativeCapabilities   QueryGetPutBitsNativeCapabilities;
VdpOutputSurfaceQueryPutBitsIndexedCapabilities     QueryPutBitsIndexedCapabilities;
VdpOutputSurfaceQueryPutBitsYCbCrCapabilities       QueryPutBitsYCbCrCapabilities;
VdpOutputSurfaceCreate              Create;
VdpOutputSurfaceDestroy             Destroy;
VdpOutputSurfaceGetParameters       GetParameters;
VdpOutputSurfaceGetBitsNative       GetBitsNative;
VdpOutputSurfacePutBitsNative       PutBitsNative;
VdpOutputSurfacePutBitsIndexed      PutBitsIndexed;
VdpOutputSurfacePutBitsYCbCr        PutBitsYCbCr;
VdpOutputSurfaceRenderOutputSurface RenderOutputSurface;
VdpOutputSurfaceRenderBitmapSurface RenderBitmapSurface;

} } // namespace vdp::OutputSurface
libvdpau-va-gl-0.4.2/src/api-presentation-queue.cc000066400000000000000000000476401277566164500220660ustar00rootroot00000000000000/*
 * Copyright 2013-2016 Rinat Ibragimov
 *
 * This file is part of libvdpau-va-gl
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without
limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define GL_GLEXT_PROTOTYPES
#include "api-output-surface.hh"
#include "api-presentation-queue.hh"
#include "globals.hh"
#include "glx-context.hh"
#include "handle-storage.hh"
#include "trace.hh"
#include "watermark.hh"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

using std::chrono::microseconds;
using std::chrono::milliseconds;
using std::make_shared;
using std::shared_ptr;

namespace {

// One unit of work for the presentation worker thread: either "display
// surface_id of queue pq_id at time `when`", or a control message
// (wipe_tasks drops all pending tasks of a queue, shutdown_thread stops
// the worker).
struct Task
{
    Task()
        : when{}
        , clip_width{0}
        , clip_height{0}
        , wipe_tasks{false}
        , shutdown_thread{false}
        , pq_id{VDP_INVALID_HANDLE}
        , surface_id{VDP_INVALID_HANDLE}
    {}

    timespec    when;           // absolute CLOCK_REALTIME display deadline
    uint32_t    clip_width;
    uint32_t    clip_height;
    bool        wipe_tasks;
    bool        shutdown_thread;
    VdpPresentationQueue    pq_id;
    VdpOutputSurface        surface_id;

    // Orders tasks by deadline for the worker's sorted set.
    // NOTE(review): tasks with identical (tv_sec, tv_nsec) compare equivalent,
    // so a std::set keyed on this ordering keeps only one of them — confirm
    // that colliding timestamps cannot occur / are intended to coalesce.
    bool
    operator<(const Task &that) const
    {
        if (this->when.tv_sec < that.when.tv_sec)
            return true;
        if (this->when.tv_sec > that.when.tv_sec)
            return false;
        if (this->when.tv_nsec < that.when.tv_nsec)
            return true;
        return false;
    }
};

// Hand-off queue from API threads to the worker thread, guarded by
// g_task_queue_mtx and signalled through g_task_queue_cv.
std::queue g_task_queue;
std::mutex g_task_queue_mtx;
std::condition_variable g_task_queue_cv;

} // anonymous namespace

namespace vdp {
namespace
PresentationQueue {

void
PresentationQueueThreadRef::do_start_thread()
{
    t_ = std::thread([this] () { thread_body(); });
}

// Reference-counted handle to the single shared presentation worker thread:
// the first instance starts it, the last one shuts it down.
PresentationQueueThreadRef::PresentationQueueThreadRef()
{
    vdp::GLXLockGuard guard;

    if (thread_refs_ == 0)
        do_start_thread();

    thread_refs_ += 1;
}

PresentationQueueThreadRef::~PresentationQueueThreadRef()
{
    try {
        {
            vdp::GLXLockGuard guard;

            if (thread_refs_ > 1) {
                thread_refs_ -= 1;
                // there are still other users, do nothing now
                return;
            }
        }

        // here thread_refs_ equals 1, which means this instance is the only user left. Ask worker
        // thread to stop.
        {
            std::unique_lock lock{g_task_queue_mtx};
            Task task;
            task.shutdown_thread = true;
            g_task_queue.push(task);
            g_task_queue_cv.notify_one();
        }

        t_.join();

        {
            vdp::GLXLockGuard guard;
            thread_refs_ -= 1;
            if (thread_refs_ > 0) {
                // there is at least one another instance which increased reference count. But since
                // we were holding one reference, that instance didn't start the thread.
                // Start it now.
                do_start_thread();
            }
        }

    } catch (...) {
        traceError("PresentationQueueThreadRef::~PresentationQueueThreadRef(): caught exception\n");
    }
}

std::thread PresentationQueueThreadRef::t_;
int         PresentationQueueThreadRef::thread_refs_ = 0;

// Convert a timespec to VdpTime (nanoseconds).
VdpTime
timespec2vdptime(struct timespec t)
{
    return (uint64_t)t.tv_sec * 1000 * 1000 * 1000 + t.tv_nsec;
}

// Convert VdpTime (nanoseconds) back to a timespec.
struct timespec
vdptime2timespec(VdpTime t)
{
    struct timespec res;
    res.tv_sec = t / (1000*1000*1000);
    res.tv_nsec = t % (1000*1000*1000);
    return res;
}

// Worker-side display: draws the surface (plus optional watermark) into the
// target's GLX pixmap, copies it onto the target drawable, and stamps the
// surface with its presentation time.  A vanished handle is ignored — the
// surface or queue may have been destroyed while the task was pending.
void
do_presentation_queue_display(const Task &task)
{
    try {
        ResourceRef pq{task.pq_id};
        ResourceRef surface{task.surface_id};

        const uint32_t clip_width = task.clip_width;
        const uint32_t clip_height = task.clip_height;

        vdp::GLXLockGuard guard;

        pq->target->recreate_pixmaps_if_geometry_changed();

        glXMakeCurrent(pq->device->dpy.get(), pq->target->glx_pixmap, pq->target->glc);

        // zero clip dimensions mean "use the whole surface"
        const uint32_t target_width  = (clip_width > 0)  ? clip_width  : surface->width;
        const uint32_t target_height = (clip_height > 0) ? clip_height : surface->height;

        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glOrtho(0, target_width, target_height, 0, -1.0, 1.0);
        glViewport(0, 0, target_width, target_height);

        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();

        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glScalef(1.0f/surface->width, 1.0f/surface->height, 1.0f);

        glEnable(GL_TEXTURE_2D);
        glDisable(GL_BLEND);
        glBindTexture(GL_TEXTURE_2D, surface->tex_id);
        glColor4f(1, 1, 1, 1);

        glBegin(GL_QUADS);
        glTexCoord2i(0, 0);                          glVertex2i(0, 0);
        glTexCoord2i(target_width, 0);               glVertex2i(target_width, 0);
        glTexCoord2i(target_width, target_height);   glVertex2i(target_width, target_height);
        glTexCoord2i(0, target_height);              glVertex2i(0, target_height);
        glEnd();

        if (global.quirks.show_watermark) {
            // blend the semi-transparent watermark into the bottom-right corner
            glEnable(GL_BLEND);
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
            glBlendEquation(GL_FUNC_ADD);

            glBindTexture(GL_TEXTURE_2D, pq->device->watermark_tex_id);
            glMatrixMode(GL_TEXTURE);
            glLoadIdentity();

            glColor4f(1.0, 1.0, 1.0, 0.2);
            glBegin(GL_QUADS);
            glTexCoord2i(0, 0);
            glVertex2i(target_width - watermark_width, target_height - watermark_height);
            glTexCoord2i(1, 0);
            glVertex2i(target_width, target_height - watermark_height);
            glTexCoord2i(1, 1);
            glVertex2i(target_width, target_height);
            glTexCoord2i(0, 1);
            glVertex2i(target_width - watermark_width, target_height);
            glEnd();
        }

        glFinish();

        // copy the rendered pixmap onto the visible drawable, trapping X
        // errors so a destroyed window does not kill the worker
        x11_push_eh();
        XCopyArea(pq->device->dpy.get(), pq->target->pixmap, pq->target->drawable,
                  pq->target->plain_copy_gc, 0, 0, target_width, target_height, 0, 0);
        XSync(pq->device->dpy.get(), False);
        int x11_err = x11_pop_eh();
        if (x11_err != Success) {
            char buf[200] = { 0 };
            XGetErrorText(pq->device->dpy.get(), x11_err, buf, sizeof(buf));
            traceError("PresentationQueue::do_presentation_queue_display(): "
                       "caught X11 error %s\n", buf);
        }

        struct timespec now;
        clock_gettime(CLOCK_REALTIME, &now);
        surface->first_presentation_time = timespec2vdptime(now);
        surface->status = VDP_PRESENTATION_QUEUE_STATUS_IDLE;

        const auto gl_error = glGetError();
        if (gl_error != GL_NO_ERROR) {
            traceError("PresentationQueue::do_presentation_queue_display(): gl error %d\n",
                       gl_error);
        }

    } catch (const vdp::resource_not_found &) {
        // just ignoring
        return;
    }
}

// Worker thread main loop: keeps a deadline-sorted set of pending display
// tasks, runs each once due, and drains the shared hand-off queue for new
// tasks and control messages (wipe_tasks / shutdown_thread).
void
PresentationQueueThreadRef::thread_body()
{
    std::set int_q;     // internal queue of task, always sorted

    while (true) {
        int64_t timeout;
        Task task;

        if (int_q.size() != 0) {
            // internal queue have a task
            struct timespec now;
            clock_gettime(CLOCK_REALTIME, &now);
            task = *int_q.begin();
            // microseconds until the earliest deadline
            timeout = (task.when.tv_sec - now.tv_sec) * 1000 * 1000 +
                      (task.when.tv_nsec - now.tv_nsec) / 1000;
            if (timeout <= 0) {
                // task is ready to go, run it
                do_presentation_queue_display(task);

                // remove it from queue
                int_q.erase(int_q.begin());
                continue;
            }
        } else {
            // no tasks in queue, sleep for a while
            timeout = 1000 * 1000;  // one second
        }

        {
            std::unique_lock lock(g_task_queue_mtx);
            if (g_task_queue.size() == 0) {
                g_task_queue_cv.wait_for(lock, microseconds(timeout));
                continue;
            } else {
                task = g_task_queue.front();
                g_task_queue.pop();
            }
        }

        if (task.shutdown_thread)
            return;

        if (task.wipe_tasks) {
            // drop all tasks with the same queue id
            decltype(int_q) new_q;
            for (const auto &t: int_q) {
                if (t.pq_id != task.pq_id)
                    new_q.insert(t);
            }
            std::swap(int_q, new_q);
            continue;
        }

        int_q.insert(task);
    }
}

// Waits until the surface leaves the presentation queue (status becomes
// IDLE), then optionally reports when it was first displayed.
VdpStatus
BlockUntilSurfaceIdleImpl(VdpPresentationQueue presentation_queue, VdpOutputSurface surface_id,
                          VdpTime *first_presentation_time)
{
    // ensure presentation_queue is valid;
    {
        ResourceRef pq{presentation_queue};
    }

    // TODO: use locking instead of busy loop
    while (true) {
        ResourceRef surface{surface_id};
        if (surface->status == VDP_PRESENTATION_QUEUE_STATUS_IDLE)
            break;
        usleep(1000);
    }

    if (first_presentation_time) {
        ResourceRef surface{surface_id};
        *first_presentation_time = surface->first_presentation_time;
    }

    return VDP_STATUS_OK;
}

VdpStatus
BlockUntilSurfaceIdle(VdpPresentationQueue presentation_queue, VdpOutputSurface surface_id,
                      VdpTime *first_presentation_time)
{
    return
check_for_exceptions(BlockUntilSurfaceIdleImpl, presentation_queue, surface_id, first_presentation_time); } VdpStatus QuerySurfaceStatusImpl(VdpPresentationQueue presentation_queue, VdpOutputSurface surface_id, VdpPresentationQueueStatus *status, VdpTime *first_presentation_time) { ResourceRef pq{presentation_queue}; ResourceRef surface{surface_id}; if (status) *status = surface->status; if (first_presentation_time) *first_presentation_time = surface->first_presentation_time; return VDP_STATUS_OK; } VdpStatus QuerySurfaceStatus(VdpPresentationQueue presentation_queue, VdpOutputSurface surface_id, VdpPresentationQueueStatus *status, VdpTime *first_presentation_time) { return check_for_exceptions(QuerySurfaceStatusImpl, presentation_queue, surface_id, status, first_presentation_time); } TargetResource::TargetResource(std::shared_ptr a_device, Drawable a_drawable) { device = a_device; drawable = a_drawable; // emulate geometry change. Hope there will be no drawables of such size drawable_width = (uint32_t)(-1); drawable_height = (uint32_t)(-1); pixmap = None; { GLXLockGuard guard; auto *dpy = device->dpy.get(); // No double buffering since we are going to render to glx pixmap GLint att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, None }; xvi = glXChooseVisual(dpy, device->screen, att); if (!xvi) { traceError("PresentationQueue::TargetResource::TargetResource(): " "glXChooseVisual failed\n"); throw vdp::generic_error(); } recreate_pixmaps_if_geometry_changed(); // create context for dislaying result (can share display lists with device->glc) glc = glXCreateContext(dpy, xvi, device->glc.get(), GL_TRUE); } } TargetResource::~TargetResource() { try { // drawable may be destroyed already, so it's a global context that should be activated { GLXThreadLocalContext guard{device, false}; // keep that context set afterwards glXDestroyContext(device->dpy.get(), glc); // since previous was just destroyed free_glx_pixmaps(); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { 
traceError("PresentationQueue::TargetResource::~TargetResource(): gl error %d\n", gl_error); } } XFree(xvi); } catch (...) { traceError("PresentationQueue::TargetResource::~TargetResource(): caught exception\n"); } } void TargetResource::free_glx_pixmaps() { auto *dpy = device->dpy.get(); // if pixmap is None, nothing was allocated if (pixmap == None) return; glXDestroyGLXPixmap(dpy, glx_pixmap); XFreeGC(dpy, plain_copy_gc); XFreePixmap(dpy, pixmap); pixmap = None; } // create new pixmap, glx pixmap, GC if size has changed. // This function relies on external serializing Xlib access void TargetResource::recreate_pixmaps_if_geometry_changed() { Window root_wnd; int xpos, ypos; unsigned int width, height, border_width, depth; auto *dpy = device->dpy.get(); XGetGeometry(dpy, drawable, &root_wnd, &xpos, &ypos, &width, &height, &border_width, &depth); if (width != drawable_width || height != drawable_height) { free_glx_pixmaps(); drawable_width = width; drawable_height = height; pixmap = XCreatePixmap(dpy, device->root, drawable_width, drawable_height, depth); XGCValues gc_values = {}; gc_values.function = GXcopy; gc_values.graphics_exposures = True; plain_copy_gc = XCreateGC(dpy, pixmap, GCFunction | GCGraphicsExposures, &gc_values); glx_pixmap = glXCreateGLXPixmap(dpy, xvi, pixmap); XSync(dpy, False); } } Resource::Resource(shared_ptr a_device, shared_ptr a_target) { device = a_device; target = a_target; bg_color.red = 0.0; bg_color.green = 0.0; bg_color.blue = 0.0; bg_color.alpha = 0.0; } Resource::~Resource() { try { Task task; task.when = vdptime2timespec(0); // as early as possible task.pq_id = id; task.wipe_tasks = true; { std::unique_lock lock{g_task_queue_mtx}; g_task_queue.push(task); g_task_queue_cv.notify_one(); } } catch (...) 
{ traceError("PresentationQueue::Resource::~Resource(): caught exception\n"); } } VdpStatus CreateImpl(VdpDevice device_id, VdpPresentationQueueTarget presentation_queue_target, VdpPresentationQueue *presentation_queue) { if (!presentation_queue) return VDP_STATUS_INVALID_POINTER; ResourceRef device{device_id}; ResourceRef target{presentation_queue_target}; auto data = make_shared(device, target); *presentation_queue = ResourceStorage::instance().insert(data); return VDP_STATUS_OK; } VdpStatus Create(VdpDevice device_id, VdpPresentationQueueTarget presentation_queue_target, VdpPresentationQueue *presentation_queue) { return check_for_exceptions(CreateImpl, device_id, presentation_queue_target, presentation_queue); } VdpStatus DestroyImpl(VdpPresentationQueue presentation_queue) { ResourceRef pq{presentation_queue}; ResourceStorage::instance().drop(presentation_queue); return VDP_STATUS_OK; } VdpStatus Destroy(VdpPresentationQueue presentation_queue) { return check_for_exceptions(DestroyImpl, presentation_queue); } VdpStatus SetBackgroundColorImpl(VdpPresentationQueue presentation_queue, VdpColor *const background_color) { ResourceRef pq{presentation_queue}; if (background_color) { pq->bg_color = *background_color; } else { pq->bg_color.red = 0.0; pq->bg_color.green = 0.0; pq->bg_color.blue = 0.0; pq->bg_color.alpha = 0.0; } return VDP_STATUS_OK; } VdpStatus SetBackgroundColor(VdpPresentationQueue presentation_queue, VdpColor *const background_color) { return check_for_exceptions(SetBackgroundColorImpl, presentation_queue, background_color); } VdpStatus GetBackgroundColorImpl(VdpPresentationQueue presentation_queue, VdpColor *background_color) { ResourceRef pq{presentation_queue}; if (background_color) *background_color = pq->bg_color; return VDP_STATUS_OK; } VdpStatus GetBackgroundColor(VdpPresentationQueue presentation_queue, VdpColor *background_color) { return check_for_exceptions(GetBackgroundColorImpl, presentation_queue, background_color); } VdpStatus 
GetTime(VdpPresentationQueue, VdpTime *current_time) { struct timespec now; clock_gettime(CLOCK_REALTIME, &now); if (current_time) *current_time = timespec2vdptime(now); return VDP_STATUS_OK; } VdpStatus DisplayImpl(VdpPresentationQueue presentation_queue, VdpOutputSurface surface_id, uint32_t clip_width, uint32_t clip_height, VdpTime earliest_presentation_time) { ResourceRef pq{presentation_queue}; ResourceRef surface{surface_id}; if (pq->device->id != surface->device->id) return VDP_STATUS_HANDLE_DEVICE_MISMATCH; Task task; task.when = vdptime2timespec(earliest_presentation_time); task.clip_width = clip_width; task.clip_height = clip_height; task.surface_id = surface_id; task.pq_id = presentation_queue; surface->first_presentation_time = 0; surface->status = VDP_PRESENTATION_QUEUE_STATUS_QUEUED; { std::unique_lock lock{g_task_queue_mtx}; g_task_queue.push(task); g_task_queue_cv.notify_one(); } return VDP_STATUS_OK; } VdpStatus Display(VdpPresentationQueue presentation_queue, VdpOutputSurface surface_id, uint32_t clip_width, uint32_t clip_height, VdpTime earliest_presentation_time) { return check_for_exceptions(DisplayImpl, presentation_queue, surface_id, clip_width, clip_height, earliest_presentation_time); } VdpStatus TargetCreateX11Impl(VdpDevice device_id, Drawable drawable, VdpPresentationQueueTarget *target) { if (!target) return VDP_STATUS_INVALID_POINTER; ResourceRef device{device_id}; auto data = make_shared(device, drawable); *target = ResourceStorage::instance().insert(data); return VDP_STATUS_OK; } VdpStatus TargetCreateX11(VdpDevice device_id, Drawable drawable, VdpPresentationQueueTarget *target) { return check_for_exceptions(TargetCreateX11Impl, device_id, drawable, target); } VdpStatus TargetDestroyImpl(VdpPresentationQueueTarget presentation_queue_target) { ResourceRef target{presentation_queue_target}; ResourceStorage::instance().drop(presentation_queue_target); return VDP_STATUS_OK; } VdpStatus TargetDestroy(VdpPresentationQueueTarget 
presentation_queue_target) { return check_for_exceptions(TargetDestroyImpl, presentation_queue_target); } } } // namespace vdp::PresentationQueue libvdpau-va-gl-0.4.2/src/api-presentation-queue.hh000066400000000000000000000064761277566164500221030ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #include "api.hh" #include #include #include #include namespace vdp { namespace PresentationQueue { struct TargetResource: public vdp::GenericResource { TargetResource(std::shared_ptr device, Drawable drawable); ~TargetResource(); void recreate_pixmaps_if_geometry_changed(); Drawable drawable; ///< X drawable to output to uint32_t drawable_width; ///< last seen drawable width uint32_t drawable_height;///< last seen drawable height Pixmap pixmap; ///< draw buffer GLXPixmap glx_pixmap; ///< GLX pixmap proxy GC plain_copy_gc; ///< X GC for displaying buffer content GLXContext glc; ///< GL context used for output XVisualInfo *xvi; private: void free_glx_pixmaps(); }; class PresentationQueueThreadRef { public: PresentationQueueThreadRef(); ~PresentationQueueThreadRef(); private: static void thread_body(); void do_start_thread(); static std::thread t_; static int thread_refs_; // amount of users that want thread alive }; struct Resource: public vdp::GenericResource { Resource(std::shared_ptr a_device, std::shared_ptr a_target); ~Resource(); std::shared_ptr target; VdpColor bg_color; ///< background color private: PresentationQueueThreadRef presentation_thread_reference; }; VdpPresentationQueueCreate Create; VdpPresentationQueueDestroy Destroy; VdpPresentationQueueSetBackgroundColor SetBackgroundColor; VdpPresentationQueueGetBackgroundColor GetBackgroundColor; VdpPresentationQueueGetTime GetTime; VdpPresentationQueueDisplay Display; VdpPresentationQueueBlockUntilSurfaceIdle BlockUntilSurfaceIdle; VdpPresentationQueueQuerySurfaceStatus QuerySurfaceStatus; VdpPresentationQueueTargetCreateX11 TargetCreateX11; VdpPresentationQueueTargetDestroy TargetDestroy; } } // namespace vdp::PresentationQueue libvdpau-va-gl-0.4.2/src/api-video-mixer.cc000066400000000000000000000500571277566164500204560ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person 
obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #define GL_GLEXT_PROTOTYPES #include "api-device.hh" #include "api-output-surface.hh" #include "api-video-mixer.hh" #include "api-video-surface.hh" #include "glx-context.hh" #include "handle-storage.hh" #include "trace.hh" #include #include #include #include #include using std::make_shared; using std::shared_ptr; namespace vdp { namespace VideoMixer { void Resource::free_video_mixer_pixmaps() { Display *dpy = device->dpy.get(); if (glx_pixmap != None) { glXDestroyGLXPixmap(dpy, glx_pixmap); glx_pixmap = None; } if (pixmap != None) { XFreePixmap(dpy, pixmap); pixmap = None; } } void render_va_surf_to_texture(shared_ptr mixer, shared_ptr src_surf) { auto deviceData = mixer->device; Display *dpy = mixer->device->dpy.get(); if (src_surf->width != mixer->pixmap_width || src_surf->height != mixer->pixmap_height) { mixer->free_video_mixer_pixmaps(); mixer->pixmap = XCreatePixmap(dpy, mixer->device->root, src_surf->width, src_surf->height, mixer->device->color_depth); int fbconfig_attrs[] = { 
GLX_DRAWABLE_TYPE, GLX_PIXMAP_BIT, GLX_RENDER_TYPE, GLX_RGBA_BIT, GLX_X_RENDERABLE, GL_TRUE, GLX_Y_INVERTED_EXT, GL_TRUE, GLX_RED_SIZE, 8, GLX_GREEN_SIZE, 8, GLX_BLUE_SIZE, 8, GLX_ALPHA_SIZE, 8, GLX_DEPTH_SIZE, 16, GLX_BIND_TO_TEXTURE_RGBA_EXT, GL_TRUE, GL_NONE }; int nconfigs; GLXFBConfig *fbconfig = glXChooseFBConfig(dpy, mixer->device->screen, fbconfig_attrs, &nconfigs); int pixmap_attrs[] = { GLX_TEXTURE_TARGET_EXT, GLX_TEXTURE_2D_EXT, GLX_MIPMAP_TEXTURE_EXT, GL_FALSE, GLX_TEXTURE_FORMAT_EXT, GLX_TEXTURE_FORMAT_RGB_EXT, GL_NONE }; mixer->glx_pixmap = glXCreatePixmap(dpy, fbconfig[0], mixer->pixmap, pixmap_attrs); free(fbconfig); mixer->pixmap_width = src_surf->width; mixer->pixmap_height = src_surf->height; } glBindTexture(GL_TEXTURE_2D, mixer->tex_id); mixer->device->fn.glXBindTexImageEXT(dpy, mixer->glx_pixmap, GLX_FRONT_EXT, NULL); XSync(dpy, False); // TODO: avoid XSync vaPutSurface(mixer->device->va_dpy, src_surf->va_surf, mixer->pixmap, 0, 0, src_surf->width, src_surf->height, 0, 0, src_surf->width, src_surf->height, nullptr, 0, VA_FRAME_PICTURE); glBindFramebuffer(GL_FRAMEBUFFER, src_surf->fbo_id); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0, src_surf->width, 0, src_surf->height, -1.0, 1.0); glViewport(0, 0, src_surf->width, src_surf->height); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glMatrixMode(GL_TEXTURE); glLoadIdentity(); glDisable(GL_BLEND); glBegin(GL_QUADS); glTexCoord2f(0, 0); glVertex2f(0, 0); glTexCoord2f(1, 0); glVertex2f(src_surf->width, 0); glTexCoord2f(1, 1); glVertex2f(src_surf->width, src_surf->height); glTexCoord2f(0, 1); glVertex2f(0, src_surf->height); glEnd(); glFinish(); mixer->device->fn.glXReleaseTexImageEXT(dpy, mixer->glx_pixmap, GLX_FRONT_EXT); glBindFramebuffer(GL_FRAMEBUFFER, 0); } Resource::Resource(shared_ptr a_device, uint32_t a_feature_count, VdpVideoMixerFeature const *a_features, uint32_t a_parameter_count, VdpVideoMixerParameter const *a_parameters, void const *const *a_parameter_values) { std::ignore 
= a_feature_count; std::ignore = a_features; // TODO: mixer features std::ignore = a_parameter_count; std::ignore = a_parameters; std::ignore = a_parameter_values; // TODO: mixer parameters device = a_device; pixmap = None; glx_pixmap = None; pixmap_width = (uint32_t)(-1); // set knowingly invalid geometry pixmap_height = (uint32_t)(-1); // to force pixmap recreation { GLXThreadLocalContext guard{device}; glGenTextures(1, &tex_id); glBindTexture(GL_TEXTURE_2D, tex_id); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { traceError("VideoMixer::Resource::Resource(): gl error %d\n", gl_error); throw vdp::generic_error(); } } } Resource::~Resource() { try { { GLXLockGuard guard; free_video_mixer_pixmaps(); } { GLXThreadLocalContext guard{device}; glDeleteTextures(1, &tex_id); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) traceError("VideoMixer::Resource::~Resource(): gl error %d\n", gl_error); } } catch (...) 
{ traceError("VideoMixer::Resource::~Resource(): caught exception\n"); } } VdpStatus CreateImpl(VdpDevice device_id, uint32_t feature_count, VdpVideoMixerFeature const *features, uint32_t parameter_count, VdpVideoMixerParameter const *parameters, void const *const *parameter_values, VdpVideoMixer *mixer) { if (!mixer) return VDP_STATUS_INVALID_POINTER; ResourceRef device{device_id}; auto data = make_shared(device, feature_count, features, parameter_count, parameters, parameter_values); *mixer = ResourceStorage::instance().insert(data); return VDP_STATUS_OK; } VdpStatus Create(VdpDevice device_id, uint32_t feature_count, VdpVideoMixerFeature const *features, uint32_t parameter_count, VdpVideoMixerParameter const *parameters, void const *const *parameter_values, VdpVideoMixer *mixer) { return check_for_exceptions(CreateImpl, device_id, feature_count, features, parameter_count, parameters, parameter_values, mixer); } VdpStatus DestroyImpl(VdpVideoMixer mixer_id) { ResourceRef mixer{mixer_id}; ResourceStorage::instance().drop(mixer_id); return VDP_STATUS_OK; } VdpStatus Destroy(VdpVideoMixer mixer_id) { return check_for_exceptions(DestroyImpl, mixer_id); } VdpStatus GetAttributeValuesImpl(VdpVideoMixer mixer, uint32_t attribute_count, VdpVideoMixerAttribute const *attributes, void *const *attribute_values) { std::ignore = mixer; std::ignore = attribute_count; std::ignore = attributes; std::ignore = attribute_values; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus GetAttributeValues(VdpVideoMixer mixer, uint32_t attribute_count, VdpVideoMixerAttribute const *attributes, void *const *attribute_values) { return check_for_exceptions(GetAttributeValuesImpl, mixer, attribute_count, attributes, attribute_values); } VdpStatus GetFeatureEnablesImpl(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool *feature_enables) { std::ignore = mixer; std::ignore = feature_count; std::ignore = features; std::ignore = feature_enables; return 
VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus GetFeatureEnables(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool *feature_enables) { return check_for_exceptions(GetFeatureEnablesImpl, mixer, feature_count, features, feature_enables); } VdpStatus GetFeatureSupportImpl(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool *feature_supports) { std::ignore = mixer; std::ignore = feature_count; std::ignore = features; std::ignore = feature_supports; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus GetFeatureSupport(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool *feature_supports) { return check_for_exceptions(GetFeatureSupportImpl, mixer, feature_count, features, feature_supports); } VdpStatus GetParameterValuesImpl(VdpVideoMixer mixer, uint32_t parameter_count, VdpVideoMixerParameter const *parameters, void *const *parameter_values) { std::ignore = mixer; std::ignore = parameter_count; std::ignore = parameters; std::ignore = parameter_values; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus GetParameterValues(VdpVideoMixer mixer, uint32_t parameter_count, VdpVideoMixerParameter const *parameters, void *const *parameter_values) { return check_for_exceptions(GetParameterValuesImpl, mixer, parameter_count, parameters, parameter_values); } VdpStatus QueryAttributeSupportImpl(VdpDevice device, VdpVideoMixerAttribute attribute, VdpBool *is_supported) { std::ignore = device; std::ignore = attribute; std::ignore = is_supported; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus QueryAttributeSupport(VdpDevice device, VdpVideoMixerAttribute attribute, VdpBool *is_supported) { return check_for_exceptions(QueryAttributeSupportImpl, device, attribute, is_supported); } VdpStatus QueryAttributeValueRangeImpl(VdpDevice device, VdpVideoMixerAttribute attribute, void *min_value, void *max_value) { std::ignore = device; std::ignore = attribute; std::ignore = min_value; 
std::ignore = max_value; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus QueryAttributeValueRange(VdpDevice device, VdpVideoMixerAttribute attribute, void *min_value, void *max_value) { return check_for_exceptions(QueryAttributeValueRangeImpl, device, attribute, min_value, max_value); } VdpStatus QueryFeatureSupportImpl(VdpDevice device, VdpVideoMixerFeature feature, VdpBool *is_supported) { std::ignore = device; std::ignore = feature; std::ignore = is_supported; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus QueryFeatureSupport(VdpDevice device, VdpVideoMixerFeature feature, VdpBool *is_supported) { return check_for_exceptions(QueryFeatureSupportImpl, device, feature, is_supported); } VdpStatus QueryParameterSupportImpl(VdpDevice device, VdpVideoMixerParameter parameter, VdpBool *is_supported) { std::ignore = device; std::ignore = parameter; std::ignore = is_supported; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus QueryParameterSupport(VdpDevice device, VdpVideoMixerParameter parameter, VdpBool *is_supported) { return check_for_exceptions(QueryParameterSupportImpl, device, parameter, is_supported); } VdpStatus QueryParameterValueRangeImpl(VdpDevice device, VdpVideoMixerParameter parameter, void *min_value, void *max_value) { uint32_t uint32_value; switch (parameter) { case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH: // TODO: get actual limits uint32_value = 16; memcpy(min_value, &uint32_value, sizeof(uint32_value)); uint32_value = 4096; memcpy(max_value, &uint32_value, sizeof(uint32_value)); return VDP_STATUS_OK; case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT: // TODO: get actual limits uint32_value = 16; memcpy(min_value, &uint32_value, sizeof(uint32_value)); uint32_value = 4096; memcpy(max_value, &uint32_value, sizeof(uint32_value)); return VDP_STATUS_OK; case VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE: // TODO case VDP_VIDEO_MIXER_PARAMETER_LAYERS: // TODO default: return VDP_STATUS_NO_IMPLEMENTATION; } } VdpStatus QueryParameterValueRange(VdpDevice 
device, VdpVideoMixerParameter parameter, void *min_value, void *max_value) { return check_for_exceptions(QueryParameterValueRangeImpl, device, parameter, min_value, max_value); } VdpStatus RenderImpl(VdpVideoMixer mixer_id, VdpOutputSurface background_surface, VdpRect const *background_source_rect, VdpVideoMixerPictureStructure current_picture_structure, uint32_t video_surface_past_count, VdpVideoSurface const *video_surface_past, VdpVideoSurface video_surface_current, uint32_t video_surface_future_count, VdpVideoSurface const *video_surface_future, VdpRect const *video_source_rect, VdpOutputSurface destination_surface, VdpRect const *destination_rect, VdpRect const *destination_video_rect, uint32_t layer_count, VdpLayer const *layers) { std::ignore = mixer_id; // TODO: mixer should be used to get mixing parameters // TODO: current implementation ignores previous and future surfaces, using only current. // Is that acceptable for interlaced video? Will VAAPI handle deinterlacing? std::ignore = background_surface; // TODO: background_surface. Is it safe to just ignore it? std::ignore = background_source_rect; std::ignore = current_picture_structure; std::ignore = video_surface_past_count; std::ignore = video_surface_past; std::ignore = video_surface_future_count; std::ignore = video_surface_future; std::ignore = layer_count; std::ignore = layers; ResourceRef mixer{mixer_id}; ResourceRef src_surf{video_surface_current}; ResourceRef dst_surf{destination_surface}; if (src_surf->device->id != dst_surf->device->id || src_surf->device->id != mixer->device->id) { return VDP_STATUS_HANDLE_DEVICE_MISMATCH; } VdpRect srcVideoRect = {0, 0, src_surf->width, src_surf->height}; if (video_source_rect) srcVideoRect = *video_source_rect; VdpRect dstRect = {0, 0, dst_surf->width, dst_surf->height}; if (destination_rect) dstRect = *destination_rect; // TODO: dstVideoRect once was equal srcVideoRect by default. 
But there are // possible subtleness in API documentation, which makes some people // interpret it in another way. More importantly, origial VDPAU driver // also does the other way. I hope this will be clarified one day. VdpRect dstVideoRect = {0, 0, dst_surf->width, dst_surf->height}; if (destination_video_rect) dstVideoRect = *destination_video_rect; // TODO: dstRect should clip dstVideoRect GLXThreadLocalContext guard{mixer->device}; if (src_surf->sync_va_to_glx) { render_va_surf_to_texture(mixer, src_surf); src_surf->sync_va_to_glx = false; } glBindFramebuffer(GL_FRAMEBUFFER, dst_surf->fbo_id); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0, dst_surf->width, 0, dst_surf->height, -1.0f, 1.0f); glViewport(0, 0, dst_surf->width, dst_surf->height); glDisable(GL_BLEND); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glMatrixMode(GL_TEXTURE); glLoadIdentity(); glScalef(1.0f/src_surf->width, 1.0f/src_surf->height, 1.0f); // Clear dstRect area glDisable(GL_TEXTURE_2D); glColor4f(0, 0, 0, 1); glBegin(GL_QUADS); glVertex2f(dstRect.x0, dstRect.y0); glVertex2f(dstRect.x1, dstRect.y0); glVertex2f(dstRect.x1, dstRect.y1); glVertex2f(dstRect.x0, dstRect.y1); glEnd(); // Render (maybe scaled) data from video surface glEnable(GL_TEXTURE_2D); glBindTexture(GL_TEXTURE_2D, src_surf->tex_id); glColor4f(1, 1, 1, 1); glBegin(GL_QUADS); glTexCoord2i(srcVideoRect.x0, srcVideoRect.y0); glVertex2f(dstVideoRect.x0, dstVideoRect.y0); glTexCoord2i(srcVideoRect.x1, srcVideoRect.y0); glVertex2f(dstVideoRect.x1, dstVideoRect.y0); glTexCoord2i(srcVideoRect.x1, srcVideoRect.y1); glVertex2f(dstVideoRect.x1, dstVideoRect.y1); glTexCoord2i(srcVideoRect.x0, srcVideoRect.y1); glVertex2f(dstVideoRect.x0, dstVideoRect.y1); glEnd(); glFinish(); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { traceError("VideoMixer::RenderImpl(): gl error %d\n", gl_error); return VDP_STATUS_ERROR; } return VDP_STATUS_OK; } VdpStatus Render(VdpVideoMixer mixer_id, VdpOutputSurface 
background_surface, VdpRect const *background_source_rect, VdpVideoMixerPictureStructure current_picture_structure, uint32_t video_surface_past_count, VdpVideoSurface const *video_surface_past, VdpVideoSurface video_surface_current, uint32_t video_surface_future_count, VdpVideoSurface const *video_surface_future, VdpRect const *video_source_rect, VdpOutputSurface destination_surface, VdpRect const *destination_rect, VdpRect const *destination_video_rect, uint32_t layer_count, VdpLayer const *layers) { return check_for_exceptions(RenderImpl, mixer_id, background_surface, background_source_rect, current_picture_structure, video_surface_past_count, video_surface_past, video_surface_current, video_surface_future_count, video_surface_future, video_source_rect, destination_surface, destination_rect, destination_video_rect, layer_count, layers); } VdpStatus SetAttributeValuesImpl(VdpVideoMixer mixer, uint32_t attribute_count, VdpVideoMixerAttribute const *attributes, void const *const *attribute_values) { std::ignore = mixer; std::ignore = attribute_count; std::ignore = attributes; std::ignore = attribute_values; return VDP_STATUS_OK; } VdpStatus SetAttributeValues(VdpVideoMixer mixer, uint32_t attribute_count, VdpVideoMixerAttribute const *attributes, void const *const *attribute_values) { return check_for_exceptions(SetAttributeValuesImpl, mixer, attribute_count, attributes, attribute_values); } VdpStatus SetFeatureEnablesImpl(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool const *feature_enables) { std::ignore = mixer; std::ignore = feature_count; std::ignore = features; std::ignore = feature_enables; return VDP_STATUS_OK; } VdpStatus SetFeatureEnables(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool const *feature_enables) { return check_for_exceptions(SetFeatureEnablesImpl, mixer, feature_count, features, feature_enables); } } } // namespace vdp::VideoMixer 
libvdpau-va-gl-0.4.2/src/api-video-mixer.hh000066400000000000000000000054511277566164500204660ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #include "api.hh" #include namespace vdp { namespace VideoMixer { struct Resource: public vdp::GenericResource { Resource(std::shared_ptr a_device, uint32_t a_feature_count, VdpVideoMixerFeature const *a_features, uint32_t a_parameter_count, VdpVideoMixerParameter const *a_parameters, void const *const *a_parameter_values); ~Resource(); void free_video_mixer_pixmaps(); uint32_t pixmap_width; ///< last seen width uint32_t pixmap_height; ///< last seen height Pixmap pixmap; ///< target pixmap for vaPutSurface GLXPixmap glx_pixmap; ///< associated glx pixmap for texture-from-pixmap GLuint tex_id; ///< texture for texture-from-pixmap }; VdpVideoMixerQueryFeatureSupport QueryFeatureSupport; VdpVideoMixerQueryParameterSupport QueryParameterSupport; VdpVideoMixerQueryAttributeSupport QueryAttributeSupport; VdpVideoMixerQueryParameterValueRange QueryParameterValueRange; VdpVideoMixerQueryAttributeValueRange QueryAttributeValueRange; VdpVideoMixerCreate Create; VdpVideoMixerSetFeatureEnables SetFeatureEnables; VdpVideoMixerSetAttributeValues SetAttributeValues; VdpVideoMixerGetFeatureSupport GetFeatureSupport; VdpVideoMixerGetFeatureEnables GetFeatureEnables; VdpVideoMixerGetParameterValues GetParameterValues; VdpVideoMixerGetAttributeValues GetAttributeValues; VdpVideoMixerDestroy Destroy; VdpVideoMixerRender Render; } } // namespace vdp::VideoMixer libvdpau-va-gl-0.4.2/src/api-video-surface.cc000066400000000000000000000442111277566164500207550ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, 
subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #define GL_GLEXT_PROTOTYPES #include "api-video-surface.hh" #include "api.hh" #include "compat.hh" #include "glx-context.hh" #include "handle-storage.hh" #include "reverse-constant.hh" #include "shaders.h" #include "trace.hh" #include #include #include #include #include using std::make_shared; using std::shared_ptr; namespace vdp { namespace VideoSurface { Resource::Resource(std::shared_ptr a_device, VdpChromaType a_chroma_type, uint32_t a_width, uint32_t a_height) : chroma_type{a_chroma_type} , width{a_width} , height{a_height} , rt_idx{0} { device = a_device; if (chroma_type != VDP_CHROMA_TYPE_420 && chroma_type != VDP_CHROMA_TYPE_422 && chroma_type != VDP_CHROMA_TYPE_444) { throw vdp::invalid_chroma_type(); } switch (chroma_type) { case VDP_CHROMA_TYPE_420: chroma_width = ((width + 1) & (~1u)) / 2; chroma_height = ((height + 1) & (~1u)) / 2; stride = (width + 0xfu) & (~0xfu); break; case VDP_CHROMA_TYPE_422: chroma_width = ((width + 1) & (~1u)) / 2; chroma_height = height; stride = (width + 2 * chroma_width + 0xfu) & (~0xfu); break; case VDP_CHROMA_TYPE_444: chroma_width = width; chroma_height = height; stride = (4 * width + 0xfu) & (~0xfu); break; } chroma_stride = (chroma_width + 0xfu) & (~0xfu); va_surf = VA_INVALID_SURFACE; tex_id = 0; sync_va_to_glx = false; GLXThreadLocalContext guard{device}; glGenTextures(1, 
&tex_id); glBindTexture(GL_TEXTURE_2D, tex_id); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, nullptr); glGenFramebuffers(1, &fbo_id); glBindFramebuffer(GL_FRAMEBUFFER, fbo_id); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex_id, 0); const auto gl_status = glCheckFramebufferStatus(GL_FRAMEBUFFER); if (gl_status != GL_FRAMEBUFFER_COMPLETE) { traceError("VideoSurface::Resource::Resource(): framebuffer not ready, %d\n", gl_status); throw vdp::generic_error(); } glFinish(); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { traceError("VideoSurface::Resource::Resource(): gl error %d\n", gl_error); throw vdp::generic_error(); } // no VA surface creation here. Actual pool of VA surfaces should be allocated already // by VdpDecoderCreate. VdpDecoderCreate will update ->va_surf field as needed. } Resource::~Resource() { try { { GLXThreadLocalContext guard{device}; glDeleteTextures(1, &tex_id); glDeleteFramebuffers(1, &fbo_id); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) traceError("VideoSurface::Resource::~Resource(): gl error %d\n", gl_error); } if (device->va_available) { // return VA surface to the free list, decoder owns them if (decoder) decoder->free_list.push_back(rt_idx); } } catch (...) 
{ traceError("VideoSurface::Resource::~Resource(): caught exception\n"); } } VdpStatus CreateImpl(VdpDevice device_id, VdpChromaType chroma_type, uint32_t width, uint32_t height, VdpVideoSurface *surface) { if (!surface) return VDP_STATUS_INVALID_POINTER; ResourceRef device{device_id}; auto data = make_shared(device, chroma_type, width, height); *surface = ResourceStorage::instance().insert(data); return VDP_STATUS_OK; } VdpStatus Create(VdpDevice device_id, VdpChromaType chroma_type, uint32_t width, uint32_t height, VdpVideoSurface *surface) { return check_for_exceptions(CreateImpl, device_id, chroma_type, width, height, surface); } VdpStatus DestroyImpl(VdpVideoSurface surface_id) { ResourceRef surf{surface_id}; ResourceStorage::instance().drop(surface_id); return VDP_STATUS_OK; } VdpStatus Destroy(VdpVideoSurface surface_id) { return check_for_exceptions(DestroyImpl, surface_id); } VdpStatus GetBitsYCbCrImpl(VdpVideoSurface surface_id, VdpYCbCrFormat destination_ycbcr_format, void *const *destination_data, uint32_t const *destination_pitches) { if (!destination_data || !destination_pitches) return VDP_STATUS_INVALID_POINTER; ResourceRef surf{surface_id}; VADisplay va_dpy = surf->device->va_dpy; if (surf->device->va_available) { VAImage q; vaDeriveImage(va_dpy, surf->va_surf, &q); if (q.format.fourcc == VA_FOURCC('N', 'V', '1', '2') && destination_ycbcr_format == VDP_YCBCR_FORMAT_NV12) { uint8_t *img_data; vaMapBuffer(va_dpy, q.buf, (void **)&img_data); if (destination_pitches[0] == q.pitches[0] && destination_pitches[1] == q.pitches[1]) { const uint32_t sz = (uint32_t)q.width * (uint32_t)q.height; memcpy(destination_data[0], img_data + q.offsets[0], sz); memcpy(destination_data[1], img_data + q.offsets[1], sz / 2); } else { uint8_t *src = img_data + q.offsets[0]; uint8_t *dst = static_cast(destination_data[0]); for (unsigned int y = 0; y < q.height; y ++) { // Y plane memcpy (dst, src, q.width); src += q.pitches[0]; dst += destination_pitches[0]; } src = 
img_data + q.offsets[1]; dst = static_cast(destination_data[1]); for (unsigned int y = 0; y < q.height / 2; y ++) { // UV plane memcpy(dst, src, q.width); // q.width/2 samples of U and V each, hence q.width src += q.pitches[1]; dst += destination_pitches[1]; } } vaUnmapBuffer(va_dpy, q.buf); } else if (q.format.fourcc == VA_FOURCC('N', 'V', '1', '2') && destination_ycbcr_format == VDP_YCBCR_FORMAT_YV12) { uint8_t *img_data; vaMapBuffer(va_dpy, q.buf, (void **)&img_data); // Y plane if (destination_pitches[0] == q.pitches[0]) { const uint32_t sz = (uint32_t)q.width * (uint32_t)q.height; memcpy(destination_data[0], img_data + q.offsets[0], sz); } else { uint8_t *src = img_data + q.offsets[0]; uint8_t *dst = static_cast(destination_data[0]); for (unsigned int y = 0; y < q.height; y ++) { memcpy(dst, src, q.width); src += q.pitches[0]; dst += destination_pitches[0]; } } // unpack mixed UV to separate planes for (unsigned int y = 0; y < q.height/2; y ++) { uint8_t *src = img_data + q.offsets[1] + y * q.pitches[1]; uint8_t *dst_u = static_cast(destination_data[1]) + y * destination_pitches[1]; uint8_t *dst_v = static_cast(destination_data[2]) + y * destination_pitches[2]; for (unsigned int x = 0; x < q.width/2; x++) { *dst_v++ = *src++; *dst_u++ = *src++; } } vaUnmapBuffer(va_dpy, q.buf); } else { const char *c = (const char *)&q.format.fourcc; traceError("VideoSurface::GetBitsYCbCrImpl(): not implemented conversion VA FOURCC " "%c%c%c%c -> %s\n", *c, *(c+1), *(c+2), *(c+3), reverse_ycbcr_format(destination_ycbcr_format)); vaDestroyImage(va_dpy, q.image_id); return VDP_STATUS_INVALID_Y_CB_CR_FORMAT; } vaDestroyImage(va_dpy, q.image_id); } else { // software fallback traceError("VideoSurface::GetBitsYCbCrImpl(): not implemented software fallback\n"); return VDP_STATUS_ERROR; } return VDP_STATUS_OK; } VdpStatus GetBitsYCbCr(VdpVideoSurface surface_id, VdpYCbCrFormat destination_ycbcr_format, void *const *destination_data, uint32_t const *destination_pitches) { return 
check_for_exceptions(GetBitsYCbCrImpl, surface_id, destination_ycbcr_format, destination_data, destination_pitches); } VdpStatus GetParametersImpl(VdpVideoSurface surface_id, VdpChromaType *chroma_type, uint32_t *width, uint32_t *height) { ResourceRef surf{surface_id}; if (chroma_type) *chroma_type = surf->chroma_type; if (width) *width = surf->width; if (height) *height = surf->height; return VDP_STATUS_OK; } VdpStatus GetParameters(VdpVideoSurface surface_id, VdpChromaType *chroma_type, uint32_t *width, uint32_t *height) { return check_for_exceptions(GetParametersImpl, surface_id, chroma_type, width, height); } VdpStatus PutBitsYCbCr_swscale(VdpVideoSurface surface_id, VdpYCbCrFormat source_ycbcr_format, void const *const *source_data, uint32_t const *source_pitches) { // TODO: implement this return VDP_STATUS_OK; } VdpStatus PutBitsYCbCr_glsl(VdpVideoSurface surface_id, VdpYCbCrFormat source_ycbcr_format, void const *const *source_data, uint32_t const *source_pitches) { if (!source_data || !source_pitches) return VDP_STATUS_INVALID_POINTER; // TODO: implement VDP_YCBCR_FORMAT_UYVY // TODO: implement VDP_YCBCR_FORMAT_YUYV // TODO: implement VDP_YCBCR_FORMAT_Y8U8V8A8 // TODO: implement VDP_YCBCR_FORMAT_V8U8Y8A8 ResourceRef surf{surface_id}; switch (source_ycbcr_format) { case VDP_YCBCR_FORMAT_NV12: case VDP_YCBCR_FORMAT_YV12: /* do nothing */ break; case VDP_YCBCR_FORMAT_UYVY: case VDP_YCBCR_FORMAT_YUYV: case VDP_YCBCR_FORMAT_Y8U8V8A8: case VDP_YCBCR_FORMAT_V8U8Y8A8: default: traceError("VideoSurface::PutBitsYCbCr_glsl(): not implemented source YCbCr format '%s'\n", reverse_ycbcr_format(source_ycbcr_format)); return VDP_STATUS_INVALID_Y_CB_CR_FORMAT; } GLXThreadLocalContext guard{surf->device}; glBindFramebuffer(GL_FRAMEBUFFER, surf->fbo_id); GLuint tex_id[2]; glGenTextures(2, tex_id); glEnable(GL_TEXTURE_2D); switch (source_ycbcr_format) { case VDP_YCBCR_FORMAT_NV12: glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, tex_id[1]); 
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // UV plane glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[1]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, surf->width/2, surf->height/2, 0, GL_RG, GL_UNSIGNED_BYTE, source_data[1]); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, tex_id[0]); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // Y plane glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, surf->width, surf->height, 0, GL_RED, GL_UNSIGNED_BYTE, source_data[0]); break; case VDP_YCBCR_FORMAT_YV12: glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, tex_id[1]); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, surf->width/2, surf->height, 0, GL_RED, GL_UNSIGNED_BYTE, NULL); // U plane glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[2]); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, surf->width/2, surf->height/2, GL_RED, GL_UNSIGNED_BYTE, source_data[2]); // V plane glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[1]); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, surf->height/2, surf->width/2, surf->height/2, GL_RED, GL_UNSIGNED_BYTE, source_data[1]); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, tex_id[0]); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // Y plane glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, surf->width, surf->height, 0, GL_RED, GL_UNSIGNED_BYTE, source_data[0]); break; } glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0, surf->width, 0, surf->height, -1.0f, 1.0f); glViewport(0, 0, surf->width, 
surf->height); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glMatrixMode(GL_TEXTURE); glLoadIdentity(); glDisable(GL_BLEND); switch (source_ycbcr_format) { case VDP_YCBCR_FORMAT_NV12: glUseProgram(surf->device->shaders[glsl_NV12_RGBA].program); glUniform1i(surf->device->shaders[glsl_NV12_RGBA].uniform.tex_0, 0); glUniform1i(surf->device->shaders[glsl_NV12_RGBA].uniform.tex_1, 1); break; case VDP_YCBCR_FORMAT_YV12: glUseProgram(surf->device->shaders[glsl_YV12_RGBA].program); glUniform1i(surf->device->shaders[glsl_YV12_RGBA].uniform.tex_0, 0); glUniform1i(surf->device->shaders[glsl_YV12_RGBA].uniform.tex_1, 1); break; } glBegin(GL_QUADS); glTexCoord2f(0, 0); glVertex2f(0, 0); glTexCoord2f(1, 0); glVertex2f(surf->width, 0); glTexCoord2f(1, 1); glVertex2f(surf->width, surf->height); glTexCoord2f(0, 1); glVertex2f(0, surf->height); glEnd(); glUseProgram(0); glFinish(); glBindFramebuffer(GL_FRAMEBUFFER, 0); glDeleteTextures(2, tex_id); const auto gl_error = glGetError(); if (gl_error != GL_NO_ERROR) { traceError("VideoSurface::PutBitsYCbCr_glsl(): gl error %d\n", gl_error); return VDP_STATUS_ERROR; } return VDP_STATUS_OK; } VdpStatus PutBitsYCbCrImpl(VdpVideoSurface surface, VdpYCbCrFormat source_ycbcr_format, void const *const *source_data, uint32_t const *source_pitches) { int using_glsl = 1; VdpStatus ret; if (using_glsl) { ret = PutBitsYCbCr_glsl(surface, source_ycbcr_format, source_data, source_pitches); } else { ret = PutBitsYCbCr_swscale(surface, source_ycbcr_format, source_data, source_pitches); } return ret; } VdpStatus PutBitsYCbCr(VdpVideoSurface surface, VdpYCbCrFormat source_ycbcr_format, void const *const *source_data, uint32_t const *source_pitches) { return check_for_exceptions(PutBitsYCbCrImpl, surface, source_ycbcr_format, source_data, source_pitches); } VdpStatus QueryCapabilitiesImpl(VdpDevice device, VdpChromaType surface_chroma_type, VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height) { // TODO: don't ignore std::ignore = device; 
std::ignore = surface_chroma_type; // TODO: implement if (is_supported) *is_supported = 1; if (max_width) *max_width = 4096; if (max_height) *max_height = 4096; return VDP_STATUS_OK; } VdpStatus QueryCapabilities(VdpDevice device, VdpChromaType surface_chroma_type, VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height) { return check_for_exceptions(QueryCapabilitiesImpl, device, surface_chroma_type, is_supported, max_width, max_height); } VdpStatus QueryGetPutBitsYCbCrCapabilitiesImpl(VdpDevice device, VdpChromaType surface_chroma_type, VdpYCbCrFormat bits_ycbcr_format, VdpBool *is_supported) { // TODO: don't ignore std::ignore = device; std::ignore = surface_chroma_type; std::ignore = bits_ycbcr_format; // TODO: implement if (is_supported) *is_supported = 1; return VDP_STATUS_OK; } VdpStatus QueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaType surface_chroma_type, VdpYCbCrFormat bits_ycbcr_format, VdpBool *is_supported) { return check_for_exceptions(QueryGetPutBitsYCbCrCapabilitiesImpl, device, surface_chroma_type, bits_ycbcr_format, is_supported); } } } // namespace vdp::VideoSurface libvdpau-va-gl-0.4.2/src/api-video-surface.hh000066400000000000000000000053641277566164500207750ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #include "api-decoder.hh" #include "api.hh" #include #include namespace vdp { namespace VideoSurface { struct Resource: public vdp::GenericResource { Resource(std::shared_ptr a_device, VdpChromaType a_chroma_type, uint32_t a_width, uint32_t a_height); ~Resource(); VdpChromaType chroma_type; ///< video chroma type uint32_t width; uint32_t height; uint32_t stride; uint32_t chroma_width; uint32_t chroma_height; uint32_t chroma_stride; VASurfaceID va_surf; ///< VA-API surface bool sync_va_to_glx; ///< whenever VA-API surface should be converted to GL texture GLuint tex_id; ///< GL texture id (RGBA) GLuint fbo_id; ///< framebuffer object id int32_t rt_idx; ///< index in VdpDecoder's render_targets std::vector y_plane; std::vector u_plane; std::vector v_plane; std::shared_ptr decoder; ///< associated VdpDecoder }; VdpVideoSurfaceQueryCapabilities QueryCapabilities; VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities QueryGetPutBitsYCbCrCapabilities; VdpVideoSurfaceCreate Create; VdpVideoSurfaceDestroy Destroy; VdpVideoSurfaceGetParameters GetParameters; VdpVideoSurfaceGetBitsYCbCr GetBitsYCbCr; VdpVideoSurfacePutBitsYCbCr PutBitsYCbCr; } } // namespace vdp::VideoSurface libvdpau-va-gl-0.4.2/src/api.hh000066400000000000000000000027701277566164500162410ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files 
(the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #include #include namespace vdp { const int kMaxRenderTargets = 21; const int kNumRenderTargets = 21; namespace Device { struct Resource; } // namespace Device struct GenericResource { uint32_t id; std::shared_ptr device; std::recursive_mutex mtx; }; } // namespace vdp libvdpau-va-gl-0.4.2/src/bitstream.hh000066400000000000000000000130731277566164500174600ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #include #include #include #include namespace vdp { /// Raw byte sequence payload state /// /// throws ByteReader::error() class RBSPState { public: class error: public std::logic_error { public: explicit error(const char *descr) : std::logic_error(descr) {} }; private: /// encapsulates handling of emulation prevention bytes (EPB) class ByteReader { public: explicit ByteReader(const std::vector &buffer) : data_{buffer} , byte_ofs_{0} , zeros_in_row_{0} {} ByteReader(const ByteReader &other) : data_{other.data_} , byte_ofs_{other.byte_ofs_} , zeros_in_row_{other.zeros_in_row_} {} uint8_t get_byte() { if (byte_ofs_ >= data_.size()) throw error("ByteReader: trying to read beyond bounds"); const uint8_t current_byte = data_[byte_ofs_ ++]; if (zeros_in_row_ >= 2 && current_byte == 3) { if (byte_ofs_ >= data_.size()) throw error("ByteReader: trying to read beyond bounds"); const uint8_t another_byte = data_[byte_ofs_ ++]; zeros_in_row_ = (another_byte == 0) ? 
1 : 0; return another_byte; } if (current_byte == 0) { zeros_in_row_ += 1; } else { zeros_in_row_ = 0; } return current_byte; } size_t get_ofs() const { return byte_ofs_; } /// rewind to the next NAL unit begin marker (0x00 0x00 0x01) int64_t navigate_to_nal_unit() { const size_t prev_ofs = byte_ofs_; uint32_t window = ~0u; do { if (byte_ofs_ >= data_.size()) throw error("ByteReader: no more bytes"); const uint32_t c = data_[byte_ofs_++]; window = (window << 8) | c; } while ((window & 0xffffff) != 0x000001); return byte_ofs_ - prev_ofs; } private: ByteReader & operator=(const ByteReader &) = delete; const std::vector &data_; size_t byte_ofs_; size_t zeros_in_row_; }; public: explicit RBSPState(const std::vector &buffer) : byte_reader_{buffer} , bits_eaten_{0} , current_byte_{0} , bit_ofs_{7} {} ~RBSPState() = default; RBSPState(const RBSPState &other) : byte_reader_{other.byte_reader_} , bits_eaten_{other.bits_eaten_} , current_byte_{other.current_byte_} , bit_ofs_{other.bit_ofs_} {} int64_t navigate_to_nal_unit() { // reset bit position to ensure next read will fetch a fresh byte from byte_reader_ bit_ofs_ = 7; return byte_reader_.navigate_to_nal_unit(); } void reset_bit_counter() { bits_eaten_ = 0; } size_t bits_eaten() const { return bits_eaten_; } uint32_t get_u(size_t bitcount) { uint32_t res = 0; for (size_t k = 0; k < bitcount; k ++) res = (res << 1) + get_bit(); return res; } uint32_t get_uev() { size_t zeros = 0; while (get_bit() == 0) zeros ++; if (zeros == 0) return 0; return (1 << zeros) - 1 + get_u(zeros); } int32_t get_sev() { size_t zeros = 0; while (get_bit() == 0) zeros ++; if (zeros == 0) return 0; const int32_t val = (1 << zeros) + get_u(zeros); if (val & 1) return -(val / 2); return val / 2; } private: RBSPState & operator=(const RBSPState &) = delete; uint32_t get_bit() { if (bit_ofs_ == 7) current_byte_ = byte_reader_.get_byte(); const uint32_t val = (current_byte_ >> bit_ofs_) & 1; if (bit_ofs_ > 0) { bit_ofs_ -= 1; } else { bit_ofs_ = 7; // 
most significant bit in a 8-bit byte } bits_eaten_ += 1; return val; } ByteReader byte_reader_; size_t bits_eaten_; uint8_t current_byte_; uint8_t bit_ofs_; }; } // namespace vdp libvdpau-va-gl-0.4.2/src/compat.hh000066400000000000000000000034371277566164500167540ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #include #include #include #ifdef __FreeBSD__ #include #endif #if defined(__linux__) typedef int thread_id_t; static inline thread_id_t get_current_thread_id() { return syscall(__NR_gettid); } static inline size_t thread_is_alive(thread_id_t tid) { return kill(tid, 0) == 0; } #elif defined(__FreeBSD__) typedef long thread_id_t; static inline thread_id_t get_current_thread_id() { long thread_id; thr_self(&thread_id); return thread_id; } static inline size_t thread_is_alive(thread_id_t tid) { return thr_kill(tid, 0) == 0; } #else #error Unknown OS #endif libvdpau-va-gl-0.4.2/src/entry.cc000066400000000000000000000056131277566164500166160ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #define _XOPEN_SOURCE 500 #include "api.hh" #include "compat.hh" #include "globals.hh" #include "handle-storage.hh" #include "trace.hh" #include #include #include #include #include #include static void initialize_quirks() { global.quirks.buggy_XCloseDisplay = 0; global.quirks.show_watermark = 0; global.quirks.avoid_va = 0; const char *value = getenv("VDPAU_QUIRKS"); if (!value) return; char *value_lc = strdup(value); if (NULL == value_lc) return; for (int k = 0; value_lc[k] != 0; k ++) value_lc[k] = tolower(value_lc[k]); // tokenize string const char delimiter = ','; char *item_start = value_lc; char *ptr = item_start; while (1) { int last = (0 == *ptr); if (delimiter == *ptr || 0 == *ptr) { *ptr = 0; if (!strcmp("xclosedisplay", item_start)) { global.quirks.buggy_XCloseDisplay = 1; } else if (!strcmp("showwatermark", item_start)) { global.quirks.show_watermark = 1; } else if (!strcmp("avoidva", item_start)) { global.quirks.avoid_va = 1; } item_start = ptr + 1; } ptr ++; if (last) break; } free(value_lc); } __attribute__((constructor)) void va_gl_library_constructor() { // Initialize global data initialize_quirks(); } extern "C" __attribute__ ((visibility("default"))) VdpStatus vdp_imp_device_create_x11(Display *display, int screen, VdpDevice *device, VdpGetProcAddress **get_proc_address) { return vdp::Device::CreateX11(display, screen, device, get_proc_address); } libvdpau-va-gl-0.4.2/src/exceptions.hh000066400000000000000000000033011277566164500176400ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, 
subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include namespace vdp { class resource_not_found: public std::exception {}; // VDP_STATUS_INVALID_HANDLE class generic_error: public std::exception {}; // VDP_STATUS_ERROR class invalid_size: public std::exception {}; // VDP_STATUS_INVALID_SIZE class invalid_rgba_format: public std::exception {}; // VDP_STATUS_INVALID_RGBA_FORMAT class invalid_decoder_profile: public std::exception {};// VDP_INVALID_DECODER_PROFILE class invalid_chroma_type: public std::exception {}; // VDP_INVALID_CHROMA_TYPE } // namespace vdp libvdpau-va-gl-0.4.2/src/globals.cc000066400000000000000000000023001277566164500170660ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "globals.hh" struct global_data global; libvdpau-va-gl-0.4.2/src/globals.hh000066400000000000000000000031411277566164500171040ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once /** @brief place where all shared global variables live */ struct global_data { /** @brief tunables */ struct { int buggy_XCloseDisplay; ///< avoid calling XCloseDisplay int show_watermark; ///< show picture over output int avoid_va; ///< do not use VA-API video decoding acceleration even if ///< available } quirks; }; extern struct global_data global; libvdpau-va-gl-0.4.2/src/glx-context.cc000066400000000000000000000132511277566164500177260ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* * GLX context related helpers */ #include "api-device.hh" #include "compat.hh" #include "globals.hh" #include "glx-context.hh" #include "trace.hh" #include #include #include #include #include #include using std::vector; namespace { std::map g_glc_map; std::recursive_mutex g_glc_mutex; GLXContext g_root_glc; int g_root_glc_refcnt; XVisualInfo *g_root_vi; int x11_error_code = 0; } // anonymouse namespace namespace vdp { GLXManagedContext & GLXManagedContext::operator=(GLXManagedContext &&that) { if (this == &that) return *this; glc_ = that.glc_; that.glc_ = nullptr; return *this; } GLXManagedContext::GLXManagedContext(GLXManagedContext &&other) : dpy_{} , glc_{other.glc_} { other.glc_ = nullptr; } void GLXManagedContext::destroy() { if (glc_ == nullptr) return; if (glc_ == glXGetCurrentContext()) glXMakeCurrent(dpy_.get(), None, nullptr); glXDestroyContext(dpy_.get(), glc_); glc_ = nullptr; } GLXThreadLocalContext::GLXThreadLocalContext(std::shared_ptr device, bool restore_previous_context) : GLXThreadLocalContext(device->root, restore_previous_context) { } GLXThreadLocalContext::GLXThreadLocalContext(Window wnd, bool restore_previous_context) : restore_previous_context_(restore_previous_context) { g_glc_mutex.lock(); XDisplayRef dpy_ref{}; Display *const dpy = dpy_ref.get(); const thread_id_t thread_id = get_current_thread_id(); prev_dpy_ = glXGetCurrentDisplay(); if (!prev_dpy_) prev_dpy_ = dpy; prev_wnd_ = glXGetCurrentDrawable(); prev_glc_ = glXGetCurrentContext(); GLXContext glc; auto val = g_glc_map.find(thread_id); if (val == g_glc_map.end()) { glc = glXCreateContext(dpy, g_root_vi, g_root_glc, GL_TRUE); assert(glc); g_glc_map.emplace(thread_id, GLXManagedContext(glc)); // find which threads are not alive already vector dead_threads; for (const auto &it: g_glc_map) if (not thread_is_alive(it.first)) dead_threads.push_back(it.first); // and delete every context associated with them for (const auto &it: dead_threads) g_glc_map.erase(it); } else { glc = 
val->second.get(); } glXMakeCurrent(dpy, wnd, glc); } GLXThreadLocalContext::~GLXThreadLocalContext() { if (restore_previous_context_) glXMakeCurrent(prev_dpy_, prev_wnd_, prev_glc_); else glXMakeCurrent(prev_dpy_, None, nullptr); g_glc_mutex.unlock(); } GLXLockGuard::GLXLockGuard() { g_glc_mutex.lock(); } GLXLockGuard::~GLXLockGuard() { g_glc_mutex.unlock(); } GLXGlobalContext::GLXGlobalContext(Display *dpy, int screen) : dpy_{dpy} { std::unique_lock lock{g_glc_mutex}; g_root_glc_refcnt += 1; if (g_root_glc_refcnt > 1) return; GLint att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None }; g_root_vi = glXChooseVisual(dpy, screen, att); if (!g_root_vi) { traceError("GLXGlobalContext::GLXGlobalContext: glXChooseVisual failed\n"); throw std::bad_alloc(); } g_root_glc = glXCreateContext(dpy, g_root_vi, NULL, GL_TRUE); if (!g_root_glc) throw std::bad_alloc(); } GLXGlobalContext::~GLXGlobalContext() { try { std::unique_lock lock{g_glc_mutex}; g_root_glc_refcnt -= 1; if (g_root_glc_refcnt <= 0) { // destroying global GL context glXMakeCurrent(dpy_, None, nullptr); glXDestroyContext(dpy_, g_root_glc); XFree(g_root_vi); // destroying all per-thread GL contexts g_glc_map.clear(); } } catch (...) 
{ traceError("GLXGlobalContext::~GLXGlobalContext(): caught exception\n"); } } GLXContext GLXGlobalContext::get() const { std::unique_lock lock{g_glc_mutex}; if (g_root_glc_refcnt > 0) return g_root_glc; else return nullptr; } } // namespace vdp static int x11_error_handler(Display *, XErrorEvent *ee) { x11_error_code = ee->error_code; return 0; } void x11_push_eh() { x11_error_code = 0; XSetErrorHandler(&x11_error_handler); } int x11_pop_eh() { // restoring error handler is too risky, leave it as is return x11_error_code; } libvdpau-va-gl-0.4.2/src/glx-context.hh000066400000000000000000000053161277566164500177430ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #include "x-display-ref.hh" #include #include #include namespace vdp { namespace Device { struct Resource; } // namespace Device class GLXManagedContext { public: explicit GLXManagedContext(GLXContext glc) : dpy_{} , glc_{glc} {} GLXManagedContext(GLXManagedContext &&other); ~GLXManagedContext() { destroy(); } GLXManagedContext & operator=(GLXManagedContext &&that); GLXContext get() const { return glc_; } private: XDisplayRef dpy_; GLXContext glc_; void destroy(); }; class GLXThreadLocalContext { public: explicit GLXThreadLocalContext(Window wnd, bool restore_previous_context = true); explicit GLXThreadLocalContext(std::shared_ptr device, bool restore_previous_context = true); ~GLXThreadLocalContext(); GLXThreadLocalContext(const GLXThreadLocalContext &) = delete; GLXThreadLocalContext & operator=(const GLXThreadLocalContext &) = delete; private: Display *prev_dpy_; Window prev_wnd_; GLXContext prev_glc_; bool restore_previous_context_; }; class GLXLockGuard { public: GLXLockGuard(); ~GLXLockGuard(); }; class GLXGlobalContext { public: GLXGlobalContext(Display *dpy, int screen); ~GLXGlobalContext(); GLXContext get() const; private: GLXGlobalContext & operator=(const GLXGlobalContext &that) = delete; Display *dpy_; }; } // namespace vdp void x11_push_eh(); int x11_pop_eh(); libvdpau-va-gl-0.4.2/src/h264-parse.cc000066400000000000000000000571341277566164500172550ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this 
permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "h264-parse.hh" #include #include #include #include namespace vdp { enum { SLICE_TYPE_P = 0, SLICE_TYPE_B = 1, SLICE_TYPE_I = 2, SLICE_TYPE_SP = 3, SLICE_TYPE_SI = 4, }; enum { NAL_UNSPECIFIED = 0, NAL_SLICE = 1, NAL_SLICE_DATA_A = 2, NAL_SLICE_DATA_B = 3, NAL_SLICE_DATA_C = 4, NAL_IDR_SLICE = 5, }; #define NOT_IMPLEMENTED(str) assert(0 && "not implemented" && str) struct slice_parameters { int nal_ref_idc; int nal_unit_type; int first_mb_in_slice; int slice_type; int pic_parameter_set_id; int frame_num; int field_pic_flag; int bottom_field_flag; int idr_pic_id; int pic_order_cnt_lsb; int delta_pic_order_cnt_bottom; int delta_pic_order_cnt[2]; int redundant_pic_cnt; int direct_spatial_mv_pred_flag; int num_ref_idx_active_override_flag; int num_ref_idx_l0_active_minus1; int num_ref_idx_l1_active_minus1; int luma_log2_weight_denom; int chroma_log2_weight_denom; unsigned int luma_weight_l0_flag; int luma_weight_l0[32]; int luma_offset_l0[32]; unsigned int chroma_weight_l0_flag; int chroma_weight_l0[32][2]; int chroma_offset_l0[32][2]; unsigned int luma_weight_l1_flag; int luma_weight_l1[32]; int luma_offset_l1[32]; unsigned int chroma_weight_l1_flag; int chroma_weight_l1[32][2]; int chroma_offset_l1[32][2]; unsigned int no_output_of_prior_pics_flag; unsigned int long_term_reference_flag; unsigned int cabac_init_idc; int slice_qp_delta; unsigned int sp_for_switch_flag; int slice_qs_delta; unsigned 
int disable_deblocking_filter_idc; int slice_alpha_c0_offset_div2; int slice_beta_offset_div2; VAPictureH264 RefPicList0[32]; VAPictureH264 RefPicList1[32]; }; static void parse_ref_pic_list_modification(RBSPState &st, const VAPictureParameterBufferH264 *vapp, struct slice_parameters *sp); static void parse_pred_weight_table(RBSPState &st, const int ChromaArrayType, struct slice_parameters *sp); static void parse_dec_ref_pic_marking(RBSPState &st, struct slice_parameters *sp); static void do_fill_va_slice_parameter_buffer(struct slice_parameters const * const sp, VASliceParameterBufferH264 *vasp, int bit_offset) { vasp->slice_data_bit_offset = bit_offset; vasp->first_mb_in_slice = sp->first_mb_in_slice; vasp->slice_type = sp->slice_type; vasp->direct_spatial_mv_pred_flag = sp->direct_spatial_mv_pred_flag; vasp->num_ref_idx_l0_active_minus1 = sp->num_ref_idx_l0_active_minus1; vasp->num_ref_idx_l1_active_minus1 = sp->num_ref_idx_l1_active_minus1; vasp->cabac_init_idc = sp->cabac_init_idc; vasp->slice_qp_delta = sp->slice_qp_delta; vasp->disable_deblocking_filter_idc = sp->disable_deblocking_filter_idc; vasp->slice_alpha_c0_offset_div2 = sp->slice_alpha_c0_offset_div2; vasp->slice_beta_offset_div2 = sp->slice_beta_offset_div2; for (int k = 0; k < 32; k ++) { vasp->RefPicList0[k] = sp->RefPicList0[k]; vasp->RefPicList1[k] = sp->RefPicList1[k]; } vasp->luma_log2_weight_denom = sp->luma_log2_weight_denom; vasp->chroma_log2_weight_denom = sp->chroma_log2_weight_denom; vasp->luma_weight_l0_flag = sp->luma_weight_l0_flag; for (int k = 0; k < 32; k ++) vasp->luma_weight_l0[k] = sp->luma_weight_l0[k]; for (int k = 0; k < 32; k ++) vasp->luma_offset_l0[k] = sp->luma_offset_l0[k]; vasp->chroma_weight_l0_flag = sp->chroma_weight_l0_flag; for (int k = 0; k < 32; k ++) vasp->chroma_weight_l0[k][0] = sp->chroma_weight_l0[k][0]; for (int k = 0; k < 32; k ++) vasp->chroma_weight_l0[k][1] = sp->chroma_weight_l0[k][1]; for (int k = 0; k < 32; k ++) vasp->chroma_offset_l0[k][0] = 
sp->chroma_offset_l0[k][0]; for (int k = 0; k < 32; k ++) vasp->chroma_offset_l0[k][1] = sp->chroma_offset_l0[k][1]; vasp->luma_weight_l1_flag = sp->luma_weight_l1_flag; for (int k = 0; k < 32; k ++) vasp->luma_weight_l1[k] = sp->luma_weight_l1[k]; for (int k = 0; k < 32; k ++) vasp->luma_offset_l1[k] = sp->luma_offset_l1[k]; vasp->chroma_weight_l1_flag = sp->chroma_weight_l1_flag; for (int k = 0; k < 32; k ++) vasp->chroma_weight_l1[k][0] = sp->chroma_weight_l1[k][0]; for (int k = 0; k < 32; k ++) vasp->chroma_weight_l1[k][1] = sp->chroma_weight_l1[k][1]; for (int k = 0; k < 32; k ++) vasp->chroma_offset_l1[k][0] = sp->chroma_offset_l1[k][0]; for (int k = 0; k < 32; k ++) vasp->chroma_offset_l1[k][1] = sp->chroma_offset_l1[k][1]; } void reset_va_picture_h264(VAPictureH264 *p) { p->picture_id = VA_INVALID_SURFACE; p->frame_idx = 0; p->flags = VA_PICTURE_H264_INVALID; p->TopFieldOrderCnt = 0; p->BottomFieldOrderCnt = 0; } static void fill_ref_pic_list(struct slice_parameters *sp, const VAPictureParameterBufferH264 *vapp) { int idcs_asc[32], idcs_desc[32]; if (sp->slice_type == SLICE_TYPE_I || sp->slice_type == SLICE_TYPE_SI) return; int frame_count = 0; for (int k = 0; k < vapp->num_ref_frames; k ++) { if (vapp->ReferenceFrames[k].flags & VA_PICTURE_H264_INVALID) continue; sp->RefPicList0[frame_count] = vapp->ReferenceFrames[k]; idcs_asc[frame_count] = idcs_desc[frame_count] = k; frame_count ++; } if (sp->slice_type == SLICE_TYPE_P || sp->slice_type == SLICE_TYPE_SP) { // TODO: implement interlaced P slices std::stable_sort(idcs_asc, idcs_asc + frame_count, [vapp](int idx_1, int idx_2) { auto value1 = vapp->ReferenceFrames[idx_1].TopFieldOrderCnt; auto value2 = vapp->ReferenceFrames[idx_2].TopFieldOrderCnt; return value1 < value2; }); std::stable_sort(idcs_desc, idcs_desc + frame_count, [vapp](int idx_1, int idx_2) { auto value1 = vapp->ReferenceFrames[idx_1].TopFieldOrderCnt; auto value2 = vapp->ReferenceFrames[idx_2].TopFieldOrderCnt; return value1 > value2; }); 
int ptr = 0; for (int k = 0; k < frame_count; k ++) if (vapp->ReferenceFrames[idcs_desc[k]].flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) sp->RefPicList0[ptr++] = vapp->ReferenceFrames[idcs_desc[k]]; for (int k = 0; k < frame_count; k ++) if (vapp->ReferenceFrames[idcs_asc[k]].flags & VA_PICTURE_H264_LONG_TERM_REFERENCE) sp->RefPicList0[ptr++] = vapp->ReferenceFrames[idcs_asc[k]]; } else if (sp->slice_type == SLICE_TYPE_B && !vapp->pic_fields.bits.field_pic_flag) { std::stable_sort(idcs_asc, idcs_asc + frame_count, [vapp](int idx_1, int idx_2) { auto value1 = vapp->ReferenceFrames[idx_1].TopFieldOrderCnt; auto value2 = vapp->ReferenceFrames[idx_2].TopFieldOrderCnt; return value1 < value2; }); std::stable_sort(idcs_desc, idcs_desc + frame_count, [vapp](int idx_1, int idx_2) { auto value1 = vapp->ReferenceFrames[idx_1].TopFieldOrderCnt; auto value2 = vapp->ReferenceFrames[idx_2].TopFieldOrderCnt; return value1 > value2; }); int ptr0 = 0; int ptr1 = 0; for (int k = 0; k < frame_count; k ++) { const VAPictureH264 *rf = &vapp->ReferenceFrames[idcs_desc[k]]; if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) if (rf->TopFieldOrderCnt < vapp->CurrPic.TopFieldOrderCnt) sp->RefPicList0[ptr0++] = *rf; rf = &vapp->ReferenceFrames[idcs_asc[k]]; if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) if (rf->TopFieldOrderCnt >= vapp->CurrPic.TopFieldOrderCnt) sp->RefPicList1[ptr1++] = *rf; } for (int k = 0; k < frame_count; k ++) { const VAPictureH264 *rf = &vapp->ReferenceFrames[idcs_asc[k]]; if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) if (rf->TopFieldOrderCnt >= vapp->CurrPic.TopFieldOrderCnt) sp->RefPicList0[ptr0++] = *rf; rf = &vapp->ReferenceFrames[idcs_desc[k]]; if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) if (rf->TopFieldOrderCnt < vapp->CurrPic.TopFieldOrderCnt) sp->RefPicList1[ptr1++] = *rf; } for (int k = 0; k < frame_count; k ++) { const VAPictureH264 *rf = &vapp->ReferenceFrames[idcs_asc[k]]; if (rf->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE) { 
sp->RefPicList0[ptr0++] = *rf; sp->RefPicList1[ptr1++] = *rf; } } } else { // TODO: implement interlaced B slices assert(0 && "not implemeted: interlaced SLICE_TYPE_B sorting"); } } void parse_slice_header(RBSPState &st, const VAPictureParameterBufferH264 *vapp, const int ChromaArrayType, unsigned int p_num_ref_idx_l0_active_minus1, unsigned int p_num_ref_idx_l1_active_minus1, VASliceParameterBufferH264 *vasp) { struct slice_parameters sp = { 0 }; for (int k = 0; k < 32; k ++) { reset_va_picture_h264(&sp.RefPicList0[k]); reset_va_picture_h264(&sp.RefPicList1[k]); } st.get_u(1); // forbidden_zero_bit sp.nal_ref_idc = st.get_u(2); sp.nal_unit_type = st.get_u(5); if (sp.nal_unit_type == 14 || sp.nal_unit_type == 20) { NOT_IMPLEMENTED("nal unit types 14 and 20"); } sp.first_mb_in_slice = st.get_uev(); sp.slice_type = st.get_uev(); if (sp.slice_type > 4) sp.slice_type -= 5; // wrap 5-9 to 0-4 // as now we know slice_type, time to fill RefPicListX fill_ref_pic_list(&sp, vapp); sp.pic_parameter_set_id = st.get_uev(); // TODO: separate_colour_plane_flag is 0 for all but YUV444. Now ok, but should detect properly. 
// See 7.3.3 sp.frame_num = st.get_u(vapp->seq_fields.bits.log2_max_frame_num_minus4 + 4); sp.field_pic_flag = 0; sp.bottom_field_flag = 0; if (!vapp->seq_fields.bits.frame_mbs_only_flag) { sp.field_pic_flag = st.get_u(1); if (sp.field_pic_flag) { sp.bottom_field_flag = st.get_u(1); } } sp.idr_pic_id = 0; if (sp.nal_unit_type == NAL_IDR_SLICE) // IDR picture sp.idr_pic_id = st.get_uev(); sp.pic_order_cnt_lsb = 0; sp.delta_pic_order_cnt_bottom = 0; if (vapp->seq_fields.bits.pic_order_cnt_type == 0) { sp.pic_order_cnt_lsb = st.get_u(vapp->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 + 4); if (vapp->pic_fields.bits.pic_order_present_flag && !vapp->pic_fields.bits.field_pic_flag) { sp.delta_pic_order_cnt_bottom = st.get_sev(); } } sp.delta_pic_order_cnt[0] = 0; sp.delta_pic_order_cnt[1] = 0; if (vapp->seq_fields.bits.pic_order_cnt_type == 1 && !vapp->seq_fields.bits.delta_pic_order_always_zero_flag) { sp.delta_pic_order_cnt[0] = st.get_sev(); if (vapp->pic_fields.bits.pic_order_present_flag && !vapp->pic_fields.bits.field_pic_flag) sp.delta_pic_order_cnt[1] = st.get_sev(); } sp.redundant_pic_cnt = 0; if (vapp->pic_fields.bits.redundant_pic_cnt_present_flag) sp.redundant_pic_cnt = st.get_uev(); sp.direct_spatial_mv_pred_flag = 0; if (sp.slice_type == SLICE_TYPE_B) sp.direct_spatial_mv_pred_flag = st.get_u(1); sp.num_ref_idx_active_override_flag = 0; sp.num_ref_idx_l0_active_minus1 = 0; sp.num_ref_idx_l1_active_minus1 = 0; if (sp.slice_type == SLICE_TYPE_P || sp.slice_type == SLICE_TYPE_SP || sp.slice_type == SLICE_TYPE_B) { sp.num_ref_idx_l0_active_minus1 = p_num_ref_idx_l0_active_minus1; if (sp.slice_type != SLICE_TYPE_P) sp.num_ref_idx_l1_active_minus1 = p_num_ref_idx_l1_active_minus1; sp.num_ref_idx_active_override_flag = st.get_u(1); if (sp.num_ref_idx_active_override_flag) { sp.num_ref_idx_l0_active_minus1 = st.get_uev(); if (sp.slice_type == SLICE_TYPE_B) sp.num_ref_idx_l1_active_minus1 = st.get_uev(); } } if (sp.nal_unit_type == 20) { NOT_IMPLEMENTED("nal 
unit type 20"); } else { parse_ref_pic_list_modification(st, vapp, &sp); } // here fields {luma,chroma}_weight_l{0,1}_flag differ from same-named flags from // H.264 recommendation. Each of those flags should be set to 1 if any of // weight tables differ from default sp.luma_weight_l0_flag = 0; sp.luma_weight_l1_flag = 0; sp.chroma_weight_l0_flag = 0; sp.chroma_weight_l1_flag = 0; if ((vapp->pic_fields.bits.weighted_pred_flag && (sp.slice_type == SLICE_TYPE_P || sp.slice_type == SLICE_TYPE_SP)) || (vapp->pic_fields.bits.weighted_bipred_idc == 1 && sp.slice_type == SLICE_TYPE_B)) { parse_pred_weight_table(st, ChromaArrayType, &sp); } if (sp.nal_ref_idc != 0) { parse_dec_ref_pic_marking(st, &sp); } sp.cabac_init_idc = 0; if (vapp->pic_fields.bits.entropy_coding_mode_flag && sp.slice_type != SLICE_TYPE_I && sp.slice_type != SLICE_TYPE_SI) { sp.cabac_init_idc = st.get_uev(); } sp.slice_qp_delta = st.get_sev(); sp.sp_for_switch_flag = 0; sp.slice_qs_delta = 0; if (sp.slice_type == SLICE_TYPE_SP || sp.slice_type == SLICE_TYPE_SI) { if (sp.slice_type == SLICE_TYPE_SP) sp.sp_for_switch_flag = st.get_u(1); sp.slice_qs_delta = st.get_sev(); } sp.disable_deblocking_filter_idc = 0; sp.slice_alpha_c0_offset_div2 = 0; sp.slice_beta_offset_div2 = 0; if (vapp->pic_fields.bits.deblocking_filter_control_present_flag) { sp.disable_deblocking_filter_idc = st.get_uev(); if (sp.disable_deblocking_filter_idc != 1) { sp.slice_alpha_c0_offset_div2 = st.get_sev(); sp.slice_beta_offset_div2 = st.get_sev(); } } if (vapp->num_slice_groups_minus1 > 0 && vapp->slice_group_map_type >= 3 && vapp->slice_group_map_type <= 5) { NOT_IMPLEMENTED("don't know what length to consume\n"); } do_fill_va_slice_parameter_buffer(&sp, vasp, st.bits_eaten()); } static void parse_ref_pic_list_modification(RBSPState &st, const VAPictureParameterBufferH264 *vapp, struct slice_parameters *sp) { const int MaxFrameNum = 1 << (vapp->seq_fields.bits.log2_max_frame_num_minus4 + 4); const int MaxPicNum = 
(vapp->pic_fields.bits.field_pic_flag) ? 2*MaxFrameNum : MaxFrameNum; if (sp->slice_type != SLICE_TYPE_I && sp->slice_type != SLICE_TYPE_SI) { int ref_pic_list_modification_flag_l0 = st.get_u(1); if (ref_pic_list_modification_flag_l0) { int modification_of_pic_nums_idc; int refIdxL0 = 0; unsigned int picNumL0 = vapp->frame_num; do { modification_of_pic_nums_idc = st.get_uev(); if (modification_of_pic_nums_idc < 2) { int abs_diff_pic_num_minus1 = st.get_uev(); if (modification_of_pic_nums_idc == 0) { picNumL0 -= (abs_diff_pic_num_minus1 + 1); } else { // modification_of_pic_nums_idc == 1 picNumL0 += (abs_diff_pic_num_minus1 + 1); } // wrap picNumL0 picNumL0 &= (MaxPicNum - 1); // there is no need to subtract MaxPicNum as in (8-36) in 8.2.4.3.1 // because frame_num already wrapped int j; for (j = 0; j < vapp->num_ref_frames; j ++) { if (vapp->ReferenceFrames[j].flags & VA_PICTURE_H264_INVALID) continue; if (vapp->ReferenceFrames[j].frame_idx == picNumL0 && (vapp->ReferenceFrames[j].flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE)) break; } assert (j < vapp->num_ref_frames); VAPictureH264 swp = vapp->ReferenceFrames[j]; for (int k = sp->num_ref_idx_l0_active_minus1; k > refIdxL0; k --) sp->RefPicList0[k] = sp->RefPicList0[k-1]; sp->RefPicList0[refIdxL0 ++] = swp; j = refIdxL0; for (int k = refIdxL0; k <= sp->num_ref_idx_l0_active_minus1 + 1; k ++) { if (sp->RefPicList0[k].frame_idx != picNumL0 && (sp->RefPicList0[k].flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE)) { sp->RefPicList0[j++] = sp->RefPicList0[k]; } } } else if (modification_of_pic_nums_idc == 2) { NOT_IMPLEMENTED("long"); fprintf(stderr, "long_term_pic_num = %d\n", st.get_uev()); } } while (modification_of_pic_nums_idc != 3); } } if (sp->slice_type == SLICE_TYPE_B) { int ref_pic_list_modification_flag_l1 = st.get_u(1); if (ref_pic_list_modification_flag_l1) { NOT_IMPLEMENTED("ref pic list modification 1"); // TODO: implement this int modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = 
st.get_uev(); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { fprintf(stderr, "abs_diff_pic_num_minus1 = %d\n", st.get_uev()); } else if (modification_of_pic_nums_idc == 2) { fprintf(stderr, "long_term_pic_num = %d\n", st.get_uev()); } } while (modification_of_pic_nums_idc != 3); } } } static void fill_default_pred_weight_table(struct slice_parameters *sp) { const int default_luma_weight = (1 << sp->luma_log2_weight_denom); const int default_chroma_weight = (1 << sp->chroma_log2_weight_denom); for (int k = 0; k < sp->num_ref_idx_l0_active_minus1 + 1; k ++) { sp->luma_weight_l0[k] = default_luma_weight; sp->luma_offset_l0[k] = 0; sp->chroma_weight_l0[k][0] = sp->chroma_weight_l0[k][1] = default_chroma_weight; sp->chroma_offset_l0[k][0] = sp->chroma_offset_l0[k][1] = 0; } for (int k = 0; k < sp->num_ref_idx_l1_active_minus1 + 1; k ++) { sp->luma_weight_l1[k] = default_luma_weight; sp->luma_offset_l1[k] = 0; sp->chroma_weight_l1[k][0] = sp->chroma_weight_l1[k][1] = default_chroma_weight; sp->chroma_offset_l1[k][0] = sp->chroma_offset_l1[k][1] = 0; } } static void parse_pred_weight_table(RBSPState &st, const int ChromaArrayType, struct slice_parameters *sp) { sp->luma_log2_weight_denom = st.get_uev(); sp->chroma_log2_weight_denom = 0; if (ChromaArrayType != 0) sp->chroma_log2_weight_denom = st.get_uev(); fill_default_pred_weight_table(sp); const int default_luma_weight = (1 << sp->luma_log2_weight_denom); const int default_chroma_weight = (1 << sp->chroma_log2_weight_denom); for (int k = 0; k <= sp->num_ref_idx_l0_active_minus1; k ++) { int luma_weight_l0_flag = st.get_u(1); if (luma_weight_l0_flag) { sp->luma_weight_l0[k] = st.get_sev(); sp->luma_offset_l0[k] = st.get_sev(); if (default_luma_weight != sp->luma_weight_l0[k]) sp->luma_weight_l0_flag = 1; } if (ChromaArrayType != 0) { int chroma_weight_l0_flag = st.get_u(1); if (chroma_weight_l0_flag) { for (int j = 0; j < 2; j ++) { sp->chroma_weight_l0[k][j] = st.get_sev(); 
sp->chroma_offset_l0[k][j] = st.get_sev(); if (default_chroma_weight != sp->chroma_weight_l0[k][j]) sp->chroma_weight_l0_flag = 1; } } } } if (sp->slice_type == SLICE_TYPE_B) { for (int k = 0; k <= sp->num_ref_idx_l1_active_minus1; k ++) { int luma_weight_l1_flag = st.get_u(1); if (luma_weight_l1_flag) { sp->luma_weight_l1[k] = st.get_sev(); sp->luma_offset_l1[k] = st.get_sev(); if (default_luma_weight != sp->luma_weight_l1[k]) sp->luma_weight_l1_flag = 1; } if (ChromaArrayType != 0) { int chroma_weight_l1_flag = st.get_u(1); if (chroma_weight_l1_flag) { for (int j = 0; j < 2; j ++) { sp->chroma_weight_l1[k][j] = st.get_sev(); sp->chroma_offset_l1[k][j] = st.get_sev(); if (default_chroma_weight != sp->chroma_weight_l1[k][j]) sp->chroma_weight_l1_flag = 1; } } } } } } static void parse_dec_ref_pic_marking(RBSPState &st, struct slice_parameters *sp) { if (sp->nal_unit_type == NAL_IDR_SLICE) { sp->no_output_of_prior_pics_flag = st.get_u(1); sp->long_term_reference_flag = st.get_u(1); } else { int adaptive_ref_pic_marking_mode_flag = st.get_u(1); if (adaptive_ref_pic_marking_mode_flag) { // no need to do any action, just consume bits. 
All management should be done // on client side int memory_management_control_operation; do { memory_management_control_operation = st.get_uev(); if (memory_management_control_operation == 1 || memory_management_control_operation == 3) { st.get_uev(); // difference_of_pic_nums_minus1 } if (memory_management_control_operation == 2) { st.get_uev(); // long_term_pic_num } if (memory_management_control_operation == 3 || memory_management_control_operation == 6) { st.get_uev(); // long_term_frame_idx } if (memory_management_control_operation == 4) { st.get_uev(); // max_long_term_frame_idx_plus1 } } while (memory_management_control_operation != 0); } } } } // namespace vdp libvdpau-va-gl-0.4.2/src/h264-parse.hh000066400000000000000000000030541277566164500172570ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #include "bitstream.hh" #include namespace vdp { void parse_slice_header(RBSPState &st, const VAPictureParameterBufferH264 *vapp, const int ChromaArrayType, unsigned int p_num_ref_idx_l0_active_minus1, unsigned int p_num_ref_idx_l1_active_minus1, VASliceParameterBufferH264 *vasp); void reset_va_picture_h264(VAPictureH264 *p); } // namespace vdp libvdpau-va-gl-0.4.2/src/handle-storage.cc000066400000000000000000000067331277566164500203560ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "api-bitmap-surface.hh" #include "api-decoder.hh" #include "api-output-surface.hh" #include "api-presentation-queue.hh" #include "api-video-mixer.hh" #include "api-video-surface.hh" #include "api.hh" #include "handle-storage.hh" #include namespace { struct { vdp::ResourceStorage bitmap_surface; vdp::ResourceStorage device; vdp::ResourceStorage output_surface; vdp::ResourceStorage presentation_queue; vdp::ResourceStorage presentation_queue_target; vdp::ResourceStorage video_decoder; vdp::ResourceStorage video_mixer; vdp::ResourceStorage video_surface; } storage; } // anonymous namespace namespace vdp { uint32_t get_resource_id() { static std::atomic id{300000}; return ++id; } template<> vdp::ResourceStorage & vdp::ResourceStorage::instance() { return storage.bitmap_surface; } template<> vdp::ResourceStorage & vdp::ResourceStorage::instance() { return storage.device; } template<> vdp::ResourceStorage & vdp::ResourceStorage::instance() { return storage.output_surface; } template<> vdp::ResourceStorage & vdp::ResourceStorage::instance() { return storage.presentation_queue; } template<> vdp::ResourceStorage & vdp::ResourceStorage::instance() { return storage.presentation_queue_target; } template<> vdp::ResourceStorage & vdp::ResourceStorage::instance() { return storage.video_decoder; } template<> vdp::ResourceStorage & vdp::ResourceStorage::instance() { return storage.video_mixer; } template<> vdp::ResourceStorage & vdp::ResourceStorage::instance() { return storage.video_surface; } } // namespace vdp libvdpau-va-gl-0.4.2/src/handle-storage.hh000066400000000000000000000076571277566164500203760ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #include "api-device.hh" #include "exceptions.hh" #include #include #include #include #include #include namespace vdp { template VdpStatus check_for_exceptions(callable fwd, Args... args) { try { return fwd(args...); } catch (const std::bad_alloc &) { return VDP_STATUS_RESOURCES; } catch (const vdp::resource_not_found &) { return VDP_STATUS_INVALID_HANDLE; } catch (const vdp::generic_error &) { return VDP_STATUS_ERROR; } catch (const vdp::invalid_size &) { return VDP_STATUS_INVALID_SIZE; } catch (const vdp::invalid_rgba_format &) { return VDP_STATUS_INVALID_RGBA_FORMAT; } catch (const vdp::invalid_decoder_profile &) { return VDP_STATUS_INVALID_DECODER_PROFILE; } catch (const vdp::invalid_chroma_type &) { return VDP_STATUS_INVALID_CHROMA_TYPE; } catch (...) { return VDP_STATUS_ERROR; } } // TODO: what to do when it flips over uint32_t limit? 
uint32_t get_resource_id(); template class ResourceStorage { public: uint32_t insert(std::shared_ptr res) { std::unique_lock lock(mtx_); auto id = get_resource_id(); res->id = id; map_.insert(std::make_pair(id, res)); return id; } std::shared_ptr find(uint32_t handle) { std::unique_lock lock(mtx_); auto res = map_.find(handle); if (res == map_.end()) throw vdp::resource_not_found(); return res->second; } void drop(uint32_t handle) { std::unique_lock lock(mtx_); map_.erase(handle); } std::vector enumerate() { std::vector v; for (const auto &it: map_) v.push_back(it.first); return v; } static ResourceStorage & instance(); private: std::recursive_mutex mtx_; std::map> map_; }; template class ResourceRef { public: explicit ResourceRef(uint32_t handle) { auto &storage = ResourceStorage::instance(); while (true) { auto res = storage.find(handle); if (res->mtx.try_lock()) { res_ = res; return; } usleep(1); } } ~ResourceRef() { res_->mtx.unlock(); } ResourceRef & operator=(const ResourceRef &) = delete; std::shared_ptr get_ref() const { return res_; } T * operator->() const { return res_.get(); } operator std::shared_ptr() const { return res_; } private: std::shared_ptr res_; }; } // namespace vdp libvdpau-va-gl-0.4.2/src/reverse-constant.cc000066400000000000000000000106151277566164500207550ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "reverse-constant.hh" #define CASE(q) case q: return #q const char * reverse_rgba_format(VdpRGBAFormat rgba_format) { switch (rgba_format) { CASE(VDP_RGBA_FORMAT_B8G8R8A8); CASE(VDP_RGBA_FORMAT_R8G8B8A8); CASE(VDP_RGBA_FORMAT_R10G10B10A2); CASE(VDP_RGBA_FORMAT_B10G10R10A2); CASE(VDP_RGBA_FORMAT_A8); default: return "Unknown RGBA format"; } } const char * reverse_ycbcr_format(VdpYCbCrFormat ycbcr_format) { switch (ycbcr_format) { CASE(VDP_YCBCR_FORMAT_NV12); CASE(VDP_YCBCR_FORMAT_YV12); CASE(VDP_YCBCR_FORMAT_UYVY); CASE(VDP_YCBCR_FORMAT_YUYV); CASE(VDP_YCBCR_FORMAT_Y8U8V8A8); CASE(VDP_YCBCR_FORMAT_V8U8Y8A8); default: return "Unknown YCbCr format"; } } const char * reverse_decoder_profile(VdpDecoderProfile profile) { switch (profile) { CASE(VDP_DECODER_PROFILE_MPEG1); CASE(VDP_DECODER_PROFILE_MPEG2_SIMPLE); CASE(VDP_DECODER_PROFILE_MPEG2_MAIN); CASE(VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE); CASE(VDP_DECODER_PROFILE_H264_BASELINE); CASE(VDP_DECODER_PROFILE_H264_MAIN); CASE(VDP_DECODER_PROFILE_H264_HIGH); CASE(VDP_DECODER_PROFILE_VC1_SIMPLE); CASE(VDP_DECODER_PROFILE_VC1_MAIN); CASE(VDP_DECODER_PROFILE_VC1_ADVANCED); CASE(VDP_DECODER_PROFILE_MPEG4_PART2_SP); CASE(VDP_DECODER_PROFILE_MPEG4_PART2_ASP); CASE(VDP_DECODER_PROFILE_DIVX4_QMOBILE); CASE(VDP_DECODER_PROFILE_DIVX4_MOBILE); CASE(VDP_DECODER_PROFILE_DIVX4_HOME_THEATER); CASE(VDP_DECODER_PROFILE_DIVX4_HD_1080P); CASE(VDP_DECODER_PROFILE_DIVX5_QMOBILE); CASE(VDP_DECODER_PROFILE_DIVX5_MOBILE); 
CASE(VDP_DECODER_PROFILE_DIVX5_HOME_THEATER); CASE(VDP_DECODER_PROFILE_DIVX5_HD_1080P); default: return "Unknown decoder profile"; } } const char * reverse_status(VdpStatus status) { switch (status) { CASE(VDP_STATUS_OK); CASE(VDP_STATUS_NO_IMPLEMENTATION); CASE(VDP_STATUS_DISPLAY_PREEMPTED); CASE(VDP_STATUS_INVALID_HANDLE); CASE(VDP_STATUS_INVALID_POINTER); CASE(VDP_STATUS_INVALID_CHROMA_TYPE); CASE(VDP_STATUS_INVALID_Y_CB_CR_FORMAT); CASE(VDP_STATUS_INVALID_RGBA_FORMAT); CASE(VDP_STATUS_INVALID_INDEXED_FORMAT); CASE(VDP_STATUS_INVALID_COLOR_STANDARD); CASE(VDP_STATUS_INVALID_COLOR_TABLE_FORMAT); CASE(VDP_STATUS_INVALID_BLEND_FACTOR); CASE(VDP_STATUS_INVALID_BLEND_EQUATION); CASE(VDP_STATUS_INVALID_FLAG); CASE(VDP_STATUS_INVALID_DECODER_PROFILE); CASE(VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE); CASE(VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER); CASE(VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE); CASE(VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE); CASE(VDP_STATUS_INVALID_FUNC_ID); CASE(VDP_STATUS_INVALID_SIZE); CASE(VDP_STATUS_INVALID_VALUE); CASE(VDP_STATUS_INVALID_STRUCT_VERSION); CASE(VDP_STATUS_RESOURCES); CASE(VDP_STATUS_HANDLE_DEVICE_MISMATCH); CASE(VDP_STATUS_ERROR); default: return "Unknown VDP error"; } } const char * reverse_indexed_format(VdpIndexedFormat indexed_format) { switch (indexed_format) { CASE(VDP_INDEXED_FORMAT_A4I4); CASE(VDP_INDEXED_FORMAT_I4A4); CASE(VDP_INDEXED_FORMAT_A8I8); CASE(VDP_INDEXED_FORMAT_I8A8); default: return "Unknown indexed format"; } } libvdpau-va-gl-0.4.2/src/reverse-constant.hh000066400000000000000000000027551277566164500207750ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #include const char * reverse_status(VdpStatus status); const char * reverse_rgba_format(VdpRGBAFormat rgba_format); const char * reverse_ycbcr_format(VdpYCbCrFormat ycbcr_format); const char * reverse_decoder_profile(VdpDecoderProfile profile); const char * reverse_indexed_format(VdpIndexedFormat indexed_format); libvdpau-va-gl-0.4.2/src/symbolmap000066400000000000000000000001121277566164500170610ustar00rootroot00000000000000{ global: vdp_imp_device_create_x11; local: *; }; libvdpau-va-gl-0.4.2/src/trace.cc000066400000000000000000000026631277566164500165550ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or 
substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "trace.hh" #include #include static const char *trace_header = "libvdpau-va-gl: "; void traceError(const char *fmt, ...) { va_list args; fprintf(stderr, "%s", trace_header); va_start(args, fmt); vfprintf(stderr, fmt, args); va_end(args); } libvdpau-va-gl-0.4.2/src/trace.hh000066400000000000000000000023031277566164500165560ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once void traceError(const char *buf, ...); libvdpau-va-gl-0.4.2/src/watermark.cc000066400000000000000000000514321277566164500174520ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "watermark.hh" // grayscale (yet RGBA) image with text "vagl" on it const int watermark_width = 50; const int watermark_height = 27; const char *watermark_data = "\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\20\377" "\377\377r\377\377\377\237\377\377\377\313\377\377\377\356\377\377\377\362" "\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377" "\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362" "\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377" "\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362" "\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377" "\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362" "\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377" "\377\356\377\377\377\313\377\377\377\237\377\377\377s\377\377\377\20\377" "\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" "\377\0\377\377\377\0\377\377\377H\377\377\377\325\377\377\377\377\377\377" "\377\377\372\372\372\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\327\327\327\377\312\312\312\377\302\302\302\377\302\302" "\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377" "\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302" "\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377" "\302\302\302\377\302\302\302\377\302\302\302\377\317\317\317\377\367\367" "\367\377\377\377\377\377\377\377\377\377\377\377\377\325\377\377\377G\377" "\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" "\377\177\377\377\377\371\376\376\376\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" 
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\375\375\375\377\236\236\236\377\23\23\23\377\0\0" "\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0" "\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377" "\0\0\0\377\0\0\0\377\23\23\23\377LLL\377\242\242\242\377\376\376\376\377" "\377\377\377\371\377\377\377\177\377\377\377\0\377\377\377\0\377\377\377" "\0\377\377\377F\377\377\377\371\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\332\332\332\377;;;\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0" "\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377" "\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377VV" "V\377\342\342\342\377\377\377\377\371\377\377\377F\377\377\377\0\377\377" "\377\15\377\377\377\324\374\374\374\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\327\327\327\377\20\20\20\377\0\0\0\377\0\0\0" "\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377" "\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\13" "\13\13\377\20\20\20\377\17\17\17\377BBB\377\371\371\371\377\377\377\377\324" "\377\377\377\15\377\377\377m\377\377\377\377\375\375\375\377\377\377\377" 
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\230" "\230\230\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377" "\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0" "\0\0\377\0\0\0\377\0\0\0\377\267\267\267\377\377\377\377\377\370\370\370" "\377\0\0\0\377\216\216\216\377\377\377\377\377\377\377\377m\377\377\377\233" "\373\373\373\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\372\372\372\377\31\31\31\377\0\0" "\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0" "\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377" "\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\15\15\15\377\361" "\361\361\377\377\377\377\233\377\377\377\310\370\370\370\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377QQQ\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377" "\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0" 
"\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\267\267\267\377\377\377\377\377\370" "\370\370\377\0\0\0\377\0\0\0\377\307\307\307\377\377\377\377\310\377\377" "\377\360\377\377\377\377\361\361\361\377\317\317\317\377\317\317\317\377" "\344\344\344\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\353\353\353\377\317\317\317\377\317\317\317\377\354\354\354\377" "\377\377\377\377\377\377\377\377\376\376\376\377\320\320\320\377\260\260" "\260\377\257\257\257\377\323\323\323\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\177\177" "\177\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\40\40\40\377V" "VV\377555\377\3\3\3\377\0\0\0\377///\377000\377\34\34\34\377\0\0\0\377\0" "\0\0\377\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0" "\377\234\234\234\377\377\377\377\355\377\377\377\365\377\377\377\377\345" "\345\345\377\2\2\2\377\0\0\0\377AAA\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377bbb\377\0\0\0\377\0\0\0\377\316\316\316\377\377" "\377\377\377\312\312\312\377+++\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377" "HHH\377\355\355\355\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\235\235\235\377\0\0\0\377\0\0\0\377\0\0\0\377\6\6\6\377\240" "\240\240\377\377\377\377\377\377\377\377\377\377\377\377\377\333\333\333" "\377$$$\377\377\377\377\377\377\377\377\377\221\221\221\377\0\0\0\377\0\0" "\0\377\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377" "\202\202\202\377\377\377\377\362\377\377\377\365\377\377\377\377\377\377" "\377\377CCC\377\0\0\0\377\4\4\4\377\357\357\357\377\377\377\377\377\377\377" "\377\377\374\374\374\377\25\25\25\377\0\0\0\377---\377\377\377\377\377\363" "\363\363\377\27\27\27\377\0\0\0\377\24\24\24\377\216\216\216\377{{{\377\3" "\3\3\377\0\0\0\377YYY\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\236\236\236\377\0\0\0\377\0\0\0\377\0\0\0\377\201\201\201" 
"\377\377\377\377\377\377\377\377\377\331\331\331\377\206\206\206\377\266" "\266\266\377\333\333\333\377\377\377\377\377\377\377\377\377\213\213\213" "\377\0\0\0\377\0\0\0\377\267\267\267\377\377\377\377\377\370\370\370\377" "\0\0\0\377\0\0\0\377\201\201\201\377\377\377\377\362\377\377\377\365\377" "\377\377\377\377\377\377\377\237\237\237\377\0\0\0\377\0\0\0\377\250\250" "\250\377\377\377\377\377\377\377\377\377\301\301\301\377\0\0\0\377\0\0\0" "\377\213\213\213\377\377\377\377\377\305\305\305\377===\377111\377\227\227" "\227\377\377\377\377\377\377\377\377\377OOO\377\0\0\0\377\11\11\11\377\375" "\375\375\377\377\377\377\377\377\377\377\377\377\377\377\377\236\236\236" "\377\0\0\0\377\0\0\0\377\6\6\6\377\353\353\353\377\377\377\377\377\364\364" "\364\377\26\26\26\377\0\0\0\377\0\0\0\377\256\256\256\377\377\377\377\377" "\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377\267\267\267\377\377" "\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201\201\201\377\377\377" "\377\362\377\377\377\365\377\377\377\377\377\377\377\377\361\361\361\377" "\10\10\10\377\0\0\0\377[[[\377\377\377\377\377\377\377\377\377qqq\377\0\0" "\0\377\3\3\3\377\346\346\346\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\366\366\366\377\356\356\356\377" "ddd\377\0\0\0\377\0\0\0\377\343\343\343\377\377\377\377\377\377\377\377\377" "\377\377\377\377\236\236\236\377\0\0\0\377\0\0\0\377###\377\377\377\377\377" "\377\377\377\377\252\252\252\377\0\0\0\377\0\0\0\377\0\0\0\377KKK\377\377" "\377\377\377\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377\267\267" "\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201\201\201" "\377\377\377\377\362\377\377\377\365\377\377\377\377\377\377\377\377\377" "\377\377\377UUU\377\0\0\0\377\22\22\22\377\374\374\374\377\377\377\377\377" "!!!\377\0\0\0\377GGG\377\377\377\377\377\377\377\377\377\377\377\377\377" "\253\253\253\377999\377\14\14\14\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0" 
"\377\0\0\0\377\340\340\340\377\377\377\377\377\377\377\377\377\377\377\377" "\377\236\236\236\377\0\0\0\377\0\0\0\377===\377\377\377\377\377\377\377\377" "\377\220\220\220\377\0\0\0\377\0\0\0\377\0\0\0\377+++\377\377\377\377\377" "\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377\267\267\267\377\377" "\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201\201\201\377\377\377" "\377\362\377\377\377\365\377\377\377\377\377\377\377\377\377\377\377\377" "\260\260\260\377\0\0\0\377\0\0\0\377\302\302\302\377\320\320\320\377\0\0" "\0\377\0\0\0\377\246\246\246\377\377\377\377\377\377\377\377\377\252\252" "\252\377\0\0\0\377\0\0\0\377111\377\203\203\203\377\222\222\222\377AAA\377" "\0\0\0\377\0\0\0\377\340\340\340\377\377\377\377\377\377\377\377\377\377" "\377\377\377\236\236\236\377\0\0\0\377\0\0\0\377999\377\377\377\377\377\377" "\377\377\377\245\245\245\377\0\0\0\377\0\0\0\377\0\0\0\377222\377\377\377" "\377\377\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377\267\267\267" "\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201\201\201\377" "\377\377\377\362\377\377\377\365\377\377\377\377\377\377\377\377\377\377" "\377\377\371\371\371\377\22\22\22\377\0\0\0\377uuu\377\177\177\177\377\0" "\0\0\377\16\16\16\377\366\366\366\377\377\377\377\377\377\377\377\377EEE" "\377\0\0\0\377\3\3\3\377\357\357\357\377\377\377\377\377\377\377\377\377" "XXX\377\0\0\0\377\0\0\0\377\340\340\340\377\377\377\377\377\377\377\377\377" "\377\377\377\377\236\236\236\377\0\0\0\377\0\0\0\377\37\37\37\377\377\377" "\377\377\377\377\377\377\332\332\332\377\0\0\0\377\0\0\0\377\0\0\0\377WW" "W\377\377\377\377\377\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377" "\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201" "\201\201\377\377\377\377\362\377\377\377\365\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377ggg\377\0\0\0\377)))\377///\377\0\0\0" "\377bbb\377\377\377\377\377\377\377\377\377\377\377\377\377$$$\377\0\0\0" 
"\377\5\5\5\377\361\361\361\377\377\377\377\377\342\342\342\377\14\14\14\377" "\0\0\0\377\0\0\0\377\332\332\332\377\377\377\377\377\377\377\377\377\377" "\377\377\377\236\236\236\377\0\0\0\377\0\0\0\377\2\2\2\377\342\342\342\377" "\377\377\377\377\376\376\376\377FFF\377\0\0\0\377\10\10\10\377\307\307\307" "\377\377\377\377\377\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377" "\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201" "\201\201\377\377\377\377\362\377\377\377\365\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\302\302\302\377\0\0\0\377\0\0\0\377" "\0\0\0\377\0\0\0\377\301\301\301\377\377\377\377\377\377\377\377\377\377" "\377\377\377SSS\377\0\0\0\377\0\0\0\377CCC\377yyy\377\32\32\32\377III\377" "\0\0\0\377\0\0\0\377rrr\377\322\322\322\377\377\377\377\377\377\377\377\377" "\236\236\236\377\0\0\0\377\0\0\0\377\0\0\0\377uuu\377\377\377\377\377\377" "\377\377\377\377\377\377\377\307\307\307\377\336\336\336\377\304\304\304" "\377\377\377\377\377\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377" "\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\202" "\202\202\377\377\377\377\362\377\377\377\362\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\376\376\376\377\36\36\36\377\0\0\0\377" "\0\0\0\377\40\40\40\377\376\376\376\377\377\377\377\377\377\377\377\377\377" "\377\377\377\331\331\331\377\31\31\31\377\0\0\0\377\0\0\0\377\0\0\0\377Q" "QQ\377\374\374\374\377999\377\0\0\0\377\0\0\0\377ppp\377\377\377\377\377" "\377\377\377\377\226\226\226\377\0\0\0\377\0\0\0\377\0\0\0\377\1\1\1\377" "\213\213\213\377\370\370\370\377\377\377\377\377\371\371\371\377\260\260" "\260\377///\377\377\377\377\377\377\377\377\377\207\207\207\377\0\0\0\377" "\0\0\0\377\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0" "\0\377\234\234\234\377\377\377\377\355\377\377\377\314\375\375\375\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\325\325\325" 
"\377\277\277\277\377\277\277\277\377\326\326\326\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\361\361\361" "\377\254\254\254\377\241\241\241\377\337\337\337\377\377\377\377\377\377" "\377\377\377\374\374\374\377\276\276\276\377\254\254\254\377\344\344\344" "\377\377\377\377\377\377\377\377\377lll\377\0\0\0\377\0\0\0\377\0\0\0\377" "\0\0\0\377\0\0\0\377\6\6\6\377)))\377\7\7\7\377\0\0\0\377:::\377\377\377" "\377\377\377\377\377\377nnn\377\0\0\0\377\0\0\0\377---\377@@@\377>>>\377" "\0\0\0\377\0\0\0\377\307\307\307\377\377\377\377\310\377\377\377\233\374" "\374\374\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377>>>\377\0\0\0\377\0\0" "\0\377\0\0\0\377TTT\377\210\210\210\377\240\240\240\377\16\16\16\377\0\0" "\0\377\0\0\0\377xxx\377\377\377\377\377\377\377\377\377AAA\377\0\0\0\377" "\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\15\15\15\377\361\361\361" "\377\377\377\377\233\377\377\377m\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\335\335\335\377\5\5\5\377\0\0\0\377\0\0\0\377\0\0\0\377|||\377\377\377\377" "\377\377\377\377\377\304\304\304\377kkk\377\203\203\203\377\366\366\366\377" "\377\377\377\377\307\307\307\377\2\2\2\377\0\0\0\377\0\0\0\377\0\0\0\377" 
"\0\0\0\377\0\0\0\377\0\0\0\377\216\216\216\377\377\377\377\377\377\377\377" "m\377\377\377\15\377\377\377\327\376\376\376\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\376\376\376\377UUU\377\0\0\0\377" "\0\0\0\377\0\0\0\377\0\0\0\377\3\3\3\377\232\232\232\377\373\373\373\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\267\267" "\267\377\40\40\40\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0" "\0\0\377BBB\377\371\371\371\377\377\377\377\324\377\377\377\15\377\377\377" "\0\377\377\377F\377\377\377\373\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\242\242\242\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0" "\377\0\0\0\377\0\0\0\377\0\0\0\377\31\31\31\377MMM\377ggg\377QQQ\377)))\377" "\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377YY" "Y\377\343\343\343\377\377\377\377\371\377\377\377F\377\377\377\0\377\377" "\377\0\377\377\377\0\377\377\377\177\377\377\377\373\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\370" "\370\370\377\213\213\213\377\11\11\11\377\0\0\0\377\0\0\0\377\0\0\0\377\0" 
"\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0" "\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\20\20\20\377KKK" "\377\243\243\243\377\376\376\376\377\377\377\377\371\377\377\377\177\377" "\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" "\377H\377\377\377\330\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" "\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\366\366\366" "\377\354\354\354\377\322\322\322\377\302\302\302\377\302\302\302\377\302" "\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302" "\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302" "\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302" "\377\302\302\302\377\316\316\316\377\367\367\367\377\377\377\377\377\377" "\377\377\377\377\377\377\325\377\377\377G\377\377\377\0\377\377\377\0\377" "\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" "\377\20\377\377\377r\377\377\377\237\377\377\377\315\377\377\377\362\377" "\377\377\365\377\377\377\365\377\377\377\365\377\377\377\365\377\377\377" "\365\377\377\377\365\377\377\377\365\377\377\377\365\377\377\377\365\377" "\377\377\365\377\377\377\365\377\377\377\364\377\377\377\362\377\377\377" "\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377" "\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377" "\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377" "\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377" "\362\377\377\377\356\377\377\377\313\377\377\377\237\377\377\377s\377\377" "\377\20\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0"; libvdpau-va-gl-0.4.2/src/watermark.hh000066400000000000000000000024031277566164500174560ustar00rootroot00000000000000/* * 
Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once extern const char *watermark_data; extern const int watermark_width; extern const int watermark_height; libvdpau-va-gl-0.4.2/src/x-display-ref.cc000066400000000000000000000024661277566164500201440ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "x-display-ref.hh" namespace vdp { std::mutex XDisplayRef::mtx_; Display * XDisplayRef::dpy_ = nullptr; int XDisplayRef::ref_cnt_ = 0; } // namespace vdp libvdpau-va-gl-0.4.2/src/x-display-ref.hh000066400000000000000000000041701277566164500201500ustar00rootroot00000000000000/* * Copyright 2013-2016 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #pragma once #include #include namespace vdp { class XDisplayRef { public: explicit XDisplayRef(bool one_more_ref = false) { std::unique_lock lock(mtx_); const bool ref_cnt_was_zero = (ref_cnt_ == 0); ref_cnt_ += 1; if (one_more_ref) ref_cnt_ += 1; if (ref_cnt_was_zero) { dpy_ = XOpenDisplay(nullptr); // TODO: do we need to throw if (dpy_ == nullptr)? 
} } ~XDisplayRef() { std::unique_lock lock(mtx_); ref_cnt_ -= 1; if (ref_cnt_ <= 0) { XCloseDisplay(dpy_); dpy_ = nullptr; } } XDisplayRef(const XDisplayRef &ref) = delete; XDisplayRef & operator=(const XDisplayRef &that) = delete; Display * get() const { return dpy_; } private: static Display *dpy_; static std::mutex mtx_; static int ref_cnt_; }; } // namespace vdp libvdpau-va-gl-0.4.2/tests/000077500000000000000000000000001277566164500155145ustar00rootroot00000000000000libvdpau-va-gl-0.4.2/tests/CMakeLists.txt000066400000000000000000000016571277566164500202650ustar00rootroot00000000000000include_directories(..) include_directories(${GENERATED_INCLUDE_DIRS}) add_definitions(-DDRIVER_NAME="${CMAKE_BINARY_DIR}/lib${DRIVER_NAME}.so.1") link_libraries(${X11_LIBRARIES}) link_directories(${X11_LIBRARY_DIRS}) list(APPEND _vdpau_tests test-001 test-002 test-003 test-004 test-005 test-006 test-007 test-008 test-009 test-010) list(APPEND _all_tests test-000 ${_vdpau_tests}) add_executable(test-000 EXCLUDE_FROM_ALL test-000.cc) foreach(_test ${_vdpau_tests}) add_executable(${_test} EXCLUDE_FROM_ALL "${_test}.c" tests-common.c) add_dependencies(${_test} ${DRIVER_NAME}) target_link_libraries(${_test} dl) endforeach(_test) foreach(_test ${_all_tests}) add_test(${_test} ${CMAKE_CURRENT_BINARY_DIR}/${_test}) add_dependencies(build-tests ${_test}) endforeach(_test) # tmp for testing add_executable(conv-speed EXCLUDE_FROM_ALL conv-speed.c) target_link_libraries(conv-speed ${DRIVER_NAME}_static) libvdpau-va-gl-0.4.2/tests/conv-speed.c000066400000000000000000000050271277566164500177270ustar00rootroot00000000000000#ifdef NDEBUG #undef NDEBUG #endif #define CHECK(expr) if (VDP_STATUS_OK != (expr)) assert(0); #include #include "api.h" #include #include #include // force linking library constructor void va_gl_library_constructor(); void *dummy_ptr = va_gl_library_constructor; int main(int argc, char *argv[]) { const int width = 720; const int height = 480; VdpDevice vdp_device = 
create_vdp_device(); VdpVideoSurface vdp_video_surface; VdpVideoMixer vdp_video_mixer; VdpOutputSurface vdp_output_surface; CHECK(vdpVideoSurfaceCreate(vdp_device, VDP_CHROMA_TYPE_420, width, height, &vdp_video_surface)); CHECK(vdpOutputSurfaceCreate(vdp_device, VDP_RGBA_FORMAT_B8G8R8A8, width, height, &vdp_output_surface)); CHECK(vdpVideoMixerCreate(vdp_device, 0, NULL, 0, NULL, NULL, &vdp_video_mixer)); char *y_plane = malloc(width * height); char *u_plane = malloc((width/2) * (height/2)); char *v_plane = malloc((width/2) * (height/2)); const void *source_planes[4] = { y_plane, u_plane, v_plane, NULL }; uint32_t source_pitches[4] = { width, width/2, width/2, 0 }; assert(y_plane); assert(u_plane); assert(v_plane); memset(y_plane, 128, width * height); memset(u_plane, 200, (width/2) * (height/2)); memset(v_plane, 95, (width/2) * (height/2)); struct timespec t_start, t_end; int rep_count = 3000; if (argc >= 2) rep_count = atoi(argv[1]); clock_gettime(CLOCK_MONOTONIC, &t_start); for (int k = 0; k < rep_count; k ++) { CHECK(vdpVideoSurfacePutBitsYCbCr(vdp_video_surface, VDP_YCBCR_FORMAT_YV12, source_planes, source_pitches)); CHECK(vdpVideoMixerRender(vdp_video_mixer, -1, NULL, VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME, 0, NULL, vdp_video_surface, 0, NULL, NULL, vdp_output_surface, NULL, NULL, 0, NULL)); } clock_gettime(CLOCK_MONOTONIC, &t_end); double duration = t_end.tv_sec - t_start.tv_sec + (t_end.tv_nsec - t_start.tv_nsec) / 1.0e9; printf("%d repetitions in %f secs, %f per sec\n", rep_count, duration, rep_count / duration); CHECK(vdpOutputSurfaceDestroy(vdp_output_surface)); CHECK(vdpVideoMixerDestroy(vdp_video_mixer)); CHECK(vdpVideoSurfaceDestroy(vdp_video_surface)); CHECK(vdpDeviceDestroy(vdp_device)); return 0; } libvdpau-va-gl-0.4.2/tests/test-000.cc000066400000000000000000000071021277566164500172770ustar00rootroot00000000000000#undef NDEBUG #include #include #include #include "../src/bitstream.hh" using std::vector; static void test_get_uev() { const vector 
buf{0xa6, 0x42, 0x98, 0xe2, 0x3f}; vdp::RBSPState st{buf}; int64_t a; a = st.get_uev(); assert(a == 0); a = st.get_uev(); assert(a == 1); a = st.get_uev(); assert(a == 2); a = st.get_uev(); assert(a == 3); a = st.get_uev(); assert(a == 4); a = st.get_uev(); assert(a == 5); a = st.get_uev(); assert(a == 6); a = st.get_uev(); assert(a == 7); a = st.get_uev(); assert(a == 0); a = st.get_uev(); assert(a == 0); } static void test_get_u() { const vector buf{0xa6, 0x42, 0x98, 0xe2, 0x3f}; vdp::RBSPState st{buf}; int64_t a; a = st.get_u(1); assert(a == 1); a = st.get_u(3); assert(a == 2); a = st.get_u(3); assert(a == 3); a = st.get_u(5); assert(a == 4); a = st.get_u(5); assert(a == 5); a = st.get_u(5); assert(a == 6); a = st.get_u(5); assert(a == 7); a = st.get_u(7); assert(a == 8); a = st.get_u(1); assert(a == 1); } static void test_get_sev() { const vector buf{0xa6, 0x42, 0x98, 0xe2, 0x3f}; vdp::RBSPState st{buf}; int64_t a; a = st.get_sev(); assert(a == 0); a = st.get_sev(); assert(a == 1); a = st.get_sev(); assert(a == -1); a = st.get_sev(); assert(a == 2); a = st.get_sev(); assert(a == -2); a = st.get_sev(); assert(a == 3); a = st.get_sev(); assert(a == -3); a = st.get_sev(); assert(a == 4); a = st.get_sev(); assert(a == 0); a = st.get_sev(); assert(a == 0); } static void test_emulation_prevention_bytes_1() { const vector buf{0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00}; vdp::RBSPState st{buf}; for (int k = 0; k < 6 * 8; k ++) { int64_t a = st.get_u(1); assert(a == 0); } const auto bits_eaten = st.bits_eaten(); assert(bits_eaten == 6 * 8); } static void test_emulation_prevention_bytes_2() { const vector buf{0x00, 0x00, 0x03, 0xff, 0xff}; vdp::RBSPState st{buf}; for (int k = 0; k < 16; k ++) { const int64_t a = st.get_u(1); assert(a == 0); } for (int k = 0; k < 16; k ++) { const int64_t a = st.get_u(1); assert(a == 1); } } static void test_emulation_prevention_bytes_3() { const vector buf{0x00, 0x00, 0x00, 0x03, 0xff}; vdp::RBSPState st{buf}; for (int k = 0; k < 24; 
k ++) { const int64_t a = st.get_u(1); assert(a == 0); } for (int k = 0; k < 8; k ++) { const int64_t a = st.get_u(1); assert(a == 1); } } static void test_navigating_to_nal_element_1() { const vector buf{0xff, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x01, 0xa3, 0x43}; vdp::RBSPState st{buf}; // skip first byte st.get_u(8); const int64_t a = st.navigate_to_nal_unit(); assert(a == 7); } static void test_navigating_to_nal_element_2() { const vector buf{0xff, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x01, 0xa3, 0x43}; vdp::RBSPState st{buf}; // skip part of the first byte st.get_u(3); const int64_t a = st.navigate_to_nal_unit(); assert(a == 7); // test further data reading const uint32_t b = st.get_u(8); assert(b == 0xa3); } int main() { test_get_uev(); test_get_u(); test_get_sev(); test_emulation_prevention_bytes_1(); test_emulation_prevention_bytes_2(); test_emulation_prevention_bytes_3(); test_navigating_to_nal_element_1(); test_navigating_to_nal_element_2(); printf("pass\n"); } libvdpau-va-gl-0.4.2/tests/test-001.c000066400000000000000000000115231277566164500171370ustar00rootroot00000000000000// Create two output surfaces (B8G8R8A8) of 4x4, fill first with opaque black // and second with black and two red dots (opaque too). // Render second into first. Check that red dots do not get smoothed. // The dot at (1, 1) checks for smoothing, one at (3,3) checks for edge condition. 
#include "tests-common.h" #include #include #include int main(void) { VdpDevice device = create_vdp_device(); VdpOutputSurface out_surface_1; VdpOutputSurface out_surface_2; ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, &out_surface_1)); ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, &out_surface_2)); uint32_t black_box[] = { 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000 }; uint32_t two_red_dots[] = { 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xffff0000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xffff0000 }; const void * const source_data_1[] = {black_box}; const void * const source_data_2[] = {two_red_dots}; uint32_t source_pitches[] = { 4 * 4 }; // upload data ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface_1, source_data_1, source_pitches, NULL)); ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface_2, source_data_2, source_pitches, NULL)); // render VdpOutputSurfaceRenderBlendState blend_state = { .struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION, .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; ASSERT_OK(vdpOutputSurfaceRenderOutputSurface(out_surface_1, NULL, out_surface_2, NULL, NULL, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); // get data back uint32_t receive_buf[16]; void * const dest_data[] = {receive_buf}; 
ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface_1, NULL, dest_data, source_pitches)); printf("output surface\n"); for (int k = 0; k < 16; k ++) { printf("%x ", receive_buf[k]); if (3 == k % 4) printf("\n"); } printf("----------\n"); for (int k = 0; k < 16; k ++) { printf("%x ", two_red_dots[k]); if (3 == k % 4) printf("\n"); } // compare recieve_buf with two_red_dots if (memcmp(receive_buf, two_red_dots, 4*4*4)) { printf("fail\n"); return 1; } // Check bitmap surface rendering smoothing issue VdpBitmapSurface bmp_surface; ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, 1, &bmp_surface)); ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data_2, source_pitches, NULL)); VdpOutputSurfaceRenderBlendState blend_state_opaque_copy = { .struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION, .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface_1, NULL, bmp_surface, NULL, NULL, &blend_state_opaque_copy, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface_1, NULL, dest_data, source_pitches)); printf("bitmap surface\n"); for (int k = 0; k < 16; k ++) { printf("%x ", receive_buf[k]); if (3 == k % 4) printf("\n"); } printf("----------\n"); for (int k = 0; k < 16; k ++) { printf("%x ", two_red_dots[k]); if (3 == k % 4) printf("\n"); } if (memcmp(receive_buf, two_red_dots, 4*4*4)) { printf("fail\n"); return 2; } ASSERT_OK(vdpDeviceDestroy(device)); printf("pass\n"); return 0; } 
libvdpau-va-gl-0.4.2/tests/test-002.c000066400000000000000000000067751277566164500171550ustar00rootroot00000000000000// test-002 // Test alignment issues of output surface {get,put}bits. // Uploads 5x5 square of A8 samples, thus breaking 4-byte alignment. Then downloads and // compares. Buffers should contain identical data. // // Bitmap surfaces checked too. But since there is no way to download data directly from // bitmap surface, we doing this via rendering to output surface. #include "tests-common.h" #include #include int main(void) { VdpDevice device = create_vdp_device(); VdpOutputSurface out_surface; VdpBitmapSurface bmp_surface; uint8_t twenty_five[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19 }; uint8_t out_buf[25]; const void * const source_data[] = { twenty_five }; void * const destination_data[] = { out_buf }; uint32_t source_pitches[] = { 5 }; uint32_t destination_pitches[] = { 5 }; ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_A8, 5, 5, &out_surface)); // upload image to surface, download image from surface ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface, source_data, source_pitches, NULL)); ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface, NULL, destination_data, destination_pitches)); printf("outputsurface\n"); for (int k = 0; k < 25; k ++) { printf(" %02x", twenty_five[k]); if (k % 5 == 4) printf("\n"); } printf("----------\n"); for (int k = 0; k < 25; k ++) { printf(" %02x", out_buf[k]); if (k % 5 == 4) printf("\n"); } printf("==========\n"); if (calc_difference_a8(out_buf, twenty_five, 25) > 2) { printf("failure\n"); return 1; } // Do check bitmap surface ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_A8, 5, 5, 1, &bmp_surface)); ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data, source_pitches, NULL)); // draw alpha channel as color VdpOutputSurfaceRenderBlendState blend_state = { 
.struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION, .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface, NULL, bmp_surface, NULL, NULL, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface, NULL, destination_data, destination_pitches)); printf("bitmapsurface\n"); for (int k = 0; k < 25; k ++) { printf(" %02x", twenty_five[k]); if (k % 5 == 4) printf("\n"); } printf("----------\n"); for (int k = 0; k < 25; k ++) { printf(" %02x", out_buf[k]); if (k % 5 == 4) printf("\n"); } printf("==========\n"); if (calc_difference_a8(out_buf, twenty_five, 25) > 2) { printf("failure\n"); return 2; } ASSERT_OK(vdpDeviceDestroy(device)); printf("pass\n"); return 0; } libvdpau-va-gl-0.4.2/tests/test-003.c000066400000000000000000000066671277566164500171560ustar00rootroot00000000000000// test-003 // // Testing blending A8 bitmap surface into B8G8R8A8 output with following blend parameters: // source/destination colors : src alpha / 1 - src alpha // source/destination alpha : one / src alpha // blend equation for color / alpha : add / add // // target surface filled with {0, 0, 0, 1} // // coloring with color {0, 1, 0, 1}. This should be green with alpha == 1. 
#include "tests-common.h" #include #include int main(void) { VdpDevice device = create_vdp_device(); VdpBitmapSurface bmp_surface; VdpOutputSurface out_surface; const uint8_t bmp_1[] = { 0x00, 0x01, 0x02, 0x03, 0x14, 0x15, 0x16, 0x17, 0x28, 0x29, 0x2a, 0x2b, 0x3c, 0x3d, 0x3e, 0x3f }; const void * const source_data_bmp[] = { bmp_1 }; uint32_t source_pitches_bmp[] = { 4 }; const uint32_t black_4x4[] = { 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000 }; const void * const source_data_black[] = { black_4x4 }; uint32_t source_pitches_black[] = { 4 * 4 }; // create surfaces ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_A8, 4, 4, 1, &bmp_surface)); ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, &out_surface)); // upload data ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data_bmp, source_pitches_bmp, NULL)); ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface, source_data_black, source_pitches_black, NULL)); VdpOutputSurfaceRenderBlendState blend_state = { .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; VdpColor color[] = {{0, 1.0, 0, 1.0}}; ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface, NULL, bmp_surface, NULL, color, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); const uint32_t expected_result[] = { 0x00000000, 0x02000100, 0x04000200, 0x06000300, 0x28001400, 0x2a001500, 0x2c001600, 0x2e001700, 0x50002800, 
0x52002900, 0x54002a00, 0x56002b00, 0x78003c00, 0x7a003d00, 0x7c003e00, 0x7e003f00 }; uint32_t result[16]; void * const dest_data[] = { result }; ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface, NULL, dest_data, source_pitches_black)); printf("=== expected ===\n"); for (int k = 0; k < 16; k ++) { printf(" %08x", expected_result[k]); if (k % 4 == 3) printf("\n"); } printf("--- actual ---\n"); for (int k = 0; k < 16; k ++) { printf(" %08x", result[k]); if (k % 4 == 3) printf("\n"); } printf("==========\n"); if (memcmp(expected_result, result, sizeof(expected_result))) { printf("fail\n"); return 1; } ASSERT_OK(vdpDeviceDestroy(device)); printf("pass\n"); return 0; } libvdpau-va-gl-0.4.2/tests/test-004.c000066400000000000000000000067141277566164500171500ustar00rootroot00000000000000// test-004 // // Large scale (> 500 pixels) smooth test for VdpOutputSurfaceRenderBitmapSurface and // VdpOutputSurfaceRenderOutputSurface. // Rendering the same pattern via both paths and then comparing results. Using opaque copy, // only source matters. 
#include "tests-common.h" #include #include #include #define WIDTH 509 #define HEIGHT 601 int main(void) { int err_code = 0; VdpDevice device = create_vdp_device(); VdpOutputSurface out_surface_in; VdpOutputSurface out_surface_out; VdpBitmapSurface bmp_surface; ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, WIDTH, HEIGHT, &out_surface_in)); ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, WIDTH, HEIGHT, &out_surface_out)); ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, WIDTH, HEIGHT, 1, &bmp_surface)); uint32_t *src = malloc(4 * WIDTH * HEIGHT); uint32_t *dst = malloc(4 * WIDTH * HEIGHT); assert (NULL != src || NULL != dst); for (int k = 0; k < WIDTH * HEIGHT; k ++) { src[k] = ((k & 0xff) << 8) + (0xff << 24); // green pixel pattern } const void * const source_data[] = { src }; void * const destination_data[] = { dst }; uint32_t source_pitches[] = { 4 * WIDTH }; uint32_t destination_pitches[] = { 4 * WIDTH }; ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface_in, source_data, source_pitches, NULL)); ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data, source_pitches, NULL)); VdpOutputSurfaceRenderBlendState blend_state_opaque_copy = { .struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION, .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; ASSERT_OK(vdpOutputSurfaceRenderOutputSurface(out_surface_out, NULL, out_surface_in, NULL, NULL, &blend_state_opaque_copy, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); // check result of vdpOutputSurfaceRenderOutputSurface 
ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface_out, NULL, destination_data, destination_pitches)); if (calc_difference_r8g8b8a8(src, dst, WIDTH * HEIGHT) > 2) { printf("fail / vdpOutputSurfaceRenderOutputSurface\n"); err_code = 1; goto free_resources_and_exit; } // check vdpOutputSurfaceRenderBitmapSurface ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface_out, NULL, bmp_surface, NULL, NULL, &blend_state_opaque_copy, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface_out, NULL, destination_data, destination_pitches)); if (calc_difference_r8g8b8a8(src, dst, WIDTH * HEIGHT) > 2) { printf("fail / vdpOutputSurfaceRenderBitmapSurface\n"); err_code = 2; goto free_resources_and_exit; } printf("pass\n"); free_resources_and_exit: ASSERT_OK(vdpDeviceDestroy(device)); free(src); free(dst); return err_code; } libvdpau-va-gl-0.4.2/tests/test-005.c000066400000000000000000000127651277566164500171540ustar00rootroot00000000000000// test-005 // // rendering A8 bitmap to non-black output surface. // source/destination colors : src alpha / 1 - src alpha // source/destination alpha : one / src alpha // blend equation for color / alpha : add / add // // target surface filled with {1, 0, 0, 1} // // coloring with color {0, 1, 0, 1}. This should be green with alpha == 1. #include "tests-common.h" #include #include #include static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; } static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? 
a : b; } static uint32_t absdiff(int a, int b) { return abs(a - b); } int main(void) { VdpDevice device = create_vdp_device(); VdpBitmapSurface bmp_surface; VdpOutputSurface out_surface; ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_A8, 5, 5, 1, &bmp_surface)); ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 7, 7, &out_surface)); const uint8_t bmp_data[5 * 5] = { /* 1 2 3 4 5 */ /* 1 */ 0x00, 0x1e, 0x1f, 0x20, 0x21, /* 2 */ 0x01, 0x3e, 0x3f, 0x40, 0x41, /* 3 */ 0x02, 0x5e, 0x5f, 0x60, 0x61, /* 4 */ 0x03, 0x8e, 0x7f, 0xff, 0xff, /* 5 */ 0x04, 0xce, 0x7f, 0xff, 0xff }; const void * const source_data_bmp[] = { bmp_data }; uint32_t source_pitches_bmp[] = { 5 * 1 }; uint32_t green_screen[7 * 7]; const void * const source_data[] = { green_screen }; uint32_t source_pitches[] = { 7 * 4 }; for (int k = 0; k < 7 * 7; k ++) green_screen[k] = 0xff00ff00; ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface, source_data, source_pitches, NULL)); ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data_bmp, source_pitches_bmp, NULL)); VdpOutputSurfaceRenderBlendState blend_state = { .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; VdpColor color[] = {{0.7, 0.3, 0.1, 0.6}}; VdpRect dest_rect = {1, 1, 6, 6}; ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface, &dest_rect, bmp_surface, NULL, color, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); uint32_t result_buf[7 * 7]; void * const dest_data[] = { result_buf }; ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface, NULL, dest_data, 
source_pitches)); printf("--- actual ---\n"); for (int k = 0; k < 7 * 7; k ++) { printf(" %08x", result_buf[k]); if (k % 7 == 7 - 1) printf("\n"); } uint32_t expected[7 * 7]; // compute expected result for (int x = 0; x < 7 * 7; x ++) expected[x] = green_screen[x]; for (int y = 0; y < 5; y ++) { for (int x = 0; x < 5; x ++) { const float src_r = 1.0 * color[0].red; const float src_g = 1.0 * color[0].green; const float src_b = 1.0 * color[0].blue; const float src_a = bmp_data[y*5+x]/255.0 * color[0].alpha; const uint32_t dst_bgra = expected[(y+1)*7 + (x+1)]; const float dst_a = ((dst_bgra >> 24) & 0xff) / 255.0; const float dst_r = ((dst_bgra >> 16) & 0xff) / 255.0; const float dst_g = ((dst_bgra >> 8) & 0xff) / 255.0; const float dst_b = ((dst_bgra >> 0) & 0xff) / 255.0; const float res_r = src_r * src_a + dst_r * (1.0 - src_a); const float res_g = src_g * src_a + dst_g * (1.0 - src_a); const float res_b = src_b * src_a + dst_b * (1.0 - src_a); const float res_a = src_a * 1.0 + dst_a * src_a; const uint32_t r = min_u32(255, res_r * 255.0); const uint32_t g = min_u32(255, res_g * 255.0); const uint32_t b = min_u32(255, res_b * 255.0); const uint32_t a = min_u32(255, res_a * 255.0); expected[(y + 1) * 7 + (x + 1)] = (a << 24) | (r << 16) | (g << 8) | (b); } } printf("--- expected ---\n"); for (int k = 0; k < 7 * 7; k ++) { printf(" %08x", expected[k]); if (k % 7 == 7 - 1) printf("\n"); } printf("=================\n"); printf("--- difference --- \n"); uint32_t max_diff = 0; for (int k = 0; k < 7 * 7; k ++) { uint32_t diff_a = absdiff((expected[k] >> 24) & 0xff, (result_buf[k] >> 24) & 0xff); uint32_t diff_r = absdiff((expected[k] >> 16) & 0xff, (result_buf[k] >> 16) & 0xff); uint32_t diff_g = absdiff((expected[k] >> 8) & 0xff, (result_buf[k] >> 8) & 0xff); uint32_t diff_b = absdiff((expected[k] >> 0) & 0xff, (result_buf[k] >> 0) & 0xff); printf(" %08x", (diff_a << 24) + (diff_r << 16) + (diff_g << 8) + (diff_b)); if (k % 7 == 7 - 1) printf("\n"); max_diff = 
max_u32(max_diff, diff_a); max_diff = max_u32(max_diff, diff_r); max_diff = max_u32(max_diff, diff_g); max_diff = max_u32(max_diff, diff_b); } printf("=================\n"); if (max_diff > 1) { printf("fail\n"); return 1; } ASSERT_OK(vdpDeviceDestroy(device)); printf("pass\n"); return 0; } libvdpau-va-gl-0.4.2/tests/test-006.c000066400000000000000000000061071277566164500171460ustar00rootroot00000000000000// test-006 // // initializing/finalizing number of times a row with some drawing between. // This test is to reveal thread-safety failure inside VDPAU library. // // Initialization function executed once, but then THREAD_COUNT threads try to do the same work // with rendering simultaneously. #include "tests-common.h" #include #include #define THREAD_COUNT 50 VdpDevice device; Window window; void *thread_1_func(void *p) { (void)p; // unused VdpPresentationQueueTarget pq_target; VdpPresentationQueue pq; VdpOutputSurface out_surface; VdpOutputSurface out_surface_2; VdpBitmapSurface bmp_surface; ASSERT_OK(vdpPresentationQueueTargetCreateX11(device, window, &pq_target)); ASSERT_OK(vdpPresentationQueueCreate(device, pq_target, &pq)); ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 300, 150, &out_surface)); ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 300, 150, &out_surface_2)); ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 300, 150, 1, &bmp_surface)); uint32_t buf[300*150]; for (uint32_t k = 0; k < 300*150; k ++) buf[k] = 0xff000000u + (k & 0xffffffu); const void * const source_data[] = { buf }; uint32_t source_pitches[] = { 4 * 300 }; ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data, source_pitches, NULL)); VdpTime vdpTime = 0; ASSERT_OK(vdpPresentationQueueBlockUntilSurfaceIdle(pq, out_surface, &vdpTime)); ASSERT_OK(vdpPresentationQueueGetTime(pq, &vdpTime)); VdpOutputSurfaceRenderBlendState blend_state = { .blend_factor_source_color=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, 
.blend_factor_destination_color=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_factor_source_alpha=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_alpha=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_equation_color=VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha=VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = { 0, 0, 0, 0} }; VdpRect source_rect = {0, 0, 300, 150}; VdpRect destination_rect = {0, 0, 300, 150}; ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface, &destination_rect, bmp_surface, &source_rect, NULL, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); ASSERT_OK(vdpPresentationQueueDisplay(pq, out_surface, 0, 0, 0)); ASSERT_OK(vdpOutputSurfaceDestroy(out_surface)); ASSERT_OK(vdpOutputSurfaceDestroy(out_surface_2)); ASSERT_OK(vdpPresentationQueueDestroy(pq)); ASSERT_OK(vdpPresentationQueueTargetDestroy(pq_target)); ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surface)); return NULL; } int main(void) { window = get_wnd(); pthread_t pt[THREAD_COUNT]; device = create_vdp_device(); for (int k = 0; k < THREAD_COUNT; k ++) pthread_create(&pt[k], NULL, thread_1_func, NULL); for (int k = 0; k < THREAD_COUNT; k ++) pthread_join(pt[k], NULL); ASSERT_OK(vdpDeviceDestroy(device)); return 0; } libvdpau-va-gl-0.4.2/tests/test-007.c000066400000000000000000000107321277566164500171460ustar00rootroot00000000000000// test-007 // TOUCHES: VdpBitmapSurfaceCreate // TOUCHES: VdpBitmapSurfaceDestroy // TOUCHES: VdpBitmapSurfaceGetParameters // TOUCHES: VdpBitmapSurfaceQueryCapabilities #include "tests-common.h" #include int main(void) { VdpDevice device = create_vdp_device(); VdpBitmapSurface bmp_surf1; VdpBitmapSurface bmp_surf2; // invalid device handle assert(VDP_STATUS_INVALID_HANDLE == vdpBitmapSurfaceCreate(device + 1, VDP_RGBA_FORMAT_A8, 13, 13, 1, &bmp_surf1)); // invalid rgba format assert(VDP_STATUS_INVALID_RGBA_FORMAT == vdpBitmapSurfaceCreate(device, -2, 13, 13, 1, &bmp_surf1)); // normal paratemers 
ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 123, 234, 1, &bmp_surf1)); ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_R8G8B8A8, 345, 456, 0, &bmp_surf2)); uint32_t width, height; VdpBool fa; VdpRGBAFormat rgba_f; // test that getParameters get actual supplied parameters ASSERT_OK(vdpBitmapSurfaceGetParameters(bmp_surf1, &rgba_f, &width, &height, &fa)); assert(VDP_RGBA_FORMAT_B8G8R8A8 == rgba_f); assert(width == 123); assert(height == 234); assert(fa == 1); // test with other surface ASSERT_OK(vdpBitmapSurfaceGetParameters(bmp_surf2, &rgba_f, &width, &height, &fa)); assert(VDP_RGBA_FORMAT_R8G8B8A8 == rgba_f); assert(width == 345); assert(height == 456); assert(fa == 0); // test getParameters with NULLs assert(VDP_STATUS_INVALID_POINTER == vdpBitmapSurfaceGetParameters(bmp_surf1, NULL, &width, &height, &fa)); assert(VDP_STATUS_INVALID_POINTER == vdpBitmapSurfaceGetParameters(bmp_surf1, &rgba_f, NULL, &height, &fa)); assert(VDP_STATUS_INVALID_POINTER == vdpBitmapSurfaceGetParameters(bmp_surf1, &rgba_f, &width, NULL, &fa)); assert(VDP_STATUS_INVALID_POINTER == vdpBitmapSurfaceGetParameters(bmp_surf1, &rgba_f, &width, &height, NULL)); // test with invalid bitmap handle assert(VDP_STATUS_INVALID_HANDLE == vdpBitmapSurfaceGetParameters(device, &rgba_f, &width, &height, &fa)); VdpBool is_supported; // testing query capabilities assert(VDP_STATUS_INVALID_HANDLE == vdpBitmapSurfaceQueryCapabilities(device+1, VDP_RGBA_FORMAT_A8, &is_supported, &width, &height)); assert(VDP_STATUS_INVALID_POINTER == vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_A8, NULL, &width, &height)); assert(VDP_STATUS_INVALID_POINTER == vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_A8, &is_supported, NULL, &height)); assert(VDP_STATUS_INVALID_POINTER == vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_A8, &is_supported, &width, NULL)); // querying various formats ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_B8G8R8A8, 
&is_supported, &width, &height)); assert(is_supported); assert(width > 0); assert(height > 0); ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_R8G8B8A8, &is_supported, &width, &height)); assert(is_supported); assert(width > 0); assert(height > 0); ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_R10G10B10A2, &is_supported, &width, &height)); assert(is_supported); assert(width > 0); assert(height > 0); ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_B10G10R10A2, &is_supported, &width, &height)); assert(is_supported); assert(width > 0); assert(height > 0); ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_A8, &is_supported, &width, &height)); assert(is_supported); assert(width > 0); assert(height > 0); // query wrong format ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, 9000, &is_supported, &width, &height)); assert (0 == is_supported); // try to destroy wrong surface assert (VDP_STATUS_INVALID_HANDLE == vdpBitmapSurfaceDestroy(-2)); assert (VDP_STATUS_INVALID_HANDLE == vdpBitmapSurfaceDestroy(device)); assert (VDP_STATUS_INVALID_HANDLE == vdpBitmapSurfaceDestroy(bmp_surf1 + 43000)); // really destroy surfaces ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surf1)); ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surf2)); ASSERT_OK(vdpDeviceDestroy(device)); printf("pass\n"); return 0; } libvdpau-va-gl-0.4.2/tests/test-008.c000066400000000000000000000062261277566164500171520ustar00rootroot00000000000000// test-008 // Progressively create larger and larger bitmap surfaces, up to maximum // allowed size. 
First square ones, then stretched in both directions // in turn // TOUCHES: VdpBitmapSurfaceCreate // TOUCHES: VdpBitmapSurfaceQueryCapabilities #include "tests-common.h" #include void test_bitmaps_of_format(VdpDevice device, int fmt, const char *fmt_name, uint32_t max_width, uint32_t max_height) { VdpBitmapSurface bmp_surf1; const uint32_t max_square_size = MIN(max_width, max_height); const uint32_t step = 128; // trying square surface for (uint32_t k = 0; k < max_square_size + step; (k < step) ? (k++) : (k+=step)) { for (uint32_t freq = 0; freq <= 1; freq ++) { const uint32_t size = MAX(1, MIN(k, max_square_size)); printf("trying square %s bitmap %d x %d (%d)\n", fmt_name, size, size, freq); ASSERT_OK(vdpBitmapSurfaceCreate(device, fmt, size, size, freq, &bmp_surf1)); ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surf1)); } } // width stretched for (uint32_t k = 0; k < max_width + step; (k < step) ? (k++) : (k+=step)) { for (uint32_t freq = 0; freq <= 1; freq ++) { const uint32_t size = MAX(1, MIN(k, max_width)); printf("trying width stretched %s bitmap %d x %d (%d)\n", fmt_name, size, 128, freq); ASSERT_OK(vdpBitmapSurfaceCreate(device, fmt, size, 128, freq, &bmp_surf1)); ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surf1)); } } // height stretched for (uint32_t k = 0; k < max_height + step; (k < step) ? 
(k++) : (k+=step)) { for (uint32_t freq = 0; freq <= 1; freq ++) { const uint32_t size = MAX(1, MIN(k, max_height)); printf("trying height stretched %s bitmap %d x %d (%d)\n", fmt_name, 128, size, freq); ASSERT_OK(vdpBitmapSurfaceCreate(device, fmt, 128, size, freq, &bmp_surf1)); ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surf1)); } } } int main(void) { VdpDevice device = create_vdp_device(); uint32_t max_width, max_height; VdpBool is_supported; // querying max_size ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_B8G8R8A8, &is_supported, &max_width, &max_height)); assert(is_supported); assert(max_width > 0); assert(max_height > 0); test_bitmaps_of_format(device, VDP_RGBA_FORMAT_B8G8R8A8, "VDP_RGBA_FORMAT_B8G8R8A8", max_width, max_height); test_bitmaps_of_format(device, VDP_RGBA_FORMAT_R8G8B8A8, "VDP_RGBA_FORMAT_R8G8B8A8", max_width, max_height); test_bitmaps_of_format(device, VDP_RGBA_FORMAT_R10G10B10A2, "VDP_RGBA_FORMAT_R10G10B10A2", max_width, max_height); test_bitmaps_of_format(device, VDP_RGBA_FORMAT_B10G10R10A2, "VDP_RGBA_FORMAT_B10G10R10A2", max_width, max_height); test_bitmaps_of_format(device, VDP_RGBA_FORMAT_A8, "VDP_RGBA_FORMAT_A8", max_width, max_height); ASSERT_OK(vdpDeviceDestroy(device)); printf("pass\n"); return 0; } libvdpau-va-gl-0.4.2/tests/test-009.c000066400000000000000000000005551277566164500171520ustar00rootroot00000000000000// test-009 // Create and destroy vdp device many times a row. // Intended to check X resource leakage introduced by library. 
#include "tests-common.h" #include int main(void) { for (int k = 0; k < 30; k ++) { VdpDevice device = create_vdp_device(); ASSERT_OK(vdpDeviceDestroy(device)); } printf("pass\n"); return 0; } libvdpau-va-gl-0.4.2/tests/test-010.c000066400000000000000000000011551277566164500171370ustar00rootroot00000000000000// test-010 // creating and destroying couple of VdpDevice from different threads // caused deadlocks and crashes #include #include #include "tests-common.h" VdpDevice device1; VdpDevice device2; void * thread_1(void *param) { (void)param; ASSERT_OK(vdpDeviceDestroy(device1)); return NULL; } int main(void) { device1 = create_vdp_device(); device2 = create_vdp_device(); pthread_t thread_id_1; pthread_create(&thread_id_1, NULL, thread_1, NULL); pthread_join(thread_id_1, NULL); ASSERT_OK(vdpDeviceDestroy(device2)); printf("pass\n"); return 0; } libvdpau-va-gl-0.4.2/tests/tests-common.c000066400000000000000000000253571277566164500203240ustar00rootroot00000000000000#include #include #include #include "tests-common.h" #ifndef DRIVER_NAME #error no DRIVER_NAME #endif Display * get_dpy(void) { static Display *cached_dpy = NULL; if (cached_dpy) return cached_dpy; cached_dpy = XOpenDisplay(NULL); return cached_dpy; } Window get_wnd(void) { Display *dpy = get_dpy(); Window root = XDefaultRootWindow(dpy); Window wnd = XCreateSimpleWindow(dpy, root, 0, 0, 300, 300, 0, 0, 0); XSync(dpy, False); return wnd; } static inline int max2(int a, int b) { return (a > b) ? 
a : b; } int calc_difference_a8(uint8_t *src1, uint8_t *src2, int count) { int max_diff = 0; for (int k = 0; k < count; k ++) max_diff = max2(max_diff, abs(src1[k] - src2[k])); return max_diff; } int calc_difference_r8g8b8a8(uint32_t *src1, uint32_t *src2, int count) { int max_diff = 0; for (int k = 0; k < count; k ++) { const uint8_t r1 = (src1[k] >> 24) & 0xffu; const uint8_t g1 = (src1[k] >> 16) & 0xffu; const uint8_t b1 = (src1[k] >> 8) & 0xffu; const uint8_t a1 = (src1[k] >> 0) & 0xffu; const uint8_t r2 = (src2[k] >> 24) & 0xffu; const uint8_t g2 = (src2[k] >> 16) & 0xffu; const uint8_t b2 = (src2[k] >> 8) & 0xffu; const uint8_t a2 = (src2[k] >> 0) & 0xffu; const int tmp1 = max2(abs(r1 - r2), abs(g1 - g2)); const int tmp2 = max2(abs(b1 - b2), abs(a1 - a2)); max_diff = max2(max_diff, max2(tmp1, tmp2)); } return max_diff; } VdpDevice create_vdp_device(void) { void *dl = dlopen(DRIVER_NAME, RTLD_NOW); if (!dl) { printf("failed to open %s\n", DRIVER_NAME); exit(1); } vdpDeviceCreateX11 = dlsym(dl, "vdp_imp_device_create_x11"); if (!vdpDeviceCreateX11) { fprintf(stderr, "no vdp_imp_device_create_x11 in %s\n", DRIVER_NAME); exit(1); } VdpGetProcAddress *get_proc_address; VdpDevice device; ASSERT_OK(vdpDeviceCreateX11(get_dpy(), 0, &device, &get_proc_address)); #define GET_FUNC(macroname, funcptr) \ ASSERT_OK(get_proc_address(device, VDP_FUNC_ID_##macroname, (void **)&funcptr)); GET_FUNC(GET_ERROR_STRING, vdpGetErrorString); GET_FUNC(GET_API_VERSION, vdpGetApiVersion); GET_FUNC(GET_INFORMATION_STRING, vdpGetInformationString); GET_FUNC(DEVICE_DESTROY, vdpDeviceDestroy); GET_FUNC(GENERATE_CSC_MATRIX, vdpGenerateCSCMatrix); GET_FUNC(VIDEO_SURFACE_QUERY_CAPABILITIES, vdpVideoSurfaceQueryCapabilities); GET_FUNC(VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES, vdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities); GET_FUNC(VIDEO_SURFACE_CREATE, vdpVideoSurfaceCreate); GET_FUNC(VIDEO_SURFACE_DESTROY, vdpVideoSurfaceDestroy); GET_FUNC(VIDEO_SURFACE_GET_PARAMETERS, 
vdpVideoSurfaceGetParameters); GET_FUNC(VIDEO_SURFACE_GET_BITS_Y_CB_CR, vdpVideoSurfaceGetBitsYCbCr); GET_FUNC(VIDEO_SURFACE_PUT_BITS_Y_CB_CR, vdpVideoSurfacePutBitsYCbCr); GET_FUNC(OUTPUT_SURFACE_QUERY_CAPABILITIES, vdpOutputSurfaceQueryCapabilities); GET_FUNC(OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES, vdpOutputSurfaceQueryGetPutBitsNativeCapabilities); GET_FUNC(OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES, vdpOutputSurfaceQueryPutBitsIndexedCapabilities); GET_FUNC(OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES, vdpOutputSurfaceQueryPutBitsYCbCrCapabilities); GET_FUNC(OUTPUT_SURFACE_CREATE, vdpOutputSurfaceCreate); GET_FUNC(OUTPUT_SURFACE_DESTROY, vdpOutputSurfaceDestroy); GET_FUNC(OUTPUT_SURFACE_GET_PARAMETERS, vdpOutputSurfaceGetParameters); GET_FUNC(OUTPUT_SURFACE_GET_BITS_NATIVE, vdpOutputSurfaceGetBitsNative); GET_FUNC(OUTPUT_SURFACE_PUT_BITS_NATIVE, vdpOutputSurfacePutBitsNative); GET_FUNC(OUTPUT_SURFACE_PUT_BITS_INDEXED, vdpOutputSurfacePutBitsIndexed); GET_FUNC(OUTPUT_SURFACE_PUT_BITS_Y_CB_CR, vdpOutputSurfacePutBitsYCbCr); GET_FUNC(BITMAP_SURFACE_QUERY_CAPABILITIES, vdpBitmapSurfaceQueryCapabilities); GET_FUNC(BITMAP_SURFACE_CREATE, vdpBitmapSurfaceCreate); GET_FUNC(BITMAP_SURFACE_DESTROY, vdpBitmapSurfaceDestroy); GET_FUNC(BITMAP_SURFACE_GET_PARAMETERS, vdpBitmapSurfaceGetParameters); GET_FUNC(BITMAP_SURFACE_PUT_BITS_NATIVE, vdpBitmapSurfacePutBitsNative); GET_FUNC(OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE, vdpOutputSurfaceRenderOutputSurface); GET_FUNC(OUTPUT_SURFACE_RENDER_BITMAP_SURFACE, vdpOutputSurfaceRenderBitmapSurface); GET_FUNC(DECODER_QUERY_CAPABILITIES, vdpDecoderQueryCapabilities); GET_FUNC(DECODER_CREATE, vdpDecoderCreate); GET_FUNC(DECODER_DESTROY, vdpDecoderDestroy); GET_FUNC(DECODER_GET_PARAMETERS, vdpDecoderGetParameters); GET_FUNC(DECODER_RENDER, vdpDecoderRender); GET_FUNC(VIDEO_MIXER_QUERY_FEATURE_SUPPORT, vdpVideoMixerQueryFeatureSupport); GET_FUNC(VIDEO_MIXER_QUERY_PARAMETER_SUPPORT, 
vdpVideoMixerQueryParameterSupport); GET_FUNC(VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT, vdpVideoMixerQueryAttributeSupport); GET_FUNC(VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE, vdpVideoMixerQueryParameterValueRange); GET_FUNC(VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE, vdpVideoMixerQueryAttributeValueRange); GET_FUNC(VIDEO_MIXER_CREATE, vdpVideoMixerCreate); GET_FUNC(VIDEO_MIXER_SET_FEATURE_ENABLES, vdpVideoMixerSetFeatureEnables); GET_FUNC(VIDEO_MIXER_SET_ATTRIBUTE_VALUES, vdpVideoMixerSetAttributeValues); GET_FUNC(VIDEO_MIXER_GET_FEATURE_SUPPORT, vdpVideoMixerGetFeatureSupport); GET_FUNC(VIDEO_MIXER_GET_FEATURE_ENABLES, vdpVideoMixerGetFeatureEnables); GET_FUNC(VIDEO_MIXER_GET_PARAMETER_VALUES, vdpVideoMixerGetParameterValues); GET_FUNC(VIDEO_MIXER_GET_ATTRIBUTE_VALUES, vdpVideoMixerGetAttributeValues); GET_FUNC(VIDEO_MIXER_DESTROY, vdpVideoMixerDestroy); GET_FUNC(VIDEO_MIXER_RENDER, vdpVideoMixerRender); GET_FUNC(PRESENTATION_QUEUE_TARGET_DESTROY, vdpPresentationQueueTargetDestroy); GET_FUNC(PRESENTATION_QUEUE_CREATE, vdpPresentationQueueCreate); GET_FUNC(PRESENTATION_QUEUE_DESTROY, vdpPresentationQueueDestroy); GET_FUNC(PRESENTATION_QUEUE_SET_BACKGROUND_COLOR, vdpPresentationQueueSetBackgroundColor); GET_FUNC(PRESENTATION_QUEUE_GET_BACKGROUND_COLOR, vdpPresentationQueueGetBackgroundColor); GET_FUNC(PRESENTATION_QUEUE_GET_TIME, vdpPresentationQueueGetTime); GET_FUNC(PRESENTATION_QUEUE_DISPLAY, vdpPresentationQueueDisplay); GET_FUNC(PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE, vdpPresentationQueueBlockUntilSurfaceIdle); GET_FUNC(PRESENTATION_QUEUE_QUERY_SURFACE_STATUS, vdpPresentationQueueQuerySurfaceStatus); GET_FUNC(PREEMPTION_CALLBACK_REGISTER, vdpPreemptionCallbackRegister); GET_FUNC(PRESENTATION_QUEUE_TARGET_CREATE_X11, vdpPresentationQueueTargetCreateX11); return device; } VdpBitmapSurfaceCreate * vdpBitmapSurfaceCreate; VdpBitmapSurfaceDestroy * vdpBitmapSurfaceDestroy; VdpBitmapSurfaceGetParameters * vdpBitmapSurfaceGetParameters; VdpBitmapSurfacePutBitsNative * 
vdpBitmapSurfacePutBitsNative; VdpBitmapSurfaceQueryCapabilities * vdpBitmapSurfaceQueryCapabilities; VdpDecoderCreate * vdpDecoderCreate; VdpDecoderDestroy * vdpDecoderDestroy; VdpDecoderGetParameters * vdpDecoderGetParameters; VdpDecoderQueryCapabilities * vdpDecoderQueryCapabilities; VdpDecoderRender * vdpDecoderRender; VdpDeviceCreateX11 * vdpDeviceCreateX11; VdpDeviceDestroy * vdpDeviceDestroy; VdpGenerateCSCMatrix * vdpGenerateCSCMatrix; VdpGetApiVersion * vdpGetApiVersion; VdpGetErrorString * vdpGetErrorString; VdpGetInformationString * vdpGetInformationString; VdpOutputSurfaceCreate * vdpOutputSurfaceCreate; VdpOutputSurfaceDestroy * vdpOutputSurfaceDestroy; VdpOutputSurfaceGetBitsNative * vdpOutputSurfaceGetBitsNative; VdpOutputSurfaceGetParameters * vdpOutputSurfaceGetParameters; VdpOutputSurfacePutBitsIndexed * vdpOutputSurfacePutBitsIndexed; VdpOutputSurfacePutBitsNative * vdpOutputSurfacePutBitsNative; VdpOutputSurfacePutBitsYCbCr * vdpOutputSurfacePutBitsYCbCr; VdpOutputSurfaceQueryCapabilities * vdpOutputSurfaceQueryCapabilities; VdpOutputSurfaceQueryGetPutBitsNativeCapabilities * vdpOutputSurfaceQueryGetPutBitsNativeCapabilities; VdpOutputSurfaceQueryPutBitsIndexedCapabilities * vdpOutputSurfaceQueryPutBitsIndexedCapabilities; VdpOutputSurfaceQueryPutBitsYCbCrCapabilities * vdpOutputSurfaceQueryPutBitsYCbCrCapabilities; VdpOutputSurfaceRenderBitmapSurface * vdpOutputSurfaceRenderBitmapSurface; VdpOutputSurfaceRenderOutputSurface * vdpOutputSurfaceRenderOutputSurface; VdpPreemptionCallbackRegister * vdpPreemptionCallbackRegister; VdpPresentationQueueBlockUntilSurfaceIdle * vdpPresentationQueueBlockUntilSurfaceIdle; VdpPresentationQueueCreate * vdpPresentationQueueCreate; VdpPresentationQueueDestroy * vdpPresentationQueueDestroy; VdpPresentationQueueDisplay * vdpPresentationQueueDisplay; VdpPresentationQueueGetBackgroundColor * vdpPresentationQueueGetBackgroundColor; VdpPresentationQueueGetTime * vdpPresentationQueueGetTime; 
VdpPresentationQueueQuerySurfaceStatus * vdpPresentationQueueQuerySurfaceStatus; VdpPresentationQueueSetBackgroundColor * vdpPresentationQueueSetBackgroundColor; VdpPresentationQueueTargetCreateX11 * vdpPresentationQueueTargetCreateX11; VdpPresentationQueueTargetDestroy * vdpPresentationQueueTargetDestroy; VdpVideoMixerCreate * vdpVideoMixerCreate; VdpVideoMixerDestroy * vdpVideoMixerDestroy; VdpVideoMixerGetAttributeValues * vdpVideoMixerGetAttributeValues; VdpVideoMixerGetFeatureEnables * vdpVideoMixerGetFeatureEnables; VdpVideoMixerGetFeatureSupport * vdpVideoMixerGetFeatureSupport; VdpVideoMixerGetParameterValues * vdpVideoMixerGetParameterValues; VdpVideoMixerQueryAttributeSupport * vdpVideoMixerQueryAttributeSupport; VdpVideoMixerQueryAttributeValueRange * vdpVideoMixerQueryAttributeValueRange; VdpVideoMixerQueryFeatureSupport * vdpVideoMixerQueryFeatureSupport; VdpVideoMixerQueryParameterSupport * vdpVideoMixerQueryParameterSupport; VdpVideoMixerQueryParameterValueRange * vdpVideoMixerQueryParameterValueRange; VdpVideoMixerRender * vdpVideoMixerRender; VdpVideoMixerSetAttributeValues * vdpVideoMixerSetAttributeValues; VdpVideoMixerSetFeatureEnables * vdpVideoMixerSetFeatureEnables; VdpVideoSurfaceCreate * vdpVideoSurfaceCreate; VdpVideoSurfaceDestroy * vdpVideoSurfaceDestroy; VdpVideoSurfaceGetBitsYCbCr * vdpVideoSurfaceGetBitsYCbCr; VdpVideoSurfaceGetParameters * vdpVideoSurfaceGetParameters; VdpVideoSurfacePutBitsYCbCr * vdpVideoSurfacePutBitsYCbCr; VdpVideoSurfaceQueryCapabilities * vdpVideoSurfaceQueryCapabilities; VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities * vdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities; libvdpau-va-gl-0.4.2/tests/tests-common.h000066400000000000000000000115101277566164500203130ustar00rootroot00000000000000#pragma once #undef NDEBUG #include #include #include #define ASSERT_OK(expr) \ do { \ VdpStatus status = (expr); \ assert(status == VDP_STATUS_OK); \ } while (0) #define MIN(a, b) ({ typeof(a) _a = a; typeof(b) _b = b; _a < _b 
? _a : _b; }) #define MAX(a, b) ({ typeof(a) _a = a; typeof(b) _b = b; _a > _b ? _a : _b; }) Display * get_dpy(void); Window get_wnd(void); int calc_difference_a8(uint8_t *src1, uint8_t *src2, int count); int calc_difference_r8g8b8a8(uint32_t *src1, uint32_t *src2, int count); VdpDevice create_vdp_device(void); // API extern VdpBitmapSurfaceCreate * vdpBitmapSurfaceCreate; extern VdpBitmapSurfaceDestroy * vdpBitmapSurfaceDestroy; extern VdpBitmapSurfaceGetParameters * vdpBitmapSurfaceGetParameters; extern VdpBitmapSurfacePutBitsNative * vdpBitmapSurfacePutBitsNative; extern VdpBitmapSurfaceQueryCapabilities * vdpBitmapSurfaceQueryCapabilities; extern VdpDecoderCreate * vdpDecoderCreate; extern VdpDecoderDestroy * vdpDecoderDestroy; extern VdpDecoderGetParameters * vdpDecoderGetParameters; extern VdpDecoderQueryCapabilities * vdpDecoderQueryCapabilities; extern VdpDecoderRender * vdpDecoderRender; extern VdpDeviceCreateX11 * vdpDeviceCreateX11; extern VdpDeviceDestroy * vdpDeviceDestroy; extern VdpGenerateCSCMatrix * vdpGenerateCSCMatrix; extern VdpGetApiVersion * vdpGetApiVersion; extern VdpGetErrorString * vdpGetErrorString; extern VdpGetInformationString * vdpGetInformationString; extern VdpOutputSurfaceCreate * vdpOutputSurfaceCreate; extern VdpOutputSurfaceDestroy * vdpOutputSurfaceDestroy; extern VdpOutputSurfaceGetBitsNative * vdpOutputSurfaceGetBitsNative; extern VdpOutputSurfaceGetParameters * vdpOutputSurfaceGetParameters; extern VdpOutputSurfacePutBitsIndexed * vdpOutputSurfacePutBitsIndexed; extern VdpOutputSurfacePutBitsNative * vdpOutputSurfacePutBitsNative; extern VdpOutputSurfacePutBitsYCbCr * vdpOutputSurfacePutBitsYCbCr; extern VdpOutputSurfaceQueryCapabilities * vdpOutputSurfaceQueryCapabilities; extern VdpOutputSurfaceQueryGetPutBitsNativeCapabilities * vdpOutputSurfaceQueryGetPutBitsNativeCapabilities; extern VdpOutputSurfaceQueryPutBitsIndexedCapabilities * vdpOutputSurfaceQueryPutBitsIndexedCapabilities; extern 
VdpOutputSurfaceQueryPutBitsYCbCrCapabilities * vdpOutputSurfaceQueryPutBitsYCbCrCapabilities; extern VdpOutputSurfaceRenderBitmapSurface * vdpOutputSurfaceRenderBitmapSurface; extern VdpOutputSurfaceRenderOutputSurface * vdpOutputSurfaceRenderOutputSurface; extern VdpPreemptionCallbackRegister * vdpPreemptionCallbackRegister; extern VdpPresentationQueueBlockUntilSurfaceIdle * vdpPresentationQueueBlockUntilSurfaceIdle; extern VdpPresentationQueueCreate * vdpPresentationQueueCreate; extern VdpPresentationQueueDestroy * vdpPresentationQueueDestroy; extern VdpPresentationQueueDisplay * vdpPresentationQueueDisplay; extern VdpPresentationQueueGetBackgroundColor * vdpPresentationQueueGetBackgroundColor; extern VdpPresentationQueueGetTime * vdpPresentationQueueGetTime; extern VdpPresentationQueueQuerySurfaceStatus * vdpPresentationQueueQuerySurfaceStatus; extern VdpPresentationQueueSetBackgroundColor * vdpPresentationQueueSetBackgroundColor; extern VdpPresentationQueueTargetCreateX11 * vdpPresentationQueueTargetCreateX11; extern VdpPresentationQueueTargetDestroy * vdpPresentationQueueTargetDestroy; extern VdpVideoMixerCreate * vdpVideoMixerCreate; extern VdpVideoMixerDestroy * vdpVideoMixerDestroy; extern VdpVideoMixerGetAttributeValues * vdpVideoMixerGetAttributeValues; extern VdpVideoMixerGetFeatureEnables * vdpVideoMixerGetFeatureEnables; extern VdpVideoMixerGetFeatureSupport * vdpVideoMixerGetFeatureSupport; extern VdpVideoMixerGetParameterValues * vdpVideoMixerGetParameterValues; extern VdpVideoMixerQueryAttributeSupport * vdpVideoMixerQueryAttributeSupport; extern VdpVideoMixerQueryAttributeValueRange * vdpVideoMixerQueryAttributeValueRange; extern VdpVideoMixerQueryFeatureSupport * vdpVideoMixerQueryFeatureSupport; extern VdpVideoMixerQueryParameterSupport * vdpVideoMixerQueryParameterSupport; extern VdpVideoMixerQueryParameterValueRange * vdpVideoMixerQueryParameterValueRange; extern VdpVideoMixerRender * vdpVideoMixerRender; extern VdpVideoMixerSetAttributeValues 
* vdpVideoMixerSetAttributeValues; extern VdpVideoMixerSetFeatureEnables * vdpVideoMixerSetFeatureEnables; extern VdpVideoSurfaceCreate * vdpVideoSurfaceCreate; extern VdpVideoSurfaceDestroy * vdpVideoSurfaceDestroy; extern VdpVideoSurfaceGetBitsYCbCr * vdpVideoSurfaceGetBitsYCbCr; extern VdpVideoSurfaceGetParameters * vdpVideoSurfaceGetParameters; extern VdpVideoSurfacePutBitsYCbCr * vdpVideoSurfacePutBitsYCbCr; extern VdpVideoSurfaceQueryCapabilities * vdpVideoSurfaceQueryCapabilities; extern VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities * vdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities;