pax_global_header00006660000000000000000000000064122414272370014516gustar00rootroot0000000000000052 comment=e09defe3f0246dba5c5204836827cd4f5c2bc620 libvdpau-va-gl-0.2.1/000077500000000000000000000000001224142723700143305ustar00rootroot00000000000000libvdpau-va-gl-0.2.1/.gitignore000066400000000000000000000000071224142723700163150ustar00rootroot00000000000000build/ libvdpau-va-gl-0.2.1/CMakeLists.txt000066400000000000000000000023431224142723700170720ustar00rootroot00000000000000project (libvdpau-va-gl) cmake_minimum_required (VERSION 2.6) add_definitions(-std=gnu99 -Wall -fvisibility=hidden) find_package(PkgConfig REQUIRED) pkg_check_modules(SOMELIBS vdpau glib-2.0 libswscale libva-glx gl glu REQUIRED) add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND}) add_custom_target(build-tests) add_dependencies(check build-tests) enable_testing() add_subdirectory(tests) link_directories ( ${SOMELIBS_LIBRARY_DIRS} ) include_directories ( ${SOMELIBS_INCLUDE_DIRS} ) set(DRIVER_NAME "vdpau_va_gl" CACHE STRING "driver name") set(LIB_SUFFIX "" CACHE STRING "library path suffix (if needed)") set(LIB_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}/vdpau" CACHE PATH "library installation path") add_library (${DRIVER_NAME} SHARED vdpau-decoder.c vdpau-soft.c vdpau-presentation.c vdpau-entry.c vdpau-trace.c reverse-constant.c handle-storage.c bitstream.c h264-parse.c globals.c watermark.c ctx-stack.c ) target_link_libraries (${DRIVER_NAME} ${SOMELIBS_LIBRARIES} ) # add_library (xinitthreads SHARED xinitthreads.c) # target_link_libraries (xinitthreads -lpthread -lX11) set_target_properties (${DRIVER_NAME} PROPERTIES VERSION 1 ) install (TARGETS ${DRIVER_NAME} DESTINATION ${LIB_INSTALL_DIR}) libvdpau-va-gl-0.2.1/COPYING000066400000000000000000000010741224142723700153650ustar00rootroot00000000000000libvdpau-va-gl is free software: you can redistribute it and/or modify it under the terms of version 3 of the GNU Lesser General Public License as published by the Free Software 
Foundation. libvdpau-va-gl is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with Foobar. If not, see . libvdpau-va-gl-0.2.1/COPYING.GPLv3000066400000000000000000001045131224142723700162610ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. 
For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. 
Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. 
"Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. 
Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . libvdpau-va-gl-0.2.1/COPYING.LGPLv3000066400000000000000000000167431224142723700164040ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. 
A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. 
You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. 
If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. libvdpau-va-gl-0.2.1/ChangeLog000066400000000000000000000021401224142723700160770ustar00rootroot000000000000002013-11-15 Rinat Ibragimov * tag: v0.2.1 * core: fix some NULL dereferences, memcpy warning, deadlock in presentation thread * core: implement rotation and coloring in surface rendering functions * core: reuse render_target entries (VdpDecoder) 2013-09-21 Rinat Ibragimov * core: start splitting vdpau-soft.c into smaller pieces * core: use separate thread for presentation queue * core: drop global locking, try to use fine grained locking instead 2013-07-06 Rinat Ibragimov * tag: v0.1.0 * core: remove miscellaneous unused code * core: add basic documentation on structures * core: fix build system to simplify further packaging * core: enable compiling against new libva 1.2.1 release 2013-05-14 Rinat Ibragimov * libvdpau-va-gl use common GL context pool across several vdp devices. 
Shrinks memory overhead from ~100% to ~50% * libvdpau-va-gl add new quirk named AvoidVA, which disables using VA-API even on systems that have it * libvdpau-va-gl start writing changes to ChangeLog libvdpau-va-gl-0.2.1/Doxyfile000066400000000000000000002251111224142723700160400ustar00rootroot00000000000000# Doxyfile 1.8.1.2 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = "libvdpau-va-gl" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. 
PROJECT_BRIEF = "VDPAU driver with OpenGL/VA-API backend" # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. 
OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. 
FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. 
# The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. 
OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). 
This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). 
INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. 
SYMBOL_CACHE_SIZE = 0 # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. 
# If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. 
If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. 
SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. 
GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command <command> <input-file>, where <command> is the value of # the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. 
FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. 
WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. 
INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = # The RECURSIVE tag can be used to specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. 
Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain images that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command <filter> <input-file>, where # <filter> is the value of the INPUT_FILTER tag, and <input-file> is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # none of the patterns match the file name, INPUT_FILTER is applied. 
FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. 
REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. 
IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. 
Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # style sheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. 
Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries 1 will produce a full collapsed tree by # default. 0 is a special value representing an infinite number of entries # and will result in a full expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. 
com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. 
BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. 
QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to NO if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider to set DISABLE_INDEX to NO when enabling this option. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. 
Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want the formulas to look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. 
The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a PHP enabled web server instead of at the web client # using Javascript. Doxygen will generate the search PHP script and index # file to put on the web server. The advantage of the server # based approach is that it scales better to large projects and allows # full text search. The disadvantages are that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = YES # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. 
If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). 
The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. 
RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. 
MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. 
GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. 
Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. 
EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). 
PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. 
When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # managable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. 
UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. 
DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. 
DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES libvdpau-va-gl-0.2.1/README.md000066400000000000000000000060261224142723700156130ustar00rootroot00000000000000About ===== Briefly, this is the [VDPAU](http://en.wikipedia.org/wiki/VDPAU) driver with [VA-API](http://en.wikipedia.org/wiki/Video_Acceleration_API)/OpenGL backend. There are applications exists that can use VDPAU. 
Amongst them are Adobe Flash Player and Mplayer. They both can use VDPAU, but since there is no VDPAU available on Intel chips, they fall back to different drawing techniques. And while Mplayer can use XVideo extension to offload scaling to GPU, Flash Player can not and does all scaling in software. If there was VDPAU available, CPU usage could be significantly lower. VDPAU is not vendor-locked technology. Even official documentation mentions possibility of other drivers. They should be named as `libvdpau_drivername.so.1` and placed where linker could find them. `/usr/lib` usually works fine. Which driver to use is determined by asking X server about current driver name or by using `VDPAU_DRIVER` environment variable. Here is one. Named libvdpau_va_gl.so.1, it uses OpenGL under the hood to accelerate drawing and scaling and VA-API (if available) to accelerate video decoding. For now VA-API is available on some Intel chips, and on some AMD video adapters with help of [xvba-va-driver](http://cgit.freedesktop.org/vaapi/xvba-driver/). OpenGL is available, you know, on systems with OpenGL available. Install ======= 1. `sudo apt-get install cmake libvdpau-dev libva-dev libglib2.0-dev libswscale-dev libgl1-mesa-dev libglu1-mesa-dev` 2. `mkdir build; cd build` 3. `cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..` 4. `sudo make install` 5. Add `VDPAU_DRIVER=va_gl` to your environment Commands above should work for any Debian-based distro. Fedora names packages in a different way, so package installation step will look like: `sudo yum install cmake libvdpau-devel libva-devel glib2-devel ffmpeg-devel mesa-libGL-devel mesa-libGLU-devel`. Note you need to add rpmfusion repo to get `ffmpeg-devel`. Run time configuration ====================== Besides `VDPAU_DRIVER` variable which selects which driver to use there are other variables that control runtime behavior of va_gl driver. `VDPAU_LOG` enables or disables tracing. `0` disables, `1` enables. 
`VDPAU_QUIRKS` contains comma-separated list of enabled quirks. Here is the list: * `XCloseDisplay` Disables calling of XCloseDisplay which may segfault on systems with some AMD cards * `ShowWatermark` Enables displaying string "va_gl" in bottom-right corner of window * `LogThreadId` Adds thread id to trace output * `LogCallDuration` Adds call duration to trace output * `LogPqDelay` Adds presentation queue introduced delay to trace output * `LogTimestamp` Displays timestamps * `AvoidVA` Makes libvdpau-va-gl NOT use VA-API Parameters of VDPAU_QUIRKS are case-insensetive. Copying ======= libvdpau-va-gl is distributed under the terms of the LGPLv3. See files COPYING, COPYING.GPLv3, and COPYING.LGPLv3 for details. Contact ======= Author can be reached at email `ibragimovrinat-at-mail.ru` or at github: https://github.com/i-rinat/ libvdpau-va-gl-0.2.1/bitstream.c000066400000000000000000000060731224142723700164740ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #include "bitstream.h" #include #include inline void rbsp_attach_buffer(rbsp_state_t *state, const uint8_t *buf, size_t byte_count) { state->buf_ptr = buf; state->byte_count = byte_count; state->cur_ptr = buf; state->bit_ptr = 7; state->zeros_in_row = 0; state->bits_eaten = 0; } rbsp_state_t rbsp_copy_state(rbsp_state_t *state) { return *state; } inline int rbsp_navigate_to_nal_unit(rbsp_state_t *state) { int found = 1; int window[3] = {-1, -1, -1}; do { if (state->cur_ptr >= state->buf_ptr + state->byte_count) { found = 0; // no bytes left, no nal unit found break; } int c = *state->cur_ptr++; window[0] = window[1]; window[1] = window[2]; window[2] = c; } while (0 != window[0] || 0 != window[1] || 1 != window[2]); if (found) return (int)(state->cur_ptr - state->buf_ptr); return -1; } inline void rbsp_reset_bit_counter(rbsp_state_t *state) { state->bits_eaten = 0; } inline int rbsp_consume_byte(rbsp_state_t *state) { if (state->cur_ptr >= state->buf_ptr + state->byte_count) return -1; uint8_t c = *state->cur_ptr++; if (0 == c) state->zeros_in_row ++; else state->zeros_in_row = 0; if (state->zeros_in_row >= 2) { uint8_t epb = *state->cur_ptr++; if (0 != epb) state->zeros_in_row = 0; // if epb is not actually have 0x03 value, it's not an emulation prevention if (0x03 != epb) state->cur_ptr--; // so rewind } return c; } inline int rbsp_consume_bit(rbsp_state_t *state) { assert (state->cur_ptr < state->buf_ptr + state->byte_count); int value = !!(*state->cur_ptr & (1 << state->bit_ptr)); if (state->bit_ptr > 0) { state->bit_ptr --; } else { state->bit_ptr = 7; rbsp_consume_byte(state); // handles emulation prevention bytes } state->bits_eaten += 1; return value; } inline unsigned int rbsp_get_u(rbsp_state_t *state, int bitcount) { unsigned int value = 0; for (int k = 0; k < bitcount; k ++) value = (value << 1) + rbsp_consume_bit(state); return value; } inline unsigned int rbsp_get_uev(rbsp_state_t *state) { int zerobit_count = -1; int current_bit = 0; do { 
zerobit_count ++; current_bit = rbsp_consume_bit(state); } while (0 == current_bit); if (0 == zerobit_count) return 0; return (1 << zerobit_count) - 1 + rbsp_get_u(state, zerobit_count); } inline int rbsp_get_sev(rbsp_state_t *state) { int zerobit_count = -1; int current_bit = 0; do { zerobit_count ++; current_bit = rbsp_consume_bit(state); } while (0 == current_bit); if (0 == zerobit_count) return 0; int value = (1 << zerobit_count) + rbsp_get_u(state, zerobit_count); if (value & 1) return -value/2; return value/2; } libvdpau-va-gl-0.2.1/bitstream.h000066400000000000000000000033001224142723700164670ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ #ifndef __BITSTREAM_H #define __BITSTREAM_H #include #include /** @brief State of raw byte stream payload comsumer */ typedef struct _rbsp_state_struct { const uint8_t *buf_ptr; ///< pointer to beginning of the buffer size_t byte_count; ///< size of buffer const uint8_t *cur_ptr; ///< pointer to currently processed byte int bit_ptr; ///< pointer to currently processed bit int zeros_in_row; ///< number of consequetive zero bytes so far int bits_eaten; ///< bit offset of current position not including EPB } rbsp_state_t; /** @brief Initialize rbsp state * * @param [out] state * @param [in] buf pointer to byte string * @param [in] byte_count number of bytes in @param buf * * @retval void */ void rbsp_attach_buffer(rbsp_state_t *state, const uint8_t *buf, size_t byte_count); /** @brief Consumes and returns one byte from rbsp * * This function handles emulation prevention bytes internally, without their * exposure to caller. Returns value of successfully consumed byte. 
*/ int rbsp_consume_byte(rbsp_state_t *state); rbsp_state_t rbsp_copy_state(rbsp_state_t *state); int rbsp_navigate_to_nal_unit(rbsp_state_t *state); void rbsp_reset_bit_counter(rbsp_state_t *state); int rbsp_consume_bit(rbsp_state_t *state); unsigned int rbsp_get_u(rbsp_state_t *state, int bitcount); unsigned int rbsp_get_uev(rbsp_state_t *state); int rbsp_get_sev(rbsp_state_t *state); #endif libvdpau-va-gl-0.2.1/ctx-stack.c000066400000000000000000000104111224142723700163720ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ /* * glx context stack */ #define _GNU_SOURCE #include "ctx-stack.h" #include "globals.h" #include #include "vdpau-trace.h" #include #include static __thread Display *glx_ctx_stack_display; static __thread Drawable glx_ctx_stack_wnd; static __thread GLXContext glx_ctx_stack_glc; static __thread int glx_ctx_stack_same; static __thread int glx_ctx_stack_element_count = 0; GHashTable *glc_hash_table = NULL; int glc_hash_table_ref_count = 0; GLXContext root_glc; XVisualInfo *root_vi; void glx_context_push_global(Display *dpy, Drawable wnd, GLXContext glc) { pthread_mutex_lock(&global.glx_ctx_stack_mutex); assert(0 == glx_ctx_stack_element_count); glx_ctx_stack_display = glXGetCurrentDisplay(); glx_ctx_stack_wnd = glXGetCurrentDrawable(); glx_ctx_stack_glc = glXGetCurrentContext(); glx_ctx_stack_same = 0; glx_ctx_stack_element_count ++; if (dpy == glx_ctx_stack_display && wnd == glx_ctx_stack_wnd && glc == glx_ctx_stack_glc) { // Same context. Don't call MakeCurrent. 
glx_ctx_stack_same = 1; } else { glx_ctx_stack_same = 0; glXMakeCurrent(dpy, wnd, glc); } } void glx_context_push_thread_local(VdpDeviceData *deviceData) { pthread_mutex_lock(&global.glx_ctx_stack_mutex); Display *dpy = deviceData->display; const Window wnd = deviceData->root; const gint thread_id = (gint) syscall(__NR_gettid); GLXContext glc = g_hash_table_lookup(glc_hash_table, GINT_TO_POINTER(thread_id)); if (!glc) { glc = glXCreateContext(dpy, root_vi, root_glc, GL_TRUE); assert(glc); g_hash_table_insert(glc_hash_table, GINT_TO_POINTER(thread_id), glc); } glx_ctx_stack_display = glXGetCurrentDisplay(); glx_ctx_stack_wnd = glXGetCurrentDrawable(); glx_ctx_stack_glc = glXGetCurrentContext(); glx_ctx_stack_element_count ++; if (dpy == glx_ctx_stack_display && wnd == glx_ctx_stack_wnd && glc == glx_ctx_stack_glc) { // Same context. Don't call MakeCurrent. glx_ctx_stack_same = 1; } else { glx_ctx_stack_same = 0; glXMakeCurrent(dpy, wnd, glc); } } void glx_context_pop() { assert(1 == glx_ctx_stack_element_count); if (!glx_ctx_stack_same) { if (glx_ctx_stack_display) glXMakeCurrent(glx_ctx_stack_display, glx_ctx_stack_wnd, glx_ctx_stack_glc); } glx_ctx_stack_element_count --; pthread_mutex_unlock(&global.glx_ctx_stack_mutex); } void glx_context_lock(void) { pthread_mutex_lock(&global.glx_ctx_stack_mutex); } void glx_context_unlock(void) { pthread_mutex_unlock(&global.glx_ctx_stack_mutex); } void glx_context_ref_glc_hash_table(Display *dpy, int screen) { pthread_mutex_lock(&global.glx_ctx_stack_mutex); if (0 == glc_hash_table_ref_count) { glc_hash_table = g_hash_table_new(g_direct_hash, g_direct_equal); glc_hash_table_ref_count = 1; GLint att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None }; root_vi = glXChooseVisual(dpy, screen, att); if (NULL == root_vi) { traceError("error (glx_context_ref_glc_hash_table): glXChooseVisual failed\n"); return; } root_glc = glXCreateContext(dpy, root_vi, NULL, GL_TRUE); } else { glc_hash_table_ref_count ++; } 
pthread_mutex_unlock(&global.glx_ctx_stack_mutex); } static void glc_hash_destroy_func(gpointer key, gpointer value, gpointer user_data) { (void)key; GLXContext glc = value; Display *dpy = user_data; glXDestroyContext(dpy, glc); } void glx_context_unref_glc_hash_table(Display *dpy) { pthread_mutex_lock(&global.glx_ctx_stack_mutex); glc_hash_table_ref_count --; if (0 == glc_hash_table_ref_count) { g_hash_table_foreach(glc_hash_table, glc_hash_destroy_func, dpy); g_hash_table_unref(glc_hash_table); glc_hash_table = NULL; glXDestroyContext(dpy, root_glc); XFree(root_vi); } pthread_mutex_unlock(&global.glx_ctx_stack_mutex); } GLXContext glx_context_get_root_context(void) { return root_glc; } libvdpau-va-gl-0.2.1/ctx-stack.h000066400000000000000000000012311224142723700163770ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ #ifndef __CTX_STACK_H #define __CTX_STACK_H #include "vdpau-soft.h" void glx_context_push_global(Display *dpy, Drawable wnd, GLXContext glc); void glx_context_push_thread_local(VdpDeviceData *deviceData); void glx_context_pop(void); void glx_context_ref_glc_hash_table(Display *dpy, int screen); void glx_context_unref_glc_hash_table(Display *dpy); GLXContext glx_context_get_root_context(void); void glx_context_lock(void); void glx_context_unlock(void); #endif /* __CTX_STACK_H */ libvdpau-va-gl-0.2.1/globals.c000066400000000000000000000003431224142723700161170ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #include "globals.h" struct global_data global; libvdpau-va-gl-0.2.1/globals.h000066400000000000000000000022301224142723700161210ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ #ifndef __GLOBALS_H #define __GLOBALS_H #include /** @brief place where all shared global variables live */ struct global_data { pthread_mutex_t glx_ctx_stack_mutex; ///< mutex for GLX context management functions /** @brief tunables */ struct { int buggy_XCloseDisplay; ///< avoid calling XCloseDisplay int show_watermark; ///< show picture over output int log_thread_id; ///< include thread id into the log output int log_call_duration; ///< measure call duration int log_pq_delay; ///< measure delay between queueing and displaying presentation ///< queue introduces int log_timestamp; ///< display timestamps int avoid_va; ///< do not use VA-API video decoding acceleration even if ///< available } quirks; }; extern struct global_data global; #endif /* __GLOBALS_H */ libvdpau-va-gl-0.2.1/h264-parse.c000066400000000000000000000557471224142723700163110ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #define _GNU_SOURCE #include #include #include #include "h264-parse.h" #define NOT_IMPLEMENTED(str) assert(0 && "not implemented" && str) #define DESCRIBE(xparam, format) fprintf(stderr, #xparam " = %" #format "\n", xparam) struct slice_parameters { int nal_ref_idc; int nal_unit_type; int first_mb_in_slice; int slice_type; int pic_parameter_set_id; int frame_num; int field_pic_flag; int bottom_field_flag; int idr_pic_id; int pic_order_cnt_lsb; int delta_pic_order_cnt_bottom; int delta_pic_order_cnt[2]; int redundant_pic_cnt; int direct_spatial_mv_pred_flag; int num_ref_idx_active_override_flag; int num_ref_idx_l0_active_minus1; int num_ref_idx_l1_active_minus1; int luma_log2_weight_denom; int chroma_log2_weight_denom; unsigned int luma_weight_l0_flag; int luma_weight_l0[32]; int luma_offset_l0[32]; unsigned int chroma_weight_l0_flag; int chroma_weight_l0[32][2]; int chroma_offset_l0[32][2]; unsigned int luma_weight_l1_flag; int luma_weight_l1[32]; int luma_offset_l1[32]; unsigned int chroma_weight_l1_flag; int chroma_weight_l1[32][2]; int chroma_offset_l1[32][2]; unsigned int no_output_of_prior_pics_flag; unsigned int long_term_reference_flag; unsigned int cabac_init_idc; int slice_qp_delta; unsigned int sp_for_switch_flag; int slice_qs_delta; unsigned int disable_deblocking_filter_idc; int slice_alpha_c0_offset_div2; int slice_beta_offset_div2; VAPictureH264 RefPicList0[32]; VAPictureH264 RefPicList1[32]; }; static void parse_ref_pic_list_modification(rbsp_state_t *st, const VAPictureParameterBufferH264 *vapp, struct slice_parameters *sp); static void parse_pred_weight_table(rbsp_state_t *st, const int ChromaArrayType, struct slice_parameters *sp); static void parse_dec_ref_pic_marking(rbsp_state_t *st, struct slice_parameters *sp); static void do_fill_va_slice_parameter_buffer(struct slice_parameters const * const sp, VASliceParameterBufferH264 *vasp, int bit_offset) { vasp->slice_data_bit_offset = bit_offset; vasp->first_mb_in_slice = sp->first_mb_in_slice; 
vasp->slice_type = sp->slice_type; vasp->direct_spatial_mv_pred_flag = sp->direct_spatial_mv_pred_flag; vasp->num_ref_idx_l0_active_minus1 = sp->num_ref_idx_l0_active_minus1; vasp->num_ref_idx_l1_active_minus1 = sp->num_ref_idx_l1_active_minus1; vasp->cabac_init_idc = sp->cabac_init_idc; vasp->slice_qp_delta = sp->slice_qp_delta; vasp->disable_deblocking_filter_idc = sp->disable_deblocking_filter_idc; vasp->slice_alpha_c0_offset_div2 = sp->slice_alpha_c0_offset_div2; vasp->slice_beta_offset_div2 = sp->slice_beta_offset_div2; for (int k = 0; k < 32; k ++) { vasp->RefPicList0[k] = sp->RefPicList0[k]; vasp->RefPicList1[k] = sp->RefPicList1[k]; } vasp->luma_log2_weight_denom = sp->luma_log2_weight_denom; vasp->chroma_log2_weight_denom = sp->chroma_log2_weight_denom; vasp->luma_weight_l0_flag = sp->luma_weight_l0_flag; for (int k = 0; k < 32; k ++) vasp->luma_weight_l0[k] = sp->luma_weight_l0[k]; for (int k = 0; k < 32; k ++) vasp->luma_offset_l0[k] = sp->luma_offset_l0[k]; vasp->chroma_weight_l0_flag = sp->chroma_weight_l0_flag; for (int k = 0; k < 32; k ++) vasp->chroma_weight_l0[k][0] = sp->chroma_weight_l0[k][0]; for (int k = 0; k < 32; k ++) vasp->chroma_weight_l0[k][1] = sp->chroma_weight_l0[k][1]; for (int k = 0; k < 32; k ++) vasp->chroma_offset_l0[k][0] = sp->chroma_offset_l0[k][0]; for (int k = 0; k < 32; k ++) vasp->chroma_offset_l0[k][1] = sp->chroma_offset_l0[k][1]; vasp->luma_weight_l1_flag = sp->luma_weight_l1_flag; for (int k = 0; k < 32; k ++) vasp->luma_weight_l1[k] = sp->luma_weight_l1[k]; for (int k = 0; k < 32; k ++) vasp->luma_offset_l1[k] = sp->luma_offset_l1[k]; vasp->chroma_weight_l1_flag = sp->chroma_weight_l1_flag; for (int k = 0; k < 32; k ++) vasp->chroma_weight_l1[k][0] = sp->chroma_weight_l1[k][0]; for (int k = 0; k < 32; k ++) vasp->chroma_weight_l1[k][1] = sp->chroma_weight_l1[k][1]; for (int k = 0; k < 32; k ++) vasp->chroma_offset_l1[k][0] = sp->chroma_offset_l1[k][0]; for (int k = 0; k < 32; k ++) vasp->chroma_offset_l1[k][1] = 
sp->chroma_offset_l1[k][1]; } void reset_va_picture_h264(VAPictureH264 *p) { p->picture_id = VA_INVALID_SURFACE; p->frame_idx = 0; p->flags = VA_PICTURE_H264_INVALID; p->TopFieldOrderCnt = 0; p->BottomFieldOrderCnt = 0; } static int comparison_function_1(const void *p1, const void *p2, void *context) { const int idx_1 = *(const int *)p1; const int idx_2 = *(const int *)p2; struct { int descending; int what; const VAPictureH264 *ReferenceFrames; } *ctx = context; int value1 = 0, value2 = 0; switch (ctx->what) { case 1: // top field value1 = ctx->ReferenceFrames[idx_1].TopFieldOrderCnt; value2 = ctx->ReferenceFrames[idx_2].TopFieldOrderCnt; break; case 2: // bottom field value1 = ctx->ReferenceFrames[idx_1].BottomFieldOrderCnt; value2 = ctx->ReferenceFrames[idx_2].BottomFieldOrderCnt; break; case 3: // frame_idx value1 = ctx->ReferenceFrames[idx_1].frame_idx; value2 = ctx->ReferenceFrames[idx_2].frame_idx; break; default: assert(0 && "wrong what field"); } int result; if (value1 < value2) result = -1; else if (value1 > value2) result = 1; else result = 0; if (ctx->descending) return -result; return result; } static void fill_ref_pic_list(struct slice_parameters *sp, const VAPictureParameterBufferH264 *vapp) { int idcs_asc[32], idcs_desc[32]; struct { int descending; int what; const VAPictureH264 *ReferenceFrames; } ctx; if (SLICE_TYPE_I == sp->slice_type || SLICE_TYPE_SI == sp->slice_type) return; ctx.ReferenceFrames = vapp->ReferenceFrames; int frame_count = 0; for (int k = 0; k < vapp->num_ref_frames; k ++) { if (vapp->ReferenceFrames[k].flags & VA_PICTURE_H264_INVALID) continue; sp->RefPicList0[frame_count] = vapp->ReferenceFrames[k]; idcs_asc[frame_count] = idcs_desc[frame_count] = k; frame_count ++; } if (SLICE_TYPE_P == sp->slice_type || SLICE_TYPE_SP == sp->slice_type) { // TODO: implement interlaced P slices ctx.what = 1; ctx.descending = 0; qsort_r(idcs_asc, frame_count, sizeof(idcs_asc[0]), &comparison_function_1, &ctx); ctx.descending = 1; 
qsort_r(idcs_desc, frame_count, sizeof(idcs_desc[0]), &comparison_function_1, &ctx); int ptr = 0; for (int k = 0; k < frame_count; k ++) if (vapp->ReferenceFrames[idcs_desc[k]].flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) sp->RefPicList0[ptr++] = vapp->ReferenceFrames[idcs_desc[k]]; for (int k = 0; k < frame_count; k ++) if (vapp->ReferenceFrames[idcs_asc[k]].flags & VA_PICTURE_H264_LONG_TERM_REFERENCE) sp->RefPicList0[ptr++] = vapp->ReferenceFrames[idcs_asc[k]]; } else if (SLICE_TYPE_B == sp->slice_type && !vapp->pic_fields.bits.field_pic_flag) { ctx.what = 1; ctx.descending = 0; qsort_r(idcs_asc, frame_count, sizeof(idcs_asc[0]), &comparison_function_1, &ctx); ctx.descending = 1; qsort_r(idcs_desc, frame_count, sizeof(idcs_desc[0]), &comparison_function_1, &ctx); int ptr0 = 0; int ptr1 = 0; for (int k = 0; k < frame_count; k ++) { const VAPictureH264 *rf = &vapp->ReferenceFrames[idcs_desc[k]]; if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) if (rf->TopFieldOrderCnt < vapp->CurrPic.TopFieldOrderCnt) sp->RefPicList0[ptr0++] = *rf; rf = &vapp->ReferenceFrames[idcs_asc[k]]; if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) if (rf->TopFieldOrderCnt >= vapp->CurrPic.TopFieldOrderCnt) sp->RefPicList1[ptr1++] = *rf; } for (int k = 0; k < frame_count; k ++) { const VAPictureH264 *rf = &vapp->ReferenceFrames[idcs_asc[k]]; if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) if (rf->TopFieldOrderCnt >= vapp->CurrPic.TopFieldOrderCnt) sp->RefPicList0[ptr0++] = *rf; rf = &vapp->ReferenceFrames[idcs_desc[k]]; if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) if (rf->TopFieldOrderCnt < vapp->CurrPic.TopFieldOrderCnt) sp->RefPicList1[ptr1++] = *rf; } for (int k = 0; k < frame_count; k ++) { const VAPictureH264 *rf = &vapp->ReferenceFrames[idcs_asc[k]]; if (rf->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE) { sp->RefPicList0[ptr0++] = *rf; sp->RefPicList1[ptr1++] = *rf; } } } else { // TODO: implement interlaced B slices assert(0 && "not implemeted: interlaced 
SLICE_TYPE_B sorting"); } } void parse_slice_header(rbsp_state_t *st, const VAPictureParameterBufferH264 *vapp, const int ChromaArrayType, unsigned int p_num_ref_idx_l0_active_minus1, unsigned int p_num_ref_idx_l1_active_minus1, VASliceParameterBufferH264 *vasp) { struct slice_parameters sp; for (int k = 0; k < 32; k ++) { reset_va_picture_h264(&sp.RefPicList0[k]); reset_va_picture_h264(&sp.RefPicList1[k]); } rbsp_get_u(st, 1); // forbidden_zero_bit sp.nal_ref_idc = rbsp_get_u(st, 2); sp.nal_unit_type = rbsp_get_u(st, 5); if (14 == sp.nal_unit_type || 20 == sp.nal_unit_type) { NOT_IMPLEMENTED("nal unit types 14 and 20"); } sp.first_mb_in_slice = rbsp_get_uev(st); sp.slice_type = rbsp_get_uev(st); if (sp.slice_type > 4) sp.slice_type -= 5; // wrap 5-9 to 0-4 // as now we know slice_type, time to fill RefPicListX fill_ref_pic_list(&sp, vapp); sp.pic_parameter_set_id = rbsp_get_uev(st); // TODO: separate_colour_plane_flag is 0 for all but YUV444. Now ok, but should detect properly. // See 7.3.3 sp.frame_num = rbsp_get_u(st, vapp->seq_fields.bits.log2_max_frame_num_minus4 + 4); sp.field_pic_flag = 0; sp.bottom_field_flag = 0; if (!vapp->seq_fields.bits.frame_mbs_only_flag) { sp.field_pic_flag = rbsp_get_u(st, 1); if (sp.field_pic_flag) { sp.bottom_field_flag = rbsp_get_u(st, 1); } } sp.idr_pic_id = 0; if (NAL_IDR_SLICE == sp.nal_unit_type) // IDR picture sp.idr_pic_id = rbsp_get_uev(st); sp.pic_order_cnt_lsb = 0; sp.delta_pic_order_cnt_bottom = 0; if (0 == vapp->seq_fields.bits.pic_order_cnt_type) { sp.pic_order_cnt_lsb = rbsp_get_u(st, vapp->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 + 4); if (vapp->pic_fields.bits.pic_order_present_flag && !vapp->pic_fields.bits.field_pic_flag) { sp.delta_pic_order_cnt_bottom = rbsp_get_sev(st); } } sp.delta_pic_order_cnt[0] = sp.delta_pic_order_cnt[1] = 0; if (1 == vapp->seq_fields.bits.pic_order_cnt_type && !vapp->seq_fields.bits.delta_pic_order_always_zero_flag) { sp.delta_pic_order_cnt[0] = rbsp_get_sev(st); if 
(vapp->pic_fields.bits.pic_order_present_flag && !vapp->pic_fields.bits.field_pic_flag) sp.delta_pic_order_cnt[1] = rbsp_get_sev(st); } sp.redundant_pic_cnt = 0; if (vapp->pic_fields.bits.redundant_pic_cnt_present_flag) sp.redundant_pic_cnt = rbsp_get_uev(st); sp.direct_spatial_mv_pred_flag = 0; if (SLICE_TYPE_B == sp.slice_type) sp.direct_spatial_mv_pred_flag = rbsp_get_u(st, 1); sp.num_ref_idx_active_override_flag = 0; sp.num_ref_idx_l0_active_minus1 = 0; sp.num_ref_idx_l1_active_minus1 = 0; if (SLICE_TYPE_P == sp.slice_type || SLICE_TYPE_SP == sp.slice_type || SLICE_TYPE_B == sp.slice_type) { sp.num_ref_idx_l0_active_minus1 = p_num_ref_idx_l0_active_minus1; if (SLICE_TYPE_P != sp.slice_type) sp.num_ref_idx_l1_active_minus1 = p_num_ref_idx_l1_active_minus1; sp.num_ref_idx_active_override_flag = rbsp_get_u(st, 1); if (sp.num_ref_idx_active_override_flag) { sp.num_ref_idx_l0_active_minus1 = rbsp_get_uev(st); if (SLICE_TYPE_B == sp.slice_type) sp.num_ref_idx_l1_active_minus1 = rbsp_get_uev(st); } } if (20 == sp.nal_unit_type) { NOT_IMPLEMENTED("nal unit type 20"); } else { parse_ref_pic_list_modification(st, vapp, &sp); } // here fields {luma,chroma}_weight_l{0,1}_flag differ from same-named flags from // H.264 recommendation. 
Each of those flags should be set to 1 if any of // weight tables differ from default sp.luma_weight_l0_flag = 0; sp.luma_weight_l1_flag = 0; sp.chroma_weight_l0_flag = 0; sp.chroma_weight_l1_flag = 0; if ((vapp->pic_fields.bits.weighted_pred_flag && (SLICE_TYPE_P == sp.slice_type || SLICE_TYPE_SP == sp.slice_type)) || (1 == vapp->pic_fields.bits.weighted_bipred_idc && SLICE_TYPE_B == sp.slice_type)) { parse_pred_weight_table(st, ChromaArrayType, &sp); } if (sp.nal_ref_idc != 0) { parse_dec_ref_pic_marking(st, &sp); } sp.cabac_init_idc = 0; if (vapp->pic_fields.bits.entropy_coding_mode_flag && SLICE_TYPE_I != sp.slice_type && SLICE_TYPE_SI != sp.slice_type) sp.cabac_init_idc = rbsp_get_uev(st); sp.slice_qp_delta = rbsp_get_sev(st); sp.sp_for_switch_flag = 0; sp.slice_qs_delta = 0; if (SLICE_TYPE_SP == sp.slice_type || SLICE_TYPE_SI == sp.slice_type) { if (SLICE_TYPE_SP == sp.slice_type) sp.sp_for_switch_flag = rbsp_get_u(st, 1); sp.slice_qs_delta = rbsp_get_sev(st); } sp.disable_deblocking_filter_idc = 0; sp.slice_alpha_c0_offset_div2 = 0; sp.slice_beta_offset_div2 = 0; if (vapp->pic_fields.bits.deblocking_filter_control_present_flag) { sp.disable_deblocking_filter_idc = rbsp_get_uev(st); if (1 != sp.disable_deblocking_filter_idc) { sp.slice_alpha_c0_offset_div2 = rbsp_get_sev(st); sp.slice_beta_offset_div2 = rbsp_get_sev(st); } } if (vapp->num_slice_groups_minus1 > 0 && vapp->slice_group_map_type >= 3 && vapp->slice_group_map_type <= 5) { NOT_IMPLEMENTED("don't know what length to consume\n"); } do_fill_va_slice_parameter_buffer(&sp, vasp, st->bits_eaten); } static void parse_ref_pic_list_modification(rbsp_state_t *st, const VAPictureParameterBufferH264 *vapp, struct slice_parameters *sp) { const int MaxFrameNum = 1 << (vapp->seq_fields.bits.log2_max_frame_num_minus4 + 4); const int MaxPicNum = (vapp->pic_fields.bits.field_pic_flag) ? 
2*MaxFrameNum : MaxFrameNum; if (2 != sp->slice_type && 4 != sp->slice_type) { int ref_pic_list_modification_flag_l0 = rbsp_get_u(st, 1); if (ref_pic_list_modification_flag_l0) { int modification_of_pic_nums_idc; int refIdxL0 = 0; unsigned int picNumL0 = vapp->frame_num; do { modification_of_pic_nums_idc = rbsp_get_uev(st); if (modification_of_pic_nums_idc < 2) { int abs_diff_pic_num_minus1 = rbsp_get_uev(st); if (0 == modification_of_pic_nums_idc) { picNumL0 -= (abs_diff_pic_num_minus1 + 1); } else { // 1 == modification_of_pic_nums_idc picNumL0 += (abs_diff_pic_num_minus1 + 1); } // wrap picNumL0 picNumL0 &= (MaxPicNum - 1); // there is no need to subtract MaxPicNum as in (8-36) in 8.2.4.3.1 // because frame_num already wrapped int j; for (j = 0; j < vapp->num_ref_frames; j ++) { if (vapp->ReferenceFrames[j].flags & VA_PICTURE_H264_INVALID) continue; if (vapp->ReferenceFrames[j].frame_idx == picNumL0 && (vapp->ReferenceFrames[j].flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE)) break; } assert (j < vapp->num_ref_frames); VAPictureH264 swp = vapp->ReferenceFrames[j]; for (int k = sp->num_ref_idx_l0_active_minus1; k > refIdxL0; k --) sp->RefPicList0[k] = sp->RefPicList0[k-1]; sp->RefPicList0[refIdxL0 ++] = swp; j = refIdxL0; for (int k = refIdxL0; k <= sp->num_ref_idx_l0_active_minus1 + 1; k ++) { if (sp->RefPicList0[k].frame_idx != picNumL0 && (sp->RefPicList0[k].flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE)) sp->RefPicList0[j++] = sp->RefPicList0[k]; } } else if (2 == modification_of_pic_nums_idc) { NOT_IMPLEMENTED("long"); fprintf(stderr, "long_term_pic_num = %d\n", rbsp_get_uev(st)); } } while (modification_of_pic_nums_idc != 3); } } if (1 == sp->slice_type) { int ref_pic_list_modification_flag_l1 = rbsp_get_u(st, 1); if (ref_pic_list_modification_flag_l1) { NOT_IMPLEMENTED("ref pic list modification 1"); // TODO: implement this int modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = rbsp_get_uev(st); if (0 == modification_of_pic_nums_idc || 1 == 
modification_of_pic_nums_idc) { fprintf(stderr, "abs_diff_pic_num_minus1 = %d\n", rbsp_get_uev(st)); } else if (2 == modification_of_pic_nums_idc) { fprintf(stderr, "long_term_pic_num = %d\n", rbsp_get_uev(st)); } } while (modification_of_pic_nums_idc != 3); } } } static void fill_default_pred_weight_table(struct slice_parameters *sp) { const int default_luma_weight = (1 << sp->luma_log2_weight_denom); const int default_chroma_weight = (1 << sp->chroma_log2_weight_denom); for (int k = 0; k < sp->num_ref_idx_l0_active_minus1 + 1; k ++) { sp->luma_weight_l0[k] = default_luma_weight; sp->luma_offset_l0[k] = 0; sp->chroma_weight_l0[k][0] = sp->chroma_weight_l0[k][1] = default_chroma_weight; sp->chroma_offset_l0[k][0] = sp->chroma_offset_l0[k][1] = 0; } for (int k = 0; k < sp->num_ref_idx_l1_active_minus1 + 1; k ++) { sp->luma_weight_l1[k] = default_luma_weight; sp->luma_offset_l1[k] = 0; sp->chroma_weight_l1[k][0] = sp->chroma_weight_l1[k][1] = default_chroma_weight; sp->chroma_offset_l1[k][0] = sp->chroma_offset_l1[k][1] = 0; } } static void parse_pred_weight_table(rbsp_state_t *st, const int ChromaArrayType, struct slice_parameters *sp) { sp->luma_log2_weight_denom = rbsp_get_uev(st); sp->chroma_log2_weight_denom = 0; if (0 != ChromaArrayType) sp->chroma_log2_weight_denom = rbsp_get_uev(st); fill_default_pred_weight_table(sp); const int default_luma_weight = (1 << sp->luma_log2_weight_denom); const int default_chroma_weight = (1 << sp->chroma_log2_weight_denom); for (int k = 0; k <= sp->num_ref_idx_l0_active_minus1; k ++) { int luma_weight_l0_flag = rbsp_get_u(st, 1); if (luma_weight_l0_flag) { sp->luma_weight_l0[k] = rbsp_get_sev(st); sp->luma_offset_l0[k] = rbsp_get_sev(st); if (default_luma_weight != sp->luma_weight_l0[k]) sp->luma_weight_l0_flag = 1; } if (0 != ChromaArrayType) { int chroma_weight_l0_flag = rbsp_get_u(st, 1); if (chroma_weight_l0_flag) { for (int j = 0; j < 2; j ++) { sp->chroma_weight_l0[k][j] = rbsp_get_sev(st); sp->chroma_offset_l0[k][j] = 
rbsp_get_sev(st); if (default_chroma_weight != sp->chroma_weight_l0[k][j]) sp->chroma_weight_l0_flag = 1; } } } } if (1 == sp->slice_type) { for (int k = 0; k <= sp->num_ref_idx_l1_active_minus1; k ++) { int luma_weight_l1_flag = rbsp_get_u(st, 1); if (luma_weight_l1_flag) { sp->luma_weight_l1[k] = rbsp_get_sev(st); sp->luma_offset_l1[k] = rbsp_get_sev(st); if (default_luma_weight != sp->luma_weight_l1[k]) sp->luma_weight_l1_flag = 1; } if (0 != ChromaArrayType) { int chroma_weight_l1_flag = rbsp_get_u(st, 1); if (chroma_weight_l1_flag) { for (int j = 0; j < 2; j ++) { sp->chroma_weight_l1[k][j] = rbsp_get_sev(st); sp->chroma_offset_l1[k][j] = rbsp_get_sev(st); if (default_chroma_weight != sp->chroma_weight_l1[k][j]) sp->chroma_weight_l1_flag = 1; } } } } } } static void parse_dec_ref_pic_marking(rbsp_state_t *st, struct slice_parameters *sp) { if (NAL_IDR_SLICE == sp->nal_unit_type) { sp->no_output_of_prior_pics_flag = rbsp_get_u(st, 1); sp->long_term_reference_flag = rbsp_get_u(st, 1); } else { int adaptive_ref_pic_marking_mode_flag = rbsp_get_u(st, 1); if (adaptive_ref_pic_marking_mode_flag) { // no need to do any action, just consume bits. 
All management should be done // on client side int memory_management_control_operation; do { memory_management_control_operation = rbsp_get_uev(st); if (1 == memory_management_control_operation || 3 == memory_management_control_operation) { rbsp_get_uev(st); // difference_of_pic_nums_minus1 } if (2 == memory_management_control_operation) { rbsp_get_uev(st); // long_term_pic_num } if (3 == memory_management_control_operation || 6 == memory_management_control_operation) { rbsp_get_uev(st); // long_term_frame_idx } if (4 == memory_management_control_operation) { rbsp_get_uev(st); // max_long_term_frame_idx_plus1 } } while (memory_management_control_operation != 0); } } } libvdpau-va-gl-0.2.1/h264-parse.h000066400000000000000000000016151224142723700162770ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ #ifndef __H264_PARSE_H #define __H264_PARSE_H #include #include "bitstream.h" #define SLICE_TYPE_P 0 #define SLICE_TYPE_B 1 #define SLICE_TYPE_I 2 #define SLICE_TYPE_SP 3 #define SLICE_TYPE_SI 4 #define NAL_UNSPECIFIED 0 #define NAL_SLICE 1 #define NAL_SLICE_DATA_A 2 #define NAL_SLICE_DATA_B 3 #define NAL_SLICE_DATA_C 4 #define NAL_IDR_SLICE 5 void parse_slice_header(rbsp_state_t *st, const VAPictureParameterBufferH264 *vapp, const int ChromaArrayType, unsigned int p_num_ref_idx_l0_active_minus1, unsigned int p_num_ref_idx_l1_active_minus1, VASliceParameterBufferH264 *vasp); void reset_va_picture_h264(VAPictureH264 *p); #endif libvdpau-va-gl-0.2.1/handle-storage.c000066400000000000000000000117561224142723700174030ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #define _XOPEN_SOURCE 500 #include "handle-storage.h" #include #include #include static GHashTable *vdp_handles; static GHashTable *xdpy_copies; //< Copies of X Display connections static GHashTable *xdpy_copies_refcount; //< Reference count of X Display connection copy static uint32_t next_handle_id; static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; void handle_initialize_storage(void) { pthread_mutex_lock(&lock); vdp_handles = g_hash_table_new(g_direct_hash, g_direct_equal); next_handle_id = 1; xdpy_copies = g_hash_table_new(g_direct_hash, g_direct_equal); xdpy_copies_refcount = g_hash_table_new(g_direct_hash, g_direct_equal); pthread_mutex_unlock(&lock); } int handle_insert(void *data) { int id; pthread_mutex_lock(&lock); while (g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(next_handle_id))) next_handle_id ++; id = next_handle_id ++; g_hash_table_insert(vdp_handles, GINT_TO_POINTER(id), data); pthread_mutex_unlock(&lock); return id; } // lock unsafe function static int _is_valid(int handle, HandleType type) { VdpGenericHandle *gh; gh = g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(handle)); if (!gh) return 0; // return true if caller wants any handle type if (HANDLETYPE_ANY == type) return 1; // check handle type if (gh->type == type) return 1; return 0; } void * handle_acquire(int handle, HandleType type) { VdpGenericHandle *res = NULL; while (1) { pthread_mutex_lock(&lock); if (!_is_valid(handle, type)) break; res = g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(handle)); if (pthread_mutex_trylock(&res->lock) == 0) break; pthread_mutex_unlock(&lock); usleep(1); } pthread_mutex_unlock(&lock); return res; } void handle_release(int handle) { pthread_mutex_lock(&lock); VdpGenericHandle *gh = g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(handle)); if (gh) pthread_mutex_unlock(&gh->lock); pthread_mutex_unlock(&lock); } void handle_expunge(int handle) { pthread_mutex_lock(&lock); if (_is_valid(handle, HANDLETYPE_ANY)) { VdpGenericHandle *gh = 
g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(handle)); if (gh) pthread_mutex_unlock(&gh->lock); g_hash_table_remove(vdp_handles, GINT_TO_POINTER(handle)); } pthread_mutex_unlock(&lock); } void handle_destory_storage(void) { pthread_mutex_lock(&lock); g_hash_table_unref(vdp_handles); vdp_handles = NULL; g_hash_table_unref(xdpy_copies); xdpy_copies = NULL; g_hash_table_unref(xdpy_copies_refcount); xdpy_copies_refcount = NULL; pthread_mutex_unlock(&lock); } void handle_execute_for_all(void (*callback)(int idx, void *entry, void *p), void *param) { pthread_mutex_lock(&lock); GList *tmp = g_hash_table_get_keys(vdp_handles); GList *keys = g_list_copy(tmp); g_list_free(tmp); GList *ptr = g_list_first(keys); while (ptr) { HandleType handle = GPOINTER_TO_INT(ptr->data); void *item = g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(handle)); if (item) { pthread_mutex_unlock(&lock); // TODO: race condition. Supply integer handle instead of pointer to fix. callback(handle, item, param); pthread_mutex_lock(&lock); } ptr = g_list_next(ptr); } g_list_free(keys); pthread_mutex_unlock(&lock); } void * handle_xdpy_ref(void *dpy_orig) { pthread_mutex_lock(&lock); Display *dpy = g_hash_table_lookup(xdpy_copies, dpy_orig); if (NULL == dpy) { dpy = XOpenDisplay(XDisplayString(dpy_orig)); if (!dpy) goto quit; g_hash_table_replace(xdpy_copies, dpy_orig, dpy); g_hash_table_replace(xdpy_copies_refcount, dpy_orig, GINT_TO_POINTER(1)); } else { int refcount = GPOINTER_TO_INT(g_hash_table_lookup(xdpy_copies_refcount, dpy_orig)); g_hash_table_replace(xdpy_copies_refcount, dpy_orig, GINT_TO_POINTER(refcount+1)); } quit: pthread_mutex_unlock(&lock); return dpy; } void handle_xdpy_unref(void *dpy_orig) { pthread_mutex_lock(&lock); int refcount = GPOINTER_TO_INT(g_hash_table_lookup(xdpy_copies_refcount, dpy_orig)); refcount = refcount - 1; if (0 == refcount) { // do close connection, nobody refers it anymore Display *dpy = g_hash_table_lookup(xdpy_copies, dpy_orig); XCloseDisplay(dpy); 
g_hash_table_remove(xdpy_copies, dpy_orig); g_hash_table_remove(xdpy_copies_refcount, dpy_orig); } else { // just update refcount g_hash_table_replace(xdpy_copies_refcount, dpy_orig, GINT_TO_POINTER(refcount)); } pthread_mutex_unlock(&lock); } libvdpau-va-gl-0.2.1/handle-storage.h000066400000000000000000000037551224142723700174100ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ #pragma once #ifndef HANDLE_STORAGE_H_ #define HANDLE_STORAGE_H_ #include #include typedef uint32_t HandleType; #define HANDLETYPE_ANY (HandleType)0 #define HANDLETYPE_DEVICE (HandleType)1 #define HANDLETYPE_PRESENTATION_QUEUE_TARGET (HandleType)2 #define HANDLETYPE_PRESENTATION_QUEUE (HandleType)3 #define HANDLETYPE_VIDEO_MIXER (HandleType)4 #define HANDLETYPE_OUTPUT_SURFACE (HandleType)5 #define HANDLETYPE_VIDEO_SURFACE (HandleType)6 #define HANDLETYPE_BITMAP_SURFACE (HandleType)7 #define HANDLETYPE_DECODER (HandleType)8 /** @brief Generic handle struct. Every other handle struct has same members at same place so it's possible to use type casting to determine handle type and parent. 
*/ typedef struct { HandleType type; ///< handle type void *parent; ///< link to parent pthread_mutex_t lock; } VdpGenericHandle; void handle_initialize_storage(void); int handle_insert(void *data); void *handle_acquire(int handle, HandleType type); void handle_release(int handle); void handle_expunge(int handle); void handle_destory_storage(void); void handle_execute_for_all(void (*callback)(int idx, void *entry, void *p), void *param); void *handle_xdpy_ref(void *dpy_orig); void handle_xdpy_unref(void *dpy_orig); static inline void free_list_push(int32_t *free_list, int32_t *free_list_head, int32_t value) { free_list[value] = *free_list_head; *free_list_head = value; } static inline int32_t free_list_pop(int32_t *free_list, int32_t *free_list_head) { int32_t value = *free_list_head; if (value >= 0) *free_list_head = free_list[value]; return value; } #endif /* HANDLE_STORAGE_H_ */ libvdpau-va-gl-0.2.1/reverse-constant.c000066400000000000000000000530521224142723700200030ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #include "reverse-constant.h" #include #include const char * reverse_func_id(VdpFuncId func_id) { switch (func_id) { case VDP_FUNC_ID_GET_ERROR_STRING: return "VDP_FUNC_ID_GET_ERROR_STRING"; case VDP_FUNC_ID_GET_PROC_ADDRESS: return "VDP_FUNC_ID_GET_PROC_ADDRESS"; case VDP_FUNC_ID_GET_API_VERSION: return "VDP_FUNC_ID_GET_API_VERSION"; case VDP_FUNC_ID_GET_INFORMATION_STRING: return "VDP_FUNC_ID_GET_INFORMATION_STRING"; case VDP_FUNC_ID_DEVICE_DESTROY: return "VDP_FUNC_ID_DEVICE_DESTROY"; case VDP_FUNC_ID_GENERATE_CSC_MATRIX: return "VDP_FUNC_ID_GENERATE_CSC_MATRIX"; case VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES: return "VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES"; case VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES: return "VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES"; case VDP_FUNC_ID_VIDEO_SURFACE_CREATE: return "VDP_FUNC_ID_VIDEO_SURFACE_CREATE"; case VDP_FUNC_ID_VIDEO_SURFACE_DESTROY: return "VDP_FUNC_ID_VIDEO_SURFACE_DESTROY"; case VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS: return "VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS"; case VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR: return "VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR"; case VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR: return "VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR"; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES: return "VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES"; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES: return "VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES"; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES: return "VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES"; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES: return "VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES"; case VDP_FUNC_ID_OUTPUT_SURFACE_CREATE: return "VDP_FUNC_ID_OUTPUT_SURFACE_CREATE"; case VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY: return 
"VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY"; case VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS: return "VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS"; case VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE: return "VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE"; case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE: return "VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE"; case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED: return "VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED"; case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR: return "VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR"; case VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES: return "VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES"; case VDP_FUNC_ID_BITMAP_SURFACE_CREATE: return "VDP_FUNC_ID_BITMAP_SURFACE_CREATE"; case VDP_FUNC_ID_BITMAP_SURFACE_DESTROY: return "VDP_FUNC_ID_BITMAP_SURFACE_DESTROY"; case VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS: return "VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS"; case VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE: return "VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE"; case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE: return "VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE"; case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE: return "VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE"; case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_VIDEO_SURFACE_LUMA: return "VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_VIDEO_SURFACE_LUMA"; case VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES: return "VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES"; case VDP_FUNC_ID_DECODER_CREATE: return "VDP_FUNC_ID_DECODER_CREATE"; case VDP_FUNC_ID_DECODER_DESTROY: return "VDP_FUNC_ID_DECODER_DESTROY"; case VDP_FUNC_ID_DECODER_GET_PARAMETERS: return "VDP_FUNC_ID_DECODER_GET_PARAMETERS"; case VDP_FUNC_ID_DECODER_RENDER: return "VDP_FUNC_ID_DECODER_RENDER"; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT: return "VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT"; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT: return "VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT"; case 
VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT: return "VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT"; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE: return "VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE"; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE: return "VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE"; case VDP_FUNC_ID_VIDEO_MIXER_CREATE: return "VDP_FUNC_ID_VIDEO_MIXER_CREATE"; case VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES: return "VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES"; case VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES: return "VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES"; case VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT: return "VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT"; case VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES: return "VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES"; case VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES: return "VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES"; case VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES: return "VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES"; case VDP_FUNC_ID_VIDEO_MIXER_DESTROY: return "VDP_FUNC_ID_VIDEO_MIXER_DESTROY"; case VDP_FUNC_ID_VIDEO_MIXER_RENDER: return "VDP_FUNC_ID_VIDEO_MIXER_RENDER"; case VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY: return "VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY"; case VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE: return "VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE"; case VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY: return "VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY"; case VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR: return "VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR"; case VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR: return "VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR"; case VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME: return "VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME"; case VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY: return "VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY"; case VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE: 
return "VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE"; case VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS: return "VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS"; case VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER: return "VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER"; case VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11: return "VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11"; default: return "Unknown"; } } const char * reverse_video_mixer_feature(VdpVideoMixerFeature mixer_feature) { switch (mixer_feature) { case VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL: return "VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL"; case VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL: return "VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL"; case VDP_VIDEO_MIXER_FEATURE_INVERSE_TELECINE: return "VDP_VIDEO_MIXER_FEATURE_INVERSE_TELECINE"; case VDP_VIDEO_MIXER_FEATURE_NOISE_REDUCTION: return "VDP_VIDEO_MIXER_FEATURE_NOISE_REDUCTION"; case VDP_VIDEO_MIXER_FEATURE_SHARPNESS: return "VDP_VIDEO_MIXER_FEATURE_SHARPNESS"; case VDP_VIDEO_MIXER_FEATURE_LUMA_KEY: return "VDP_VIDEO_MIXER_FEATURE_LUMA_KEY"; case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1: return "VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1"; case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L2: return "VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L2"; case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L3: return "VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L3"; case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L4: return "VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L4"; case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L5: return "VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L5"; case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L6: return "VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L6"; case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L7: return "VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L7"; case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L8: return 
"VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L8"; case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L9: return "VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L9"; default: return "Unknown video mixer feature"; } } const char * reverse_video_mixer_attribute(VdpVideoMixerAttribute attr) { switch (attr) { case VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR: return "VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR"; case VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX: return "VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX"; case VDP_VIDEO_MIXER_ATTRIBUTE_NOISE_REDUCTION_LEVEL: return "VDP_VIDEO_MIXER_ATTRIBUTE_NOISE_REDUCTION_LEVEL"; case VDP_VIDEO_MIXER_ATTRIBUTE_SHARPNESS_LEVEL: return "VDP_VIDEO_MIXER_ATTRIBUTE_SHARPNESS_LEVEL"; case VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MIN_LUMA: return "VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MIN_LUMA"; case VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MAX_LUMA: return "VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MAX_LUMA"; case VDP_VIDEO_MIXER_ATTRIBUTE_SKIP_CHROMA_DEINTERLACE: return "VDP_VIDEO_MIXER_ATTRIBUTE_SKIP_CHROMA_DEINTERLACE"; default: return "Unknown video mixer attribute"; } } const char * reverse_rgba_format(VdpRGBAFormat rgba_format) { switch (rgba_format) { case VDP_RGBA_FORMAT_B8G8R8A8: return "VDP_RGBA_FORMAT_B8G8R8A8"; case VDP_RGBA_FORMAT_R8G8B8A8: return "VDP_RGBA_FORMAT_R8G8B8A8"; case VDP_RGBA_FORMAT_R10G10B10A2: return "VDP_RGBA_FORMAT_R10G10B10A2"; case VDP_RGBA_FORMAT_B10G10R10A2: return "VDP_RGBA_FORMAT_B10G10R10A2"; case VDP_RGBA_FORMAT_A8: return "VDP_RGBA_FORMAT_A8"; default: return "Unknown RGBA format"; } } const char * reverse_chroma_type(VdpChromaType chroma_type) { switch (chroma_type) { case VDP_CHROMA_TYPE_420: return "VDP_CHROMA_TYPE_420"; case VDP_CHROMA_TYPE_422: return "VDP_CHROMA_TYPE_422"; case VDP_CHROMA_TYPE_444: return "VDP_CHROMA_TYPE_444"; default: return "Unknown chroma type"; } } const char * reverse_ycbcr_format(VdpYCbCrFormat ycbcr_format) { switch (ycbcr_format) { case VDP_YCBCR_FORMAT_NV12: return "VDP_YCBCR_FORMAT_NV12"; case 
VDP_YCBCR_FORMAT_YV12: return "VDP_YCBCR_FORMAT_YV12"; case VDP_YCBCR_FORMAT_UYVY: return "VDP_YCBCR_FORMAT_UYVY"; case VDP_YCBCR_FORMAT_YUYV: return "VDP_YCBCR_FORMAT_YUYV"; case VDP_YCBCR_FORMAT_Y8U8V8A8: return "VDP_YCBCR_FORMAT_Y8U8V8A8"; case VDP_YCBCR_FORMAT_V8U8Y8A8: return "VDP_YCBCR_FORMAT_V8U8Y8A8"; default: return "Unknown YCbCr format"; } } const char * reverser_video_mixer_picture_structure(VdpVideoMixerPictureStructure s) { switch (s) { case VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD: return "VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD"; case VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD: return "VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD"; case VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME: return "VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME"; default: return "Unknown video mixer picture structure"; } } const char * reverse_blend_factor(VdpOutputSurfaceRenderBlendFactor blend_factor) { switch (blend_factor) { case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_COLOR: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_COLOR"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_COLOR: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_COLOR"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_ALPHA: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_ALPHA"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_ALPHA: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_ALPHA"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_COLOR: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_COLOR"; 
case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_COLOR: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_COLOR"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA_SATURATE: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA_SATURATE"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_COLOR: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_COLOR"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_ALPHA: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_ALPHA"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA"; default: return "Unknown blend factor"; } } const char * reverse_blend_equation(VdpOutputSurfaceRenderBlendEquation blend_equation) { switch (blend_equation) { case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_SUBTRACT: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_SUBTRACT"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_REVERSE_SUBTRACT: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_REVERSE_SUBTRACT"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MIN: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MIN"; case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MAX: return "VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MAX"; default: return "Unknown blend equation"; } } const char * reverse_decoder_profile(VdpDecoderProfile profile) { switch (profile) { case VDP_DECODER_PROFILE_MPEG1: return "VDP_DECODER_PROFILE_MPEG1"; case VDP_DECODER_PROFILE_MPEG2_SIMPLE: return "VDP_DECODER_PROFILE_MPEG2_SIMPLE"; case VDP_DECODER_PROFILE_MPEG2_MAIN: return "VDP_DECODER_PROFILE_MPEG2_MAIN"; case VDP_DECODER_PROFILE_H264_BASELINE: return "VDP_DECODER_PROFILE_H264_BASELINE"; case 
VDP_DECODER_PROFILE_H264_MAIN: return "VDP_DECODER_PROFILE_H264_MAIN"; case VDP_DECODER_PROFILE_H264_HIGH: return "VDP_DECODER_PROFILE_H264_HIGH"; case VDP_DECODER_PROFILE_VC1_SIMPLE: return "VDP_DECODER_PROFILE_VC1_SIMPLE"; case VDP_DECODER_PROFILE_VC1_MAIN: return "VDP_DECODER_PROFILE_VC1_MAIN"; case VDP_DECODER_PROFILE_VC1_ADVANCED: return "VDP_DECODER_PROFILE_VC1_ADVANCED"; case VDP_DECODER_PROFILE_MPEG4_PART2_SP: return "VDP_DECODER_PROFILE_MPEG4_PART2_SP"; case VDP_DECODER_PROFILE_MPEG4_PART2_ASP: return "VDP_DECODER_PROFILE_MPEG4_PART2_ASP"; case VDP_DECODER_PROFILE_DIVX4_QMOBILE: return "VDP_DECODER_PROFILE_DIVX4_QMOBILE"; case VDP_DECODER_PROFILE_DIVX4_MOBILE: return "VDP_DECODER_PROFILE_DIVX4_MOBILE"; case VDP_DECODER_PROFILE_DIVX4_HOME_THEATER: return "VDP_DECODER_PROFILE_DIVX4_HOME_THEATER"; case VDP_DECODER_PROFILE_DIVX4_HD_1080P: return "VDP_DECODER_PROFILE_DIVX4_HD_1080P"; case VDP_DECODER_PROFILE_DIVX5_QMOBILE: return "VDP_DECODER_PROFILE_DIVX5_QMOBILE"; case VDP_DECODER_PROFILE_DIVX5_MOBILE: return "VDP_DECODER_PROFILE_DIVX5_MOBILE"; case VDP_DECODER_PROFILE_DIVX5_HOME_THEATER: return "VDP_DECODER_PROFILE_DIVX5_HOME_THEATER"; case VDP_DECODER_PROFILE_DIVX5_HD_1080P: return "VDP_DECODER_PROFILE_DIVX5_HD_1080P"; default: return "Unknown decoder profile"; } } const char * reverse_status(VdpStatus status) { switch (status) { case VDP_STATUS_OK: return "VDP_STATUS_OK"; case VDP_STATUS_NO_IMPLEMENTATION: return "VDP_STATUS_NO_IMPLEMENTATION"; case VDP_STATUS_DISPLAY_PREEMPTED: return "VDP_STATUS_DISPLAY_PREEMPTED"; case VDP_STATUS_INVALID_HANDLE: return "VDP_STATUS_INVALID_HANDLE"; case VDP_STATUS_INVALID_POINTER: return "VDP_STATUS_INVALID_POINTER"; case VDP_STATUS_INVALID_CHROMA_TYPE: return "VDP_STATUS_INVALID_CHROMA_TYPE"; case VDP_STATUS_INVALID_Y_CB_CR_FORMAT: return "VDP_STATUS_INVALID_Y_CB_CR_FORMAT"; case VDP_STATUS_INVALID_RGBA_FORMAT: return "VDP_STATUS_INVALID_RGBA_FORMAT"; case VDP_STATUS_INVALID_INDEXED_FORMAT: return 
"VDP_STATUS_INVALID_INDEXED_FORMAT"; case VDP_STATUS_INVALID_COLOR_STANDARD: return "VDP_STATUS_INVALID_COLOR_STANDARD"; case VDP_STATUS_INVALID_COLOR_TABLE_FORMAT: return "VDP_STATUS_INVALID_COLOR_TABLE_FORMAT"; case VDP_STATUS_INVALID_BLEND_FACTOR: return "VDP_STATUS_INVALID_BLEND_FACTOR"; case VDP_STATUS_INVALID_BLEND_EQUATION: return "VDP_STATUS_INVALID_BLEND_EQUATION"; case VDP_STATUS_INVALID_FLAG: return "VDP_STATUS_INVALID_FLAG"; case VDP_STATUS_INVALID_DECODER_PROFILE: return "VDP_STATUS_INVALID_DECODER_PROFILE"; case VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE: return "VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE"; case VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER: return "VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER"; case VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE: return "VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE"; case VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE: return "VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE"; case VDP_STATUS_INVALID_FUNC_ID: return "VDP_STATUS_INVALID_FUNC_ID"; case VDP_STATUS_INVALID_SIZE: return "VDP_STATUS_INVALID_SIZE"; case VDP_STATUS_INVALID_VALUE: return "VDP_STATUS_INVALID_VALUE"; case VDP_STATUS_INVALID_STRUCT_VERSION: return "VDP_STATUS_INVALID_STRUCT_VERSION"; case VDP_STATUS_RESOURCES: return "VDP_STATUS_RESOURCES"; case VDP_STATUS_HANDLE_DEVICE_MISMATCH: return "VDP_STATUS_HANDLE_DEVICE_MISMATCH"; case VDP_STATUS_ERROR: return "VDP_STATUS_ERROR"; default: return "Unknown VDP error"; } } const char * reverse_indexed_format(VdpIndexedFormat indexed_format) { switch (indexed_format) { case VDP_INDEXED_FORMAT_A4I4: return "VDP_INDEXED_FORMAT_A4I4"; case VDP_INDEXED_FORMAT_I4A4: return "VDP_INDEXED_FORMAT_I4A4"; case VDP_INDEXED_FORMAT_A8I8: return "VDP_INDEXED_FORMAT_A8I8"; case VDP_INDEXED_FORMAT_I8A8: return "VDP_INDEXED_FORMAT_I8A8"; default: return "Unknown indexed format"; } } const char * reverse_color_table_format(VdpColorTableFormat color_table_format) { switch (color_table_format) { case VDP_COLOR_TABLE_FORMAT_B8G8R8X8: 
return "VDP_COLOR_TABLE_FORMAT_B8G8R8X8"; default: return "Unknown color table format"; } } const char * reverse_video_mixer_parameter(VdpVideoMixerParameter parameter) { switch (parameter) { case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH: return "VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH"; case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT: return "VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT"; case VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE: return "VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE"; case VDP_VIDEO_MIXER_PARAMETER_LAYERS: return "VDP_VIDEO_MIXER_PARAMETER_LAYERS"; default: return "Unknown video mixer parameter"; } } const char * reverse_color_standard(VdpColorStandard color_standard) { switch (color_standard) { case VDP_COLOR_STANDARD_ITUR_BT_601: return "VDP_COLOR_STANDARD_ITUR_BT_601"; case VDP_COLOR_STANDARD_ITUR_BT_709: return "VDP_COLOR_STANDARD_ITUR_BT_709"; case VDP_COLOR_STANDARD_SMPTE_240M: return "VDP_COLOR_STANDARD_SMPTE_240M"; default: return "Unknown color standard"; } } const char * reverse_output_surface_render_rotate(int flags) { switch (flags & 3) { case VDP_OUTPUT_SURFACE_RENDER_ROTATE_0: return "VDP_OUTPUT_SURFACE_RENDER_ROTATE_0"; case VDP_OUTPUT_SURFACE_RENDER_ROTATE_90: return "VDP_OUTPUT_SURFACE_RENDER_ROTATE_90"; case VDP_OUTPUT_SURFACE_RENDER_ROTATE_180: return "VDP_OUTPUT_SURFACE_RENDER_ROTATE_180"; case VDP_OUTPUT_SURFACE_RENDER_ROTATE_270: return "VDP_OUTPUT_SURFACE_RENDER_ROTATE_270"; default: return "Unknown render rotate"; } } libvdpau-va-gl-0.2.1/reverse-constant.h000066400000000000000000000026201224142723700200030ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #pragma once #ifndef REVERSE_CONSTANT_H_ #define REVERSE_CONSTANT_H_ #include const char * reverse_status(VdpStatus status); const char * reverse_func_id(VdpFuncId func_id); const char * reverse_video_mixer_feature(VdpVideoMixerFeature mixer_feature); const char * reverse_video_mixer_parameter(VdpVideoMixerParameter parameter); const char * reverse_video_mixer_attribute(VdpVideoMixerAttribute attr); const char * reverse_rgba_format(VdpRGBAFormat rgba_format); const char * reverse_chroma_type(VdpChromaType chroma_type); const char * reverse_ycbcr_format(VdpYCbCrFormat ycbcr_format); const char * reverser_video_mixer_picture_structure(VdpVideoMixerPictureStructure s); const char * reverse_blend_factor(VdpOutputSurfaceRenderBlendFactor blend_factor); const char * reverse_blend_equation(VdpOutputSurfaceRenderBlendEquation blend_equation); const char * reverse_decoder_profile(VdpDecoderProfile profile); const char * reverse_indexed_format(VdpIndexedFormat indexed_format); const char * reverse_color_table_format(VdpColorTableFormat color_table_format); const char * reverse_color_standard(VdpColorStandard color_standard); const char * reverse_output_surface_render_rotate(int flags); #endif /* REVERSE_CONSTANT_H_ */ libvdpau-va-gl-0.2.1/tests/000077500000000000000000000000001224142723700154725ustar00rootroot00000000000000libvdpau-va-gl-0.2.1/tests/CMakeLists.txt000066400000000000000000000014231224142723700202320ustar00rootroot00000000000000cmake_minimum_required(VERSION 2.8) project(tests-for-libvdpau-va-gl) include_directories(..) 
find_package(X11 REQUIRED) pkg_check_modules(VDPAU vdpau REQUIRED) link_libraries(${X11_LIBRARIES} ${VDPAU_LIBRARIES} -lpthread) link_directories(${X11_LIBRARY_DIRS} ${VDPAU_LIBRARY_DIRS}) list(APPEND _vdpau_tests test-001 test-002 test-003 test-004 test-005 test-006 test-007 test-008 test-009 test-010) list(APPEND _all_tests test-000 ${_vdpau_tests}) add_executable(test-000 EXCLUDE_FROM_ALL test-000.c ../bitstream.c) foreach(_test ${_vdpau_tests}) add_executable(${_test} EXCLUDE_FROM_ALL "${_test}.c" vdpau-init.c) endforeach(_test) foreach(_test ${_all_tests}) add_test(${_test} ${CMAKE_CURRENT_BINARY_DIR}/${_test}) add_dependencies(build-tests ${_test}) endforeach(_test) libvdpau-va-gl-0.2.1/tests/test-000.c000066400000000000000000000040551224142723700171160ustar00rootroot00000000000000#ifdef NDEBUG #undef NDEBUG #endif #include "bitstream.h" #include #include int main(void) { unsigned char buf[] = {0xa6, 0x42, 0x98, 0xe2, 0x3f}; rbsp_state_t st; rbsp_attach_buffer(&st, buf, 5); assert (0 == rbsp_get_uev(&st)); assert (1 == rbsp_get_uev(&st)); assert (2 == rbsp_get_uev(&st)); assert (3 == rbsp_get_uev(&st)); assert (4 == rbsp_get_uev(&st)); assert (5 == rbsp_get_uev(&st)); assert (6 == rbsp_get_uev(&st)); assert (7 == rbsp_get_uev(&st)); assert (0 == rbsp_get_uev(&st)); assert (0 == rbsp_get_uev(&st)); rbsp_attach_buffer(&st, buf, 5); assert (1 == rbsp_get_u(&st, 1)); assert (2 == rbsp_get_u(&st, 3)); assert (3 == rbsp_get_u(&st, 3)); assert (4 == rbsp_get_u(&st, 5)); assert (5 == rbsp_get_u(&st, 5)); assert (6 == rbsp_get_u(&st, 5)); assert (7 == rbsp_get_u(&st, 5)); assert (8 == rbsp_get_u(&st, 7)); assert (1 == rbsp_get_u(&st, 1)); rbsp_attach_buffer(&st, buf, 5); assert ( 0 == rbsp_get_sev(&st)); assert ( 1 == rbsp_get_sev(&st)); assert (-1 == rbsp_get_sev(&st)); assert ( 2 == rbsp_get_sev(&st)); assert (-2 == rbsp_get_sev(&st)); assert ( 3 == rbsp_get_sev(&st)); assert (-3 == rbsp_get_sev(&st)); assert ( 4 == rbsp_get_sev(&st)); assert ( 0 == 
rbsp_get_sev(&st)); assert ( 0 == rbsp_get_sev(&st)); unsigned char buf2[] = {0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00}; rbsp_attach_buffer(&st, buf2, 8); for (int k = 0; k < 6 * 8; k ++) { assert (0 == rbsp_get_u(&st, 1)); } unsigned char buf3[] = {0x00, 0x00, 0x03, 0xff, 0xff}; rbsp_attach_buffer(&st, buf3, 5); for (int k = 0; k < 16; k ++) assert (0 == rbsp_get_u(&st, 1)); for (int k = 0; k < 16; k ++) assert (1 == rbsp_get_u(&st, 1)); unsigned char buf4[] = {0x00, 0x00, 0x00, 0x03, 0xff}; rbsp_attach_buffer(&st, buf4, 5); for (int k = 0; k < 24; k ++) assert (0 == rbsp_get_u(&st, 1)); for (int k = 0; k < 8; k ++) assert (1 == rbsp_get_u(&st, 1)); printf ("pass\n"); } libvdpau-va-gl-0.2.1/tests/test-001.c000066400000000000000000000114271224142723700171200ustar00rootroot00000000000000// Create two output surfaces (B8G8R8A8) of 4x4, fill first with opaque black // and second with black and two red dots (opaque too). // Render second into first. Check that red dots do not get smoothed. // The dot at (1, 1) checks for smoothing, one at (3,3) checks for edge condition. 
#include #include #include #include #include "vdpau-init.h" int main(void) { VdpDevice device; VdpStatus st = vdpau_init_functions(&device, NULL, 0); assert (VDP_STATUS_OK == st); VdpOutputSurface out_surface_1; VdpOutputSurface out_surface_2; ASSERT_OK(vdp_output_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, &out_surface_1)); ASSERT_OK(vdp_output_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, &out_surface_2)); uint32_t black_box[] = { 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000 }; uint32_t two_red_dots[] = { 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xffff0000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xffff0000 }; const void * const source_data_1[] = {black_box}; const void * const source_data_2[] = {two_red_dots}; uint32_t source_pitches[] = { 4 * 4 }; // upload data ASSERT_OK(vdp_output_surface_put_bits_native(out_surface_1, source_data_1, source_pitches, NULL)); ASSERT_OK(vdp_output_surface_put_bits_native(out_surface_2, source_data_2, source_pitches, NULL)); // render VdpOutputSurfaceRenderBlendState blend_state = { .struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION, .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; ASSERT_OK(vdp_output_surface_render_output_surface(out_surface_1, NULL, out_surface_2, NULL, NULL, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); // get data back 
uint32_t receive_buf[16]; void * const dest_data[] = {receive_buf}; ASSERT_OK(vdp_output_surface_get_bits_native(out_surface_1, NULL, dest_data, source_pitches)); printf("output surface\n"); for (int k = 0; k < 16; k ++) { printf("%x ", receive_buf[k]); if (3 == k % 4) printf("\n"); } printf("----------\n"); for (int k = 0; k < 16; k ++) { printf("%x ", two_red_dots[k]); if (3 == k % 4) printf("\n"); } // compare recieve_buf with two_red_dots if (memcmp(receive_buf, two_red_dots, 4*4*4)) { printf("fail\n"); return 1; } // Check bitmap surface rendering smoothing issue VdpBitmapSurface bmp_surface; ASSERT_OK(vdp_bitmap_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, 1, &bmp_surface)); ASSERT_OK(vdp_bitmap_surface_put_bits_native(bmp_surface, source_data_2, source_pitches, NULL)); VdpOutputSurfaceRenderBlendState blend_state_opaque_copy = { .struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION, .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; ASSERT_OK(vdp_output_surface_render_bitmap_surface(out_surface_1, NULL, bmp_surface, NULL, NULL, &blend_state_opaque_copy, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); ASSERT_OK(vdp_output_surface_get_bits_native(out_surface_1, NULL, dest_data, source_pitches)); printf("bitmap surface\n"); for (int k = 0; k < 16; k ++) { printf("%x ", receive_buf[k]); if (3 == k % 4) printf("\n"); } printf("----------\n"); for (int k = 0; k < 16; k ++) { printf("%x ", two_red_dots[k]); if (3 == k % 4) printf("\n"); } if (memcmp(receive_buf, two_red_dots, 4*4*4)) { printf("fail\n"); return 2; } printf("pass\n"); 
return 0; } libvdpau-va-gl-0.2.1/tests/test-002.c000066400000000000000000000066431224142723700171250ustar00rootroot00000000000000// test-002 // Test alignment issues of output surface {get,put}bits. // Uploads 5x5 square of A8 samples, thus breaking 4-byte alignment. Then downloads and // compares. Buffers should contain identical data. // // Bitmap surfaces checked too. But since there is no way to download data directly from // bitmap surface, we doing this via rendering to output surface. #include #include #include "vdpau-init.h" int main(void) { VdpDevice device; VdpOutputSurface out_surface; VdpBitmapSurface bmp_surface; uint8_t twenty_five[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19 }; uint8_t out_buf[25]; const void * const source_data[] = { twenty_five }; void * const destination_data[] = { out_buf }; uint32_t source_pitches[] = { 5 }; uint32_t destination_pitches[] = { 5 }; ASSERT_OK(vdpau_init_functions(&device, NULL, 0)); ASSERT_OK(vdp_output_surface_create(device, VDP_RGBA_FORMAT_A8, 5, 5, &out_surface)); // upload image to surface, download image from surface ASSERT_OK(vdp_output_surface_put_bits_native(out_surface, source_data, source_pitches, NULL)); ASSERT_OK(vdp_output_surface_get_bits_native(out_surface, NULL, destination_data, destination_pitches)); printf("outputsurface\n"); for (int k = 0; k < 25; k ++) { printf(" %02x", twenty_five[k]); if (k % 5 == 4) printf("\n"); } printf("----------\n"); for (int k = 0; k < 25; k ++) { printf(" %02x", out_buf[k]); if (k % 5 == 4) printf("\n"); } printf("==========\n"); if (memcmp(out_buf, twenty_five, 25)) { printf("failure\n"); return 1; } // Do check bitmap surface ASSERT_OK(vdp_bitmap_surface_create(device, VDP_RGBA_FORMAT_A8, 5, 5, 1, &bmp_surface)); ASSERT_OK(vdp_bitmap_surface_put_bits_native(bmp_surface, source_data, source_pitches, NULL)); // draw alpha channel as color 
VdpOutputSurfaceRenderBlendState blend_state = { .struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION, .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; ASSERT_OK(vdp_output_surface_render_bitmap_surface(out_surface, NULL, bmp_surface, NULL, NULL, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); ASSERT_OK(vdp_output_surface_get_bits_native(out_surface, NULL, destination_data, destination_pitches)); printf("bitmapsurface\n"); for (int k = 0; k < 25; k ++) { printf(" %02x", twenty_five[k]); if (k % 5 == 4) printf("\n"); } printf("----------\n"); for (int k = 0; k < 25; k ++) { printf(" %02x", out_buf[k]); if (k % 5 == 4) printf("\n"); } printf("==========\n"); if (memcmp(out_buf, twenty_five, 25)) { printf("failure\n"); return 2; } printf("pass\n"); return 0; } libvdpau-va-gl-0.2.1/tests/test-003.c000066400000000000000000000066131224142723700171230ustar00rootroot00000000000000// test-003 // // Testing blending A8 bitmap surface into B8G8R8A8 output with following blend parameters: // source/destination colors : src alpha / 1 - src alpha // source/destination alpha : one / src alpha // blend equation for color / alpha : add / add // // target surface filled with {0, 0, 0, 1} // // coloring with color {0, 1, 0, 1}. This should be green with alpha == 1. 
#include #include #include "vdpau-init.h" int main(void) { VdpDevice device; VdpBitmapSurface bmp_surface; VdpOutputSurface out_surface; const uint8_t bmp_1[] = { 0x00, 0x01, 0x02, 0x03, 0x14, 0x15, 0x16, 0x17, 0x28, 0x29, 0x2a, 0x2b, 0x3c, 0x3d, 0x3e, 0x3f }; const void * const source_data_bmp[] = { bmp_1 }; uint32_t source_pitches_bmp[] = { 4 }; const uint32_t black_4x4[] = { 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000, 0xff000000 }; const void * const source_data_black[] = { black_4x4 }; uint32_t source_pitches_black[] = { 4 * 4 }; ASSERT_OK(vdpau_init_functions(&device, NULL, 0)); // create surfaces ASSERT_OK(vdp_bitmap_surface_create(device, VDP_RGBA_FORMAT_A8, 4, 4, 1, &bmp_surface)); ASSERT_OK(vdp_output_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, &out_surface)); // upload data ASSERT_OK(vdp_bitmap_surface_put_bits_native(bmp_surface, source_data_bmp, source_pitches_bmp, NULL)); ASSERT_OK(vdp_output_surface_put_bits_native(out_surface, source_data_black, source_pitches_black, NULL)); VdpOutputSurfaceRenderBlendState blend_state = { .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; VdpColor color[] = {{0, 1.0, 0, 1.0}}; ASSERT_OK(vdp_output_surface_render_bitmap_surface(out_surface, NULL, bmp_surface, NULL, color, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); const uint32_t expected_result[] = { 0x00000000, 0x02000100, 0x04000200, 0x06000300, 0x28001400, 
0x2a001500, 0x2c001600, 0x2e001700, 0x50002800, 0x52002900, 0x54002a00, 0x56002b00, 0x78003c00, 0x7a003d00, 0x7c003e00, 0x7e003f00 }; uint32_t result[16]; void * const dest_data[] = { result }; ASSERT_OK(vdp_output_surface_get_bits_native(out_surface, NULL, dest_data, source_pitches_black)); printf("=== expected ===\n"); for (int k = 0; k < 16; k ++) { printf(" %08x", expected_result[k]); if (k % 4 == 3) printf("\n"); } printf("--- actual ---\n"); for (int k = 0; k < 16; k ++) { printf(" %08x", result[k]); if (k % 4 == 3) printf("\n"); } printf("==========\n"); if (memcmp(expected_result, result, sizeof(expected_result))) { printf("fail\n"); return 1; } printf("pass\n"); return 0; } libvdpau-va-gl-0.2.1/tests/test-004.c000066400000000000000000000065671224142723700171340ustar00rootroot00000000000000// test-004 // // Large scale (> 500 pixels) smooth test for VdpOutputSurfaceRenderBitmapSurface and // VdpOutputSurfaceRenderOutputSurface. // Rendering the same pattern via both paths and then comparing results. Using opaque copy, // only source matters. 
#include #include #include #include "vdpau-init.h" #define WIDTH 509 #define HEIGHT 601 int main(void) { int err_code = 0; VdpDevice device; ASSERT_OK(vdpau_init_functions(&device, NULL, 0)); VdpOutputSurface out_surface_in; VdpOutputSurface out_surface_out; VdpBitmapSurface bmp_surface; ASSERT_OK(vdp_output_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, WIDTH, HEIGHT, &out_surface_in)); ASSERT_OK(vdp_output_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, WIDTH, HEIGHT, &out_surface_out)); ASSERT_OK(vdp_bitmap_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, WIDTH, HEIGHT, 1, &bmp_surface)); uint32_t *src = malloc(4 * WIDTH * HEIGHT); uint32_t *dst = malloc(4 * WIDTH * HEIGHT); assert (NULL != src || NULL != dst); for (int k = 0; k < WIDTH * HEIGHT; k ++) { src[k] = ((k & 0xff) << 8) + (0xff << 24); // green pixel pattern } const void * const source_data[] = { src }; void * const destination_data[] = { dst }; uint32_t source_pitches[] = { 4 * WIDTH }; uint32_t destination_pitches[] = { 4 * WIDTH }; ASSERT_OK(vdp_output_surface_put_bits_native(out_surface_in, source_data, source_pitches, NULL)); ASSERT_OK(vdp_bitmap_surface_put_bits_native(bmp_surface, source_data, source_pitches, NULL)); VdpOutputSurfaceRenderBlendState blend_state_opaque_copy = { .struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION, .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; ASSERT_OK(vdp_output_surface_render_output_surface(out_surface_out, NULL, out_surface_in, NULL, NULL, &blend_state_opaque_copy, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); // check result of 
vdpOutputSurfaceRenderOutputSurface ASSERT_OK(vdp_output_surface_get_bits_native(out_surface_out, NULL, destination_data, destination_pitches)); if (memcmp(src, dst, 4 * WIDTH * HEIGHT)) { printf("fail / vdpOutputSurfaceRenderOutputSurface\n"); err_code = 1; goto free_resources_and_exit; } // check vdpOutputSurfaceRenderBitmapSurface ASSERT_OK(vdp_output_surface_render_bitmap_surface(out_surface_out, NULL, bmp_surface, NULL, NULL, &blend_state_opaque_copy, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); ASSERT_OK(vdp_output_surface_get_bits_native(out_surface_out, NULL, destination_data, destination_pitches)); if (memcmp(src, dst, 4 * WIDTH * HEIGHT)) { printf("fail / vdpOutputSurfaceRenderBitmapSurface\n"); err_code = 2; goto free_resources_and_exit; } printf("pass\n"); free_resources_and_exit: free(src); free(dst); return err_code; } libvdpau-va-gl-0.2.1/tests/test-005.c000066400000000000000000000123161224142723700171220ustar00rootroot00000000000000// test-005 // // rendering A8 bitmap to non-black output surface. // source/destination colors : src alpha / 1 - src alpha // source/destination alpha : one / src alpha // blend equation for color / alpha : add / add // // target surface filled with {1, 0, 0, 1} // // coloring with color {0, 1, 0, 1}. This should be green with alpha == 1. 
#include #include #include #include "vdpau-init.h" int main(void) { VdpDevice device; ASSERT_OK(vdpau_init_functions(&device, NULL, 0)); VdpBitmapSurface bmp_surface; VdpOutputSurface out_surface; ASSERT_OK(vdp_bitmap_surface_create(device, VDP_RGBA_FORMAT_A8, 5, 5, 1, &bmp_surface)); ASSERT_OK(vdp_output_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, 7, 7, &out_surface)); const uint8_t bmp_data[5 * 5] = { /* 1 2 3 4 5 */ /* 1 */ 0x00, 0x1e, 0x1f, 0x20, 0x21, /* 2 */ 0x01, 0x3e, 0x3f, 0x40, 0x41, /* 3 */ 0x02, 0x5e, 0x5f, 0x60, 0x61, /* 4 */ 0x03, 0x8e, 0x7f, 0xff, 0xff, /* 5 */ 0x04, 0xce, 0x7f, 0xff, 0xff }; const void * const source_data_bmp[] = { bmp_data }; uint32_t source_pitches_bmp[] = { 5 * 1 }; uint32_t green_screen[7 * 7]; const void * const source_data[] = { green_screen }; uint32_t source_pitches[] = { 7 * 4 }; for (int k = 0; k < 7 * 7; k ++) { green_screen[k] = 0xff00ff00; } ASSERT_OK(vdp_output_surface_put_bits_native(out_surface, source_data, source_pitches, NULL)); ASSERT_OK(vdp_bitmap_surface_put_bits_native(bmp_surface, source_data_bmp, source_pitches_bmp, NULL)); VdpOutputSurfaceRenderBlendState blend_state = { .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA, .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA, .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = {0, 0, 0, 0} }; VdpColor color[] = {{0.7, 0.3, 0.1, 0.6}}; VdpRect dest_rect = {1, 1, 6, 6}; ASSERT_OK(vdp_output_surface_render_bitmap_surface(out_surface, &dest_rect, bmp_surface, NULL, color, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); uint32_t result_buf[7 * 7]; void * const dest_data[] = { result_buf }; 
ASSERT_OK(vdp_output_surface_get_bits_native(out_surface, NULL, dest_data, source_pitches)); printf("--- actual ---\n"); for (int k = 0; k < 7 * 7; k ++) { printf(" %08x", result_buf[k]); if (k % 7 == 7 - 1) printf("\n"); } uint32_t expected[7 * 7]; // compute expected result for (int x = 0; x < 7 * 7; x ++) expected[x] = green_screen[x]; for (int y = 0; y < 5; y ++) { for (int x = 0; x < 5; x ++) { float src_r = 1.0 * color[0].red; float src_g = 1.0 * color[0].green; float src_b = 1.0 * color[0].blue; float src_a = bmp_data[y*5+x]/255.0 * color[0].alpha; uint32_t dst_bgra = expected[(y+1)*7 + (x+1)]; float dst_a = ((dst_bgra >> 24) & 0xff) / 255.0; float dst_r = ((dst_bgra >> 16) & 0xff) / 255.0; float dst_g = ((dst_bgra >> 8) & 0xff) / 255.0; float dst_b = ((dst_bgra >> 0) & 0xff) / 255.0; float res_r = src_r * src_a + dst_r * (1.0 - src_a); float res_g = src_g * src_a + dst_g * (1.0 - src_a); float res_b = src_b * src_a + dst_b * (1.0 - src_a); float res_a = src_a * 1.0 + dst_a * src_a; uint32_t r = (res_r * 255.0); uint32_t g = (res_g * 255.0); uint32_t b = (res_b * 255.0); uint32_t a = (res_a * 255.0); if (r > 255) r = 255; if (g > 255) g = 255; if (b > 255) b = 255; if (a > 255) a = 255; expected[(y+1)*7 + (x+1)] = (a << 24) | (r << 16) | (g << 8) | (b); } } printf("--- expected ---\n"); for (int k = 0; k < 7 * 7; k ++) { printf(" %08x", expected[k]); if (k % 7 == 7 - 1) printf("\n"); } printf("=================\n"); printf("--- difference --- \n"); uint32_t max_diff = 0; for (int k = 0; k < 7 * 7; k ++) { uint32_t diff_a = abs(((expected[k] >> 24) & 0xff) - ((result_buf[k] >> 24) & 0xff)); uint32_t diff_r = abs(((expected[k] >> 16) & 0xff) - ((result_buf[k] >> 16) & 0xff)); uint32_t diff_g = abs(((expected[k] >> 8) & 0xff) - ((result_buf[k] >> 8) & 0xff)); uint32_t diff_b = abs(((expected[k] >> 0) & 0xff) - ((result_buf[k] >> 0) & 0xff)); printf(" %08x", (diff_a << 24) + (diff_r << 16) + (diff_g << 8) + (diff_b)); if (k % 7 == 7 - 1) printf("\n"); if (diff_a 
> max_diff) max_diff = diff_a; if (diff_r > max_diff) max_diff = diff_r; if (diff_g > max_diff) max_diff = diff_g; if (diff_b > max_diff) max_diff = diff_b; } printf("=================\n"); if (max_diff > 1) { printf("fail\n"); return 1; } printf("pass\n"); return 0; } libvdpau-va-gl-0.2.1/tests/test-006.c000066400000000000000000000061501224142723700171220ustar00rootroot00000000000000// test-006 // // initializing/finalizing number of times a row with some drawing between. // This test is to reveal thread-safety failure inside VDPAU library. // // Initialization function executed once, but then 30 threads try to do the same work // with rendering simultaneously. #include "vdpau-init.h" #include #include #define THREAD_COUNT 50 VdpDevice device; Window window; void *thread_1_func(void *p) { (void)p; // unused VdpPresentationQueueTarget pq_target; VdpPresentationQueue pq; VdpOutputSurface out_surface; VdpOutputSurface out_surface_2; VdpBitmapSurface bmp_surface; ASSERT_OK(vdp_presentation_queue_target_create_x11(device, window, &pq_target)); ASSERT_OK(vdp_presentation_queue_create(device, pq_target, &pq)); ASSERT_OK(vdp_output_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, 300, 150, &out_surface)); ASSERT_OK(vdp_output_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, 300, 150, &out_surface_2)); ASSERT_OK(vdp_bitmap_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, 300, 150, 1, &bmp_surface)); uint32_t buf[300*150]; const void * const source_data[] = { buf }; uint32_t source_pitches[] = { 4 * 300 }; for (int k = 0; k < 300*150; k ++) { buf[k] = 0xff000000 + (k & 0xffffff); } ASSERT_OK(vdp_bitmap_surface_put_bits_native(bmp_surface, source_data, source_pitches, NULL)); VdpTime vdpTime = 0; ASSERT_OK(vdp_presentation_queue_block_until_surface_idle(pq, out_surface, &vdpTime)); ASSERT_OK(vdp_presentation_queue_get_time(pq, &vdpTime)); VdpOutputSurfaceRenderBlendState blend_state = { .blend_factor_source_color=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, 
.blend_factor_destination_color=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_factor_source_alpha=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, .blend_factor_destination_alpha=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, .blend_equation_color=VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_equation_alpha=VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, .blend_constant = { 0, 0, 0, 0} }; VdpRect source_rect = {0, 0, 300, 150}; VdpRect destination_rect = {0, 0, 300, 150}; ASSERT_OK(vdp_output_surface_render_bitmap_surface(out_surface, &destination_rect, bmp_surface, &source_rect, NULL, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0)); ASSERT_OK(vdp_presentation_queue_display(pq, out_surface, 0, 0, 0)); ASSERT_OK(vdp_output_surface_destroy(out_surface)); ASSERT_OK(vdp_output_surface_destroy(out_surface_2)); ASSERT_OK(vdp_presentation_queue_destroy(pq)); ASSERT_OK(vdp_presentation_queue_target_destroy(pq_target)); ASSERT_OK(vdp_bitmap_surface_destroy(bmp_surface)); return NULL; } int main(void) { pthread_t pt[THREAD_COUNT]; ASSERT_OK(vdpau_init_functions(&device, &window, 0)); for (int k = 0; k < THREAD_COUNT; k ++) pthread_create(&pt[k], NULL, thread_1_func, NULL); for (int k = 0; k < THREAD_COUNT; k ++) pthread_join(pt[k], NULL); ASSERT_OK(vdp_device_destroy(device)); return 0; } libvdpau-va-gl-0.2.1/tests/test-007.c000066400000000000000000000111301224142723700171150ustar00rootroot00000000000000// test-007 // TOUCHES: VdpBitmapSurfaceCreate // TOUCHES: VdpBitmapSurfaceDestroy // TOUCHES: VdpBitmapSurfaceGetParameters // TOUCHES: VdpBitmapSurfaceQueryCapabilities #include "vdpau-init.h" #include int main(void) { VdpDevice device; VdpBitmapSurface bmp_surf1; VdpBitmapSurface bmp_surf2; ASSERT_OK(vdpau_init_functions(&device, NULL, 0)); // invalid device handle assert(VDP_STATUS_INVALID_HANDLE == vdp_bitmap_surface_create(device+1, VDP_RGBA_FORMAT_A8, 13, 13, 1, &bmp_surf1)); // invalid rgba format assert(VDP_STATUS_INVALID_RGBA_FORMAT == 
vdp_bitmap_surface_create(device, -2, 13, 13, 1, &bmp_surf1)); // normal paratemers ASSERT_OK(vdp_bitmap_surface_create(device, VDP_RGBA_FORMAT_B8G8R8A8, 123, 234, 1, &bmp_surf1)); ASSERT_OK(vdp_bitmap_surface_create(device, VDP_RGBA_FORMAT_R8G8B8A8, 345, 456, 0, &bmp_surf2)); uint32_t width, height; VdpBool fa; VdpRGBAFormat rgba_f; // test that getParameters get actual supplied parameters ASSERT_OK(vdp_bitmap_surface_get_parameters(bmp_surf1, &rgba_f, &width, &height, &fa)); assert(VDP_RGBA_FORMAT_B8G8R8A8 == rgba_f); assert(123 == width); assert(234 == height); assert(1 == fa); // test with other surface ASSERT_OK(vdp_bitmap_surface_get_parameters(bmp_surf2, &rgba_f, &width, &height, &fa)); assert(VDP_RGBA_FORMAT_R8G8B8A8 == rgba_f); assert(345 == width); assert(456 == height); assert(0 == fa); // test getParameters with NULLs assert(VDP_STATUS_INVALID_POINTER == vdp_bitmap_surface_get_parameters(bmp_surf1, NULL, &width, &height, &fa)); assert(VDP_STATUS_INVALID_POINTER == vdp_bitmap_surface_get_parameters(bmp_surf1, &rgba_f, NULL, &height, &fa)); assert(VDP_STATUS_INVALID_POINTER == vdp_bitmap_surface_get_parameters(bmp_surf1, &rgba_f, &width, NULL, &fa)); assert(VDP_STATUS_INVALID_POINTER == vdp_bitmap_surface_get_parameters(bmp_surf1, &rgba_f, &width, &height, NULL)); // test with invalid bitmap handle assert(VDP_STATUS_INVALID_HANDLE == vdp_bitmap_surface_get_parameters(device, &rgba_f, &width, &height, &fa)); VdpBool is_supported; // testing query capabilities assert(VDP_STATUS_INVALID_HANDLE == vdp_bitmap_surface_query_capabilities(device+1, VDP_RGBA_FORMAT_A8, &is_supported, &width, &height)); assert(VDP_STATUS_INVALID_POINTER == vdp_bitmap_surface_query_capabilities(device, VDP_RGBA_FORMAT_A8, NULL, &width, &height)); assert(VDP_STATUS_INVALID_POINTER == vdp_bitmap_surface_query_capabilities(device, VDP_RGBA_FORMAT_A8, &is_supported, NULL, &height)); assert(VDP_STATUS_INVALID_POINTER == vdp_bitmap_surface_query_capabilities(device, VDP_RGBA_FORMAT_A8, 
&is_supported, &width, NULL)); // querying various formats ASSERT_OK(vdp_bitmap_surface_query_capabilities(device, VDP_RGBA_FORMAT_B8G8R8A8, &is_supported, &width, &height)); assert(is_supported); assert(width > 0); assert(height > 0); ASSERT_OK(vdp_bitmap_surface_query_capabilities(device, VDP_RGBA_FORMAT_R8G8B8A8, &is_supported, &width, &height)); assert(is_supported); assert(width > 0); assert(height > 0); ASSERT_OK(vdp_bitmap_surface_query_capabilities(device, VDP_RGBA_FORMAT_R10G10B10A2, &is_supported, &width, &height)); assert(is_supported); assert(width > 0); assert(height > 0); ASSERT_OK(vdp_bitmap_surface_query_capabilities(device, VDP_RGBA_FORMAT_B10G10R10A2, &is_supported, &width, &height)); assert(is_supported); assert(width > 0); assert(height > 0); ASSERT_OK(vdp_bitmap_surface_query_capabilities(device, VDP_RGBA_FORMAT_A8, &is_supported, &width, &height)); assert(is_supported); assert(width > 0); assert(height > 0); // query wrong format ASSERT_OK(vdp_bitmap_surface_query_capabilities(device, 9000, &is_supported, &width, &height)); assert (0 == is_supported); // try to destroy wrong surface assert (VDP_STATUS_INVALID_HANDLE == vdp_bitmap_surface_destroy(-2)); assert (VDP_STATUS_INVALID_HANDLE == vdp_bitmap_surface_destroy(device)); assert (VDP_STATUS_INVALID_HANDLE == vdp_bitmap_surface_destroy(bmp_surf1 + 43000)); // really destroy surfaces ASSERT_OK(vdp_bitmap_surface_destroy(bmp_surf1)); ASSERT_OK(vdp_bitmap_surface_destroy(bmp_surf2)); ASSERT_OK(vdp_device_destroy(device)); printf("pass\n"); return 0; } libvdpau-va-gl-0.2.1/tests/test-008.c000066400000000000000000000064001224142723700171220ustar00rootroot00000000000000// test-008 // Progressively create larger and larger bitmap surfaces, up to maximum // allowed size. First square ones, then stretched in both directions // in turn // TOUCHES: VdpBitmapSurfaceCreate // TOUCHES: VdpBitmapSurfaceQueryCapabilities #include "vdpau-init.h" #include #define MIN(x,y) ((x) < (y) ? 
(x) : (y)) #define MAX(x,y) ((x) > (y) ? (x) : (y)) void test_bitmaps_of_format(VdpDevice device, int fmt, const char *fmt_name, uint32_t max_width, uint32_t max_height) { VdpBitmapSurface bmp_surf1; const uint32_t max_square_size = MIN(max_width, max_height); const uint32_t step = 128; // trying square surface for (uint32_t k = 0; k < max_square_size + step; (k < step) ? (k++) : (k+=step)) { for (uint32_t freq = 0; freq <= 1; freq ++) { const uint32_t size = MAX(1, MIN(k, max_square_size)); printf("trying square %s bitmap %d x %d (%d)\n", fmt_name, size, size, freq); ASSERT_OK(vdp_bitmap_surface_create(device, fmt, size, size, freq, &bmp_surf1)); ASSERT_OK(vdp_bitmap_surface_destroy(bmp_surf1)); } } // width stretched for (uint32_t k = 0; k < max_width + step; (k < step) ? (k++) : (k+=step)) { for (uint32_t freq = 0; freq <= 1; freq ++) { const uint32_t size = MAX(1, MIN(k, max_width)); printf("trying width stretched %s bitmap %d x %d (%d)\n", fmt_name, size, 128, freq); ASSERT_OK(vdp_bitmap_surface_create(device, fmt, size, 128, freq, &bmp_surf1)); ASSERT_OK(vdp_bitmap_surface_destroy(bmp_surf1)); } } // height stretched for (uint32_t k = 0; k < max_height + step; (k < step) ? 
(k++) : (k+=step)) { for (uint32_t freq = 0; freq <= 1; freq ++) { const uint32_t size = MAX(1, MIN(k, max_height)); printf("trying height stretched %s bitmap %d x %d (%d)\n", fmt_name, 128, size, freq); ASSERT_OK(vdp_bitmap_surface_create(device, fmt, 128, size, freq, &bmp_surf1)); ASSERT_OK(vdp_bitmap_surface_destroy(bmp_surf1)); } } } int main(void) { VdpDevice device; ASSERT_OK(vdpau_init_functions(&device, NULL, 0)); uint32_t max_width, max_height; VdpBool is_supported; // querying max_size ASSERT_OK(vdp_bitmap_surface_query_capabilities(device, VDP_RGBA_FORMAT_B8G8R8A8, &is_supported, &max_width, &max_height)); assert(is_supported); assert(max_width > 0); assert(max_height > 0); test_bitmaps_of_format(device, VDP_RGBA_FORMAT_B8G8R8A8, "VDP_RGBA_FORMAT_B8G8R8A8", max_width, max_height); test_bitmaps_of_format(device, VDP_RGBA_FORMAT_R8G8B8A8, "VDP_RGBA_FORMAT_R8G8B8A8", max_width, max_height); test_bitmaps_of_format(device, VDP_RGBA_FORMAT_R10G10B10A2, "VDP_RGBA_FORMAT_R10G10B10A2", max_width, max_height); test_bitmaps_of_format(device, VDP_RGBA_FORMAT_B10G10R10A2, "VDP_RGBA_FORMAT_B10G10R10A2", max_width, max_height); test_bitmaps_of_format(device, VDP_RGBA_FORMAT_A8, "VDP_RGBA_FORMAT_A8", max_width, max_height); ASSERT_OK(vdp_device_destroy(device)); printf("pass\n"); return 0; } libvdpau-va-gl-0.2.1/tests/test-009.c000066400000000000000000000006211224142723700171220ustar00rootroot00000000000000// test-009 // Create and destroy vdp device many times a row. // Intended to check X resource leakage introduced by library. 
#include "vdpau-init.h" #include int main(void) { VdpDevice device; for (int k = 0; k < 3000; k ++) { ASSERT_OK(vdpau_init_functions(&device, NULL, 0)); ASSERT_OK(vdp_device_destroy(device)); } printf("pass\n"); return 0; } libvdpau-va-gl-0.2.1/tests/test-010.c000066400000000000000000000012311224142723700171100ustar00rootroot00000000000000// test-010 // creating and destroying couple of VdpDevice from different threads // caused deadlocks and crashes #include "vdpau-init.h" #include #include VdpDevice device1; VdpDevice device2; void * thread_1(void *param) { (void)param; ASSERT_OK(vdp_device_destroy(device1)); return NULL; } int main(void) { ASSERT_OK(vdpau_init_functions(&device1, NULL, 0)); ASSERT_OK(vdpau_init_functions(&device2, NULL, 0)); pthread_t thread_id_1; pthread_create(&thread_id_1, NULL, thread_1, NULL); pthread_join(thread_id_1, NULL); ASSERT_OK(vdp_device_destroy(device2)); printf("pass\n"); return 0; } libvdpau-va-gl-0.2.1/tests/vdpau-init.c000066400000000000000000000271101224142723700177170ustar00rootroot00000000000000#include "vdpau-init.h" VdpGetErrorString *vdp_get_error_string = NULL; VdpGetApiVersion *vdp_get_api_version = NULL; VdpGetInformationString *vdp_get_information_string = NULL; VdpDeviceDestroy *vdp_device_destroy = NULL; VdpGenerateCSCMatrix *vdp_generate_csc_matrix = NULL; VdpVideoSurfaceQueryCapabilities *vdp_video_surface_query_capabilities = NULL; VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities *vdp_video_surface_query_get_put_bits_y_cb_cr_capabilities = NULL; VdpVideoSurfaceCreate *vdp_video_surface_create = NULL; VdpVideoSurfaceDestroy *vdp_video_surface_destroy = NULL; VdpVideoSurfaceGetParameters *vdp_video_surface_get_parameters = NULL; VdpVideoSurfaceGetBitsYCbCr *vdp_video_surface_get_bits_y_cb_cr = NULL; VdpVideoSurfacePutBitsYCbCr *vdp_video_surface_put_bits_y_cb_cr = NULL; VdpOutputSurfaceQueryCapabilities *vdp_output_surface_query_capabilities = NULL; VdpOutputSurfaceQueryGetPutBitsNativeCapabilities 
*vdp_output_surface_query_get_put_bits_native_capabilities = NULL; VdpOutputSurfaceQueryPutBitsIndexedCapabilities *vdp_output_surface_query_put_bits_indexed_capabilities = NULL; VdpOutputSurfaceQueryPutBitsYCbCrCapabilities *vdp_output_surface_query_put_bits_y_cb_cr_capabilities = NULL; VdpOutputSurfaceCreate *vdp_output_surface_create = NULL; VdpOutputSurfaceDestroy *vdp_output_surface_destroy = NULL; VdpOutputSurfaceGetParameters *vdp_output_surface_get_parameters = NULL; VdpOutputSurfaceGetBitsNative *vdp_output_surface_get_bits_native = NULL; VdpOutputSurfacePutBitsNative *vdp_output_surface_put_bits_native = NULL; VdpOutputSurfacePutBitsIndexed *vdp_output_surface_put_bits_indexed = NULL; VdpOutputSurfacePutBitsYCbCr *vdp_output_surface_put_bits_y_cb_cr = NULL; VdpBitmapSurfaceQueryCapabilities *vdp_bitmap_surface_query_capabilities = NULL; VdpBitmapSurfaceCreate *vdp_bitmap_surface_create = NULL; VdpBitmapSurfaceDestroy *vdp_bitmap_surface_destroy = NULL; VdpBitmapSurfaceGetParameters *vdp_bitmap_surface_get_parameters = NULL; VdpBitmapSurfacePutBitsNative *vdp_bitmap_surface_put_bits_native = NULL; VdpOutputSurfaceRenderOutputSurface *vdp_output_surface_render_output_surface = NULL; VdpOutputSurfaceRenderBitmapSurface *vdp_output_surface_render_bitmap_surface = NULL; VdpDecoderQueryCapabilities *vdp_decoder_query_capabilities = NULL; VdpDecoderCreate *vdp_decoder_create = NULL; VdpDecoderDestroy *vdp_decoder_destroy = NULL; VdpDecoderGetParameters *vdp_decoder_get_parameters = NULL; VdpDecoderRender *vdp_decoder_render = NULL; VdpVideoMixerQueryFeatureSupport *vdp_video_mixer_query_feature_support = NULL; VdpVideoMixerQueryParameterSupport *vdp_video_mixer_query_parameter_support = NULL; VdpVideoMixerQueryAttributeSupport *vdp_video_mixer_query_attribute_support = NULL; VdpVideoMixerQueryParameterValueRange *vdp_video_mixer_query_parameter_value_range = NULL; VdpVideoMixerQueryAttributeValueRange *vdp_video_mixer_query_attribute_value_range = NULL; 
VdpVideoMixerCreate *vdp_video_mixer_create = NULL; VdpVideoMixerSetFeatureEnables *vdp_video_mixer_set_feature_enables = NULL; VdpVideoMixerSetAttributeValues *vdp_video_mixer_set_attribute_values = NULL; VdpVideoMixerGetFeatureSupport *vdp_video_mixer_get_feature_support = NULL; VdpVideoMixerGetFeatureEnables *vdp_video_mixer_get_feature_enables = NULL; VdpVideoMixerGetParameterValues *vdp_video_mixer_get_parameter_values = NULL; VdpVideoMixerGetAttributeValues *vdp_video_mixer_get_attribute_values = NULL; VdpVideoMixerDestroy *vdp_video_mixer_destroy = NULL; VdpVideoMixerRender *vdp_video_mixer_render = NULL; VdpPresentationQueueTargetDestroy *vdp_presentation_queue_target_destroy = NULL; VdpPresentationQueueCreate *vdp_presentation_queue_create = NULL; VdpPresentationQueueDestroy *vdp_presentation_queue_destroy = NULL; VdpPresentationQueueSetBackgroundColor *vdp_presentation_queue_set_background_color = NULL; VdpPresentationQueueGetBackgroundColor *vdp_presentation_queue_get_background_color = NULL; VdpPresentationQueueGetTime *vdp_presentation_queue_get_time = NULL; VdpPresentationQueueDisplay *vdp_presentation_queue_display = NULL; VdpPresentationQueueBlockUntilSurfaceIdle *vdp_presentation_queue_block_until_surface_idle = NULL; VdpPresentationQueueQuerySurfaceStatus *vdp_presentation_queue_query_surface_status = NULL; VdpPreemptionCallbackRegister *vdp_preemption_callback_register = NULL; VdpGetProcAddress *vdp_get_proc_address = NULL; VdpPresentationQueueTargetCreateX11 *vdp_presentation_queue_target_create_x11 = NULL; Display *saved_dpy = NULL; Window saved_window = 0; VdpStatus vdpau_init_functions(VdpDevice *device, Window *window, int do_map_window) { Display *dpy; if (!saved_dpy) { dpy = XOpenDisplay(NULL); saved_dpy = dpy; } else { dpy = saved_dpy; } if (window) { Window root = XDefaultRootWindow(dpy); if (!saved_window) { saved_window = XCreateSimpleWindow(dpy, root, 0, 0, 300, 300, 0, 0, 0); } *window = saved_window; if (do_map_window) 
XMapWindow(dpy, *window); } XSync(dpy, 0); VdpStatus st = vdp_device_create_x11(dpy, 0, device, &vdp_get_proc_address); if (VDP_STATUS_OK != st) return st; if (!vdp_get_proc_address) return VDP_STATUS_ERROR; #define GET_ADDR(id, ptr) \ do { \ st = vdp_get_proc_address(*device, id, (void **)&ptr); \ assert(VDP_STATUS_OK==st); \ assert(NULL != ptr); \ } while(0) GET_ADDR(VDP_FUNC_ID_GET_ERROR_STRING, vdp_get_error_string); GET_ADDR(VDP_FUNC_ID_GET_API_VERSION, vdp_get_api_version); GET_ADDR(VDP_FUNC_ID_GET_INFORMATION_STRING, vdp_get_information_string); GET_ADDR(VDP_FUNC_ID_DEVICE_DESTROY, vdp_device_destroy); GET_ADDR(VDP_FUNC_ID_GENERATE_CSC_MATRIX, vdp_generate_csc_matrix); GET_ADDR(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES, vdp_video_surface_query_capabilities); GET_ADDR(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES, vdp_video_surface_query_get_put_bits_y_cb_cr_capabilities); GET_ADDR(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, vdp_video_surface_create); GET_ADDR(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, vdp_video_surface_destroy); GET_ADDR(VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS, vdp_video_surface_get_parameters); GET_ADDR(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, vdp_video_surface_get_bits_y_cb_cr); GET_ADDR(VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR, vdp_video_surface_put_bits_y_cb_cr); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES, vdp_output_surface_query_capabilities); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES, vdp_output_surface_query_get_put_bits_native_capabilities); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES, vdp_output_surface_query_put_bits_indexed_capabilities); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES, vdp_output_surface_query_put_bits_y_cb_cr_capabilities); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_CREATE, vdp_output_surface_create); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY, vdp_output_surface_destroy); 
GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS, vdp_output_surface_get_parameters); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE, vdp_output_surface_get_bits_native); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE, vdp_output_surface_put_bits_native); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED, vdp_output_surface_put_bits_indexed); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR, vdp_output_surface_put_bits_y_cb_cr); GET_ADDR(VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES, vdp_bitmap_surface_query_capabilities); GET_ADDR(VDP_FUNC_ID_BITMAP_SURFACE_CREATE, vdp_bitmap_surface_create); GET_ADDR(VDP_FUNC_ID_BITMAP_SURFACE_DESTROY, vdp_bitmap_surface_destroy); GET_ADDR(VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS, vdp_bitmap_surface_get_parameters); GET_ADDR(VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE, vdp_bitmap_surface_put_bits_native); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE, vdp_output_surface_render_output_surface); GET_ADDR(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE, vdp_output_surface_render_bitmap_surface); // VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_VIDEO_SURFACE_LUMA not present? 
GET_ADDR(VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES, vdp_decoder_query_capabilities); GET_ADDR(VDP_FUNC_ID_DECODER_CREATE, vdp_decoder_create); GET_ADDR(VDP_FUNC_ID_DECODER_DESTROY, vdp_decoder_destroy); GET_ADDR(VDP_FUNC_ID_DECODER_GET_PARAMETERS, vdp_decoder_get_parameters); GET_ADDR(VDP_FUNC_ID_DECODER_RENDER, vdp_decoder_render); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT, vdp_video_mixer_query_feature_support); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT, vdp_video_mixer_query_parameter_support); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT, vdp_video_mixer_query_attribute_support); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE, vdp_video_mixer_query_parameter_value_range); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE, vdp_video_mixer_query_attribute_value_range); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_CREATE, vdp_video_mixer_create); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES, vdp_video_mixer_set_feature_enables); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES, vdp_video_mixer_set_attribute_values); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT, vdp_video_mixer_get_feature_support); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES, vdp_video_mixer_get_feature_enables); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES, vdp_video_mixer_get_parameter_values); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES, vdp_video_mixer_get_attribute_values); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_DESTROY, vdp_video_mixer_destroy); GET_ADDR(VDP_FUNC_ID_VIDEO_MIXER_RENDER, vdp_video_mixer_render); GET_ADDR(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY, vdp_presentation_queue_target_destroy); GET_ADDR(VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE, vdp_presentation_queue_create); GET_ADDR(VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY, vdp_presentation_queue_destroy); GET_ADDR(VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR, vdp_presentation_queue_set_background_color); 
GET_ADDR(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR, vdp_presentation_queue_get_background_color); GET_ADDR(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME, vdp_presentation_queue_get_time); GET_ADDR(VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY, vdp_presentation_queue_display); GET_ADDR(VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE, vdp_presentation_queue_block_until_surface_idle); GET_ADDR(VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS, vdp_presentation_queue_query_surface_status); GET_ADDR(VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER, vdp_preemption_callback_register); GET_ADDR(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11, vdp_presentation_queue_target_create_x11); return VDP_STATUS_OK; } libvdpau-va-gl-0.2.1/tests/vdpau-init.h000066400000000000000000000123731224142723700177310ustar00rootroot00000000000000#ifndef __VDPAU_INIT_H #define __VDPAU_INIT_H #ifdef NDEBUG #undef NDEBUG #endif #include #include #include #define ASSERT_OK(expr) \ do { \ VdpStatus status = expr; \ assert (VDP_STATUS_OK == status); \ } while (0) extern VdpGetErrorString *vdp_get_error_string; extern VdpGetApiVersion *vdp_get_api_version; extern VdpGetInformationString *vdp_get_information_string; extern VdpDeviceDestroy *vdp_device_destroy; extern VdpGenerateCSCMatrix *vdp_generate_csc_matrix; extern VdpVideoSurfaceQueryCapabilities *vdp_video_surface_query_capabilities; extern VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities *vdp_video_surface_query_get_put_bits_y_cb_cr_capabilities; extern VdpVideoSurfaceCreate *vdp_video_surface_create; extern VdpVideoSurfaceDestroy *vdp_video_surface_destroy; extern VdpVideoSurfaceGetParameters *vdp_video_surface_get_parameters; extern VdpVideoSurfaceGetBitsYCbCr *vdp_video_surface_get_bits_y_cb_cr; extern VdpVideoSurfacePutBitsYCbCr *vdp_video_surface_put_bits_y_cb_cr; extern VdpOutputSurfaceQueryCapabilities *vdp_output_surface_query_capabilities; extern VdpOutputSurfaceQueryGetPutBitsNativeCapabilities 
*vdp_output_surface_query_get_put_bits_native_capabilities; extern VdpOutputSurfaceQueryPutBitsIndexedCapabilities *vdp_output_surface_query_put_bits_indexed_capabilities; extern VdpOutputSurfaceQueryPutBitsYCbCrCapabilities *vdp_output_surface_query_put_bits_y_cb_cr_capabilities; extern VdpOutputSurfaceCreate *vdp_output_surface_create; extern VdpOutputSurfaceDestroy *vdp_output_surface_destroy; extern VdpOutputSurfaceGetParameters *vdp_output_surface_get_parameters; extern VdpOutputSurfaceGetBitsNative *vdp_output_surface_get_bits_native; extern VdpOutputSurfacePutBitsNative *vdp_output_surface_put_bits_native; extern VdpOutputSurfacePutBitsIndexed *vdp_output_surface_put_bits_indexed; extern VdpOutputSurfacePutBitsYCbCr *vdp_output_surface_put_bits_y_cb_cr; extern VdpBitmapSurfaceQueryCapabilities *vdp_bitmap_surface_query_capabilities; extern VdpBitmapSurfaceCreate *vdp_bitmap_surface_create; extern VdpBitmapSurfaceDestroy *vdp_bitmap_surface_destroy; extern VdpBitmapSurfaceGetParameters *vdp_bitmap_surface_get_parameters; extern VdpBitmapSurfacePutBitsNative *vdp_bitmap_surface_put_bits_native; extern VdpOutputSurfaceRenderOutputSurface *vdp_output_surface_render_output_surface; extern VdpOutputSurfaceRenderBitmapSurface *vdp_output_surface_render_bitmap_surface; extern VdpDecoderQueryCapabilities *vdp_decoder_query_capabilities; extern VdpDecoderCreate *vdp_decoder_create; extern VdpDecoderDestroy *vdp_decoder_destroy; extern VdpDecoderGetParameters *vdp_decoder_get_parameters; extern VdpDecoderRender *vdp_decoder_render; extern VdpVideoMixerQueryFeatureSupport *vdp_video_mixer_query_feature_support; extern VdpVideoMixerQueryParameterSupport *vdp_video_mixer_query_parameter_support; extern VdpVideoMixerQueryAttributeSupport *vdp_video_mixer_query_attribute_support; extern VdpVideoMixerQueryParameterValueRange *vdp_video_mixer_query_parameter_value_range; extern VdpVideoMixerQueryAttributeValueRange *vdp_video_mixer_query_attribute_value_range; extern 
VdpVideoMixerCreate *vdp_video_mixer_create; extern VdpVideoMixerSetFeatureEnables *vdp_video_mixer_set_feature_enables; extern VdpVideoMixerSetAttributeValues *vdp_video_mixer_set_attribute_values; extern VdpVideoMixerGetFeatureSupport *vdp_video_mixer_get_feature_support; extern VdpVideoMixerGetFeatureEnables *vdp_video_mixer_get_feature_enables; extern VdpVideoMixerGetParameterValues *vdp_video_mixer_get_parameter_values; extern VdpVideoMixerGetAttributeValues *vdp_video_mixer_get_attribute_values; extern VdpVideoMixerDestroy *vdp_video_mixer_destroy; extern VdpVideoMixerRender *vdp_video_mixer_render; extern VdpPresentationQueueTargetDestroy *vdp_presentation_queue_target_destroy; extern VdpPresentationQueueCreate *vdp_presentation_queue_create; extern VdpPresentationQueueDestroy *vdp_presentation_queue_destroy; extern VdpPresentationQueueSetBackgroundColor *vdp_presentation_queue_set_background_color; extern VdpPresentationQueueGetBackgroundColor *vdp_presentation_queue_get_background_color; extern VdpPresentationQueueGetTime *vdp_presentation_queue_get_time; extern VdpPresentationQueueDisplay *vdp_presentation_queue_display; extern VdpPresentationQueueBlockUntilSurfaceIdle *vdp_presentation_queue_block_until_surface_idle; extern VdpPresentationQueueQuerySurfaceStatus *vdp_presentation_queue_query_surface_status; extern VdpPreemptionCallbackRegister *vdp_preemption_callback_register; extern VdpGetProcAddress *vdp_get_proc_address; extern VdpPresentationQueueTargetCreateX11 *vdp_presentation_queue_target_create_x11; VdpStatus vdpau_init_functions(VdpDevice *device, Window *window, int do_map_window); #endif /* __VDPAU_INIT_H */ libvdpau-va-gl-0.2.1/vdpau-decoder.c000066400000000000000000000627371224142723700172350ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #include #include #include "ctx-stack.h" #include "h264-parse.h" #include "vdpau-trace.h" #include "vdpau-soft.h" VdpStatus softVdpDecoderCreate(VdpDevice device, VdpDecoderProfile profile, uint32_t width, uint32_t height, uint32_t max_references, VdpDecoder *decoder) { VdpStatus err_code; if (!decoder) return VDP_STATUS_INVALID_POINTER; VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == deviceData) return VDP_STATUS_INVALID_HANDLE; if (!deviceData->va_available) { err_code = VDP_STATUS_INVALID_DECODER_PROFILE; goto quit; } VADisplay va_dpy = deviceData->va_dpy; VdpDecoderData *data = calloc(1, sizeof(VdpDecoderData)); if (NULL == data) { err_code = VDP_STATUS_RESOURCES; goto quit; } data->type = HANDLETYPE_DECODER; data->device = deviceData; data->profile = profile; data->width = width; data->height = height; data->max_references = max_references; // initialize free_list. Initially they all free data->free_list_head = -1; for (int k = 0; k < MAX_RENDER_TARGETS; k ++) { free_list_push(data->free_list, &data->free_list_head, k); } VAProfile va_profile; VAStatus status; int final_try = 0; VdpDecoderProfile next_profile = profile; // Try to create decoder for asked profile. On failure try to create more advanced one while (! 
final_try) { profile = next_profile; switch (profile) { case VDP_DECODER_PROFILE_H264_BASELINE: va_profile = VAProfileH264Baseline; data->num_render_targets = NUM_RENDER_TARGETS_H264; next_profile = VDP_DECODER_PROFILE_H264_MAIN; break; case VDP_DECODER_PROFILE_H264_MAIN: va_profile = VAProfileH264Main; data->num_render_targets = NUM_RENDER_TARGETS_H264; next_profile = VDP_DECODER_PROFILE_H264_HIGH; break; case VDP_DECODER_PROFILE_H264_HIGH: va_profile = VAProfileH264High; data->num_render_targets = NUM_RENDER_TARGETS_H264; // there is no more advanced profile, so it's final try final_try = 1; break; default: traceError("error (softVdpDecoderCreate): decoder %s not implemented\n", reverse_decoder_profile(profile)); err_code = VDP_STATUS_INVALID_DECODER_PROFILE; goto quit_free_data; } status = vaCreateConfig(va_dpy, va_profile, VAEntrypointVLD, NULL, 0, &data->config_id); if (VA_STATUS_SUCCESS == status) // break loop if decoder created break; } if (VA_STATUS_SUCCESS != status) { err_code = VDP_STATUS_ERROR; goto quit_free_data; } // Create surfaces. All video surfaces created here, rather than in VdpVideoSurfaceCreate. // VAAPI requires surfaces to be bound with context on its creation time, while VDPAU allows // to do it later. So here is a trick: VDP video surfaces get their va_surf dynamically in // DecoderRender. 
// TODO: check format of surfaces created #if VA_CHECK_VERSION(0, 34, 0) status = vaCreateSurfaces(va_dpy, VA_RT_FORMAT_YUV420, width, height, data->render_targets, data->num_render_targets, NULL, 0); #else status = vaCreateSurfaces(va_dpy, width, height, VA_RT_FORMAT_YUV420, data->num_render_targets, data->render_targets); #endif if (VA_STATUS_SUCCESS != status) { err_code = VDP_STATUS_ERROR; goto quit_free_data; } status = vaCreateContext(va_dpy, data->config_id, width, height, VA_PROGRESSIVE, data->render_targets, data->num_render_targets, &data->context_id); if (VA_STATUS_SUCCESS != status) { err_code = VDP_STATUS_ERROR; goto quit_free_data; } deviceData->refcount ++; *decoder = handle_insert(data); err_code = VDP_STATUS_OK; goto quit; quit_free_data: free(data); quit: handle_release(device); return err_code; } VdpStatus softVdpDecoderDestroy(VdpDecoder decoder) { VdpDecoderData *decoderData = handle_acquire(decoder, HANDLETYPE_DECODER); if (NULL == decoderData) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = decoderData->device; if (deviceData->va_available) { VADisplay va_dpy = deviceData->va_dpy; vaDestroySurfaces(va_dpy, decoderData->render_targets, decoderData->num_render_targets); vaDestroyContext(va_dpy, decoderData->context_id); vaDestroyConfig(va_dpy, decoderData->config_id); } handle_expunge(decoder); deviceData->refcount --; free(decoderData); return VDP_STATUS_OK; } VdpStatus softVdpDecoderGetParameters(VdpDecoder decoder, VdpDecoderProfile *profile, uint32_t *width, uint32_t *height) { if (!profile || !width || !height) return VDP_STATUS_INVALID_HANDLE; VdpDecoderData *decoderData = handle_acquire(decoder, HANDLETYPE_DECODER); if (!decoderData) return VDP_STATUS_INVALID_HANDLE; *profile = decoderData->profile; *width = decoderData->width; *height = decoderData->height; handle_release(decoder); return VDP_STATUS_OK; } static VdpStatus h264_translate_reference_frames(VdpVideoSurfaceData *dstSurfData, VdpDecoder decoder, VdpDecoderData 
*decoderData, VAPictureParameterBufferH264 *pic_param, const VdpPictureInfoH264 *vdppi) { // take new VA surface from buffer if needed if (VA_INVALID_SURFACE == dstSurfData->va_surf) { int idx = free_list_pop(decoderData->free_list, &decoderData->free_list_head); if (-1 == idx) return VDP_STATUS_RESOURCES; dstSurfData->decoder = decoder; dstSurfData->va_surf = decoderData->render_targets[idx]; dstSurfData->rt_idx = idx; } // current frame pic_param->CurrPic.picture_id = dstSurfData->va_surf; pic_param->CurrPic.frame_idx = vdppi->frame_num; pic_param->CurrPic.flags = vdppi->is_reference ? VA_PICTURE_H264_SHORT_TERM_REFERENCE : 0; if (vdppi->field_pic_flag) { pic_param->CurrPic.flags |= vdppi->bottom_field_flag ? VA_PICTURE_H264_BOTTOM_FIELD : VA_PICTURE_H264_TOP_FIELD; } pic_param->CurrPic.TopFieldOrderCnt = vdppi->field_order_cnt[0]; pic_param->CurrPic.BottomFieldOrderCnt = vdppi->field_order_cnt[1]; // mark all pictures invalid preliminary for (int k = 0; k < 16; k ++) reset_va_picture_h264(&pic_param->ReferenceFrames[k]); // reference frames for (int k = 0; k < vdppi->num_ref_frames; k ++) { if (VDP_INVALID_HANDLE == vdppi->referenceFrames[k].surface) { reset_va_picture_h264(&pic_param->ReferenceFrames[k]); continue; } VdpReferenceFrameH264 const *vdp_ref = &(vdppi->referenceFrames[k]); VdpVideoSurfaceData *vdpSurfData = handle_acquire(vdp_ref->surface, HANDLETYPE_VIDEO_SURFACE); VAPictureH264 *va_ref = &(pic_param->ReferenceFrames[k]); if (NULL == vdpSurfData) { traceError("error (h264_translate_reference_frames): NULL == vdpSurfData"); return VDP_STATUS_ERROR; } // take new VA surface from buffer if needed if (VA_INVALID_SURFACE == vdpSurfData->va_surf) { int idx = free_list_pop(decoderData->free_list, &decoderData->free_list_head); if (-1 == idx) return VDP_STATUS_RESOURCES; dstSurfData->decoder = decoder; dstSurfData->va_surf = decoderData->render_targets[idx]; dstSurfData->rt_idx = idx; } va_ref->picture_id = vdpSurfData->va_surf; va_ref->frame_idx = 
vdp_ref->frame_idx; va_ref->flags = vdp_ref->is_long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE : VA_PICTURE_H264_SHORT_TERM_REFERENCE; if (vdp_ref->top_is_reference && vdp_ref->bottom_is_reference) { // Full frame. This block intentionally left blank. No flags set. } else { if (vdp_ref->top_is_reference) va_ref->flags |= VA_PICTURE_H264_TOP_FIELD; else va_ref->flags |= VA_PICTURE_H264_BOTTOM_FIELD; } va_ref->TopFieldOrderCnt = vdp_ref->field_order_cnt[0]; va_ref->BottomFieldOrderCnt = vdp_ref->field_order_cnt[1]; handle_release(vdp_ref->surface); } return VDP_STATUS_OK; } VdpStatus softVdpDecoderQueryCapabilities(VdpDevice device, VdpDecoderProfile profile, VdpBool *is_supported, uint32_t *max_level, uint32_t *max_macroblocks, uint32_t *max_width, uint32_t *max_height) { VdpStatus err_code; if (!is_supported || !max_level || !max_macroblocks || !max_width || !max_height) return VDP_STATUS_INVALID_POINTER; VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == deviceData) return VDP_STATUS_INVALID_HANDLE; *max_level = 0; *max_macroblocks = 0; *max_width = 0; *max_height = 0; if (!deviceData->va_available) { *is_supported = 0; err_code = VDP_STATUS_OK; goto quit; } VAProfile *va_profile_list = malloc(sizeof(VAProfile) * vaMaxNumProfiles(deviceData->va_dpy)); if (NULL == va_profile_list) { err_code = VDP_STATUS_RESOURCES; goto quit; } int num_profiles; VAStatus status = vaQueryConfigProfiles(deviceData->va_dpy, va_profile_list, &num_profiles); if (VA_STATUS_SUCCESS != status) { free(va_profile_list); err_code = VDP_STATUS_ERROR; goto quit; } struct { int mpeg2_simple; int mpeg2_main; int h264_baseline; int h264_main; int h264_high; int vc1_simple; int vc1_main; int vc1_advanced; } available_profiles = {0, 0, 0, 0, 0, 0, 0, 0}; for (int k = 0; k < num_profiles; k ++) { switch (va_profile_list[k]) { case VAProfileMPEG2Main: available_profiles.mpeg2_main = 0; /* fall through */ case VAProfileMPEG2Simple: available_profiles.mpeg2_simple = 0; 
break; case VAProfileH264High: available_profiles.h264_high = 1; /* fall through */ case VAProfileH264Main: available_profiles.h264_main = 1; /* fall through */ case VAProfileH264Baseline: available_profiles.h264_baseline = 1; /* fall though */ case VAProfileH264ConstrainedBaseline: break; case VAProfileVC1Advanced: available_profiles.vc1_advanced = 0; /* fall though */ case VAProfileVC1Main: available_profiles.vc1_main = 0; /* fall though */ case VAProfileVC1Simple: available_profiles.vc1_simple = 0; break; // unhandled profiles case VAProfileH263Baseline: case VAProfileJPEGBaseline: default: // do nothing break; } } free(va_profile_list); *is_supported = 0; // TODO: How to determine max width and height width libva? *max_width = 2048; *max_height = 2048; *max_macroblocks = 16384; switch (profile) { case VDP_DECODER_PROFILE_MPEG2_SIMPLE: *is_supported = available_profiles.mpeg2_simple; *max_level = VDP_DECODER_LEVEL_MPEG2_HL; break; case VDP_DECODER_PROFILE_MPEG2_MAIN: *is_supported = available_profiles.mpeg2_main; *max_level = VDP_DECODER_LEVEL_MPEG2_HL; break; case VDP_DECODER_PROFILE_H264_BASELINE: *is_supported = available_profiles.h264_baseline; // TODO: Do underlying libva really support 5.1? 
*max_level = VDP_DECODER_LEVEL_H264_5_1; break; case VDP_DECODER_PROFILE_H264_MAIN: *is_supported = available_profiles.h264_main; *max_level = VDP_DECODER_LEVEL_H264_5_1; break; case VDP_DECODER_PROFILE_H264_HIGH: *is_supported = available_profiles.h264_high; *max_level = VDP_DECODER_LEVEL_H264_5_1; break; case VDP_DECODER_PROFILE_VC1_SIMPLE: *is_supported = available_profiles.vc1_simple; *max_level = VDP_DECODER_LEVEL_VC1_SIMPLE_MEDIUM; break; case VDP_DECODER_PROFILE_VC1_MAIN: *is_supported = available_profiles.vc1_main; *max_level = VDP_DECODER_LEVEL_VC1_MAIN_HIGH; break; case VDP_DECODER_PROFILE_VC1_ADVANCED: *is_supported = available_profiles.vc1_advanced; *max_level = VDP_DECODER_LEVEL_VC1_ADVANCED_L4; break; // unsupported case VDP_DECODER_PROFILE_MPEG1: case VDP_DECODER_PROFILE_MPEG4_PART2_SP: case VDP_DECODER_PROFILE_MPEG4_PART2_ASP: case VDP_DECODER_PROFILE_DIVX4_QMOBILE: case VDP_DECODER_PROFILE_DIVX4_MOBILE: case VDP_DECODER_PROFILE_DIVX4_HOME_THEATER: case VDP_DECODER_PROFILE_DIVX4_HD_1080P: case VDP_DECODER_PROFILE_DIVX5_QMOBILE: case VDP_DECODER_PROFILE_DIVX5_MOBILE: case VDP_DECODER_PROFILE_DIVX5_HOME_THEATER: case VDP_DECODER_PROFILE_DIVX5_HD_1080P: default: break; } err_code = VDP_STATUS_OK; quit: handle_release(device); return err_code; } static void h264_translate_pic_param(VAPictureParameterBufferH264 *pic_param, uint32_t width, uint32_t height, const VdpPictureInfoH264 *vdppi, uint32_t level) { pic_param->picture_width_in_mbs_minus1 = (width - 1) / 16; pic_param->picture_height_in_mbs_minus1 = (height - 1) / 16; pic_param->bit_depth_luma_minus8 = 0; // TODO: deal with more than 8 bits pic_param->bit_depth_chroma_minus8 = 0; // same for luma pic_param->num_ref_frames = vdppi->num_ref_frames; #define SEQ_FIELDS(fieldname) pic_param->seq_fields.bits.fieldname #define PIC_FIELDS(fieldname) pic_param->pic_fields.bits.fieldname SEQ_FIELDS(chroma_format_idc) = 1; // TODO: not only YUV420 SEQ_FIELDS(residual_colour_transform_flag) = 0; 
SEQ_FIELDS(gaps_in_frame_num_value_allowed_flag)= 0; SEQ_FIELDS(frame_mbs_only_flag) = vdppi->frame_mbs_only_flag; SEQ_FIELDS(mb_adaptive_frame_field_flag) = vdppi->mb_adaptive_frame_field_flag; SEQ_FIELDS(direct_8x8_inference_flag) = vdppi->direct_8x8_inference_flag; SEQ_FIELDS(MinLumaBiPredSize8x8) = (level >= 31); SEQ_FIELDS(log2_max_frame_num_minus4) = vdppi->log2_max_frame_num_minus4; SEQ_FIELDS(pic_order_cnt_type) = vdppi->pic_order_cnt_type; SEQ_FIELDS(log2_max_pic_order_cnt_lsb_minus4) = vdppi->log2_max_pic_order_cnt_lsb_minus4; SEQ_FIELDS(delta_pic_order_always_zero_flag) = vdppi->delta_pic_order_always_zero_flag; pic_param->num_slice_groups_minus1 = vdppi->slice_count - 1; // ??? pic_param->slice_group_map_type = 0; // ??? pic_param->slice_group_change_rate_minus1 = 0; // ??? pic_param->pic_init_qp_minus26 = vdppi->pic_init_qp_minus26; pic_param->pic_init_qs_minus26 = 0; // ??? pic_param->chroma_qp_index_offset = vdppi->chroma_qp_index_offset; pic_param->second_chroma_qp_index_offset = vdppi->second_chroma_qp_index_offset; PIC_FIELDS(entropy_coding_mode_flag) = vdppi->entropy_coding_mode_flag; PIC_FIELDS(weighted_pred_flag) = vdppi->weighted_pred_flag; PIC_FIELDS(weighted_bipred_idc) = vdppi->weighted_bipred_idc; PIC_FIELDS(transform_8x8_mode_flag) = vdppi->transform_8x8_mode_flag; PIC_FIELDS(field_pic_flag) = vdppi->field_pic_flag; PIC_FIELDS(constrained_intra_pred_flag) = vdppi->constrained_intra_pred_flag; PIC_FIELDS(pic_order_present_flag) = vdppi->pic_order_present_flag; PIC_FIELDS(deblocking_filter_control_present_flag) = vdppi->deblocking_filter_control_present_flag; PIC_FIELDS(redundant_pic_cnt_present_flag) = vdppi->redundant_pic_cnt_present_flag; PIC_FIELDS(reference_pic_flag) = vdppi->is_reference; pic_param->frame_num = vdppi->frame_num; #undef SEQ_FIELDS #undef PIC_FIELDS } static void h264_translate_iq_matrix(VAIQMatrixBufferH264 *iq_matrix, const VdpPictureInfoH264 *vdppi) { for (int j = 0; j < 6; j ++) for (int k = 0; k < 16; k ++) 
iq_matrix->ScalingList4x4[j][k] = vdppi->scaling_lists_4x4[j][k]; for (int j = 0; j < 2; j ++) for (int k = 0; k < 64; k ++) iq_matrix->ScalingList8x8[j][k] = vdppi->scaling_lists_8x8[j][k]; } static VdpStatus softVdpDecoderRender_h264(VdpDecoder decoder, VdpDecoderData *decoderData, VdpVideoSurfaceData *dstSurfData, VdpPictureInfo const *picture_info, uint32_t bitstream_buffer_count, VdpBitstreamBuffer const *bitstream_buffers) { VdpDeviceData *deviceData = decoderData->device; VADisplay va_dpy = deviceData->va_dpy; VAStatus status; VdpStatus vs, err_code; VdpPictureInfoH264 const *vdppi = (void *)picture_info; // TODO: figure out where to get level uint32_t level = 41; // preparing picture parameters and IQ matrix VABufferID pic_param_buf, iq_matrix_buf; VAPictureParameterBufferH264 pic_param; VAIQMatrixBufferH264 iq_matrix; vs = h264_translate_reference_frames(dstSurfData, decoder, decoderData, &pic_param, vdppi); if (VDP_STATUS_OK != vs) { if (VDP_STATUS_RESOURCES == vs) { traceError("error (softVdpDecoderRender): no surfaces left in buffer\n"); err_code = VDP_STATUS_RESOURCES; } else { err_code = VDP_STATUS_ERROR; } goto quit; } h264_translate_pic_param(&pic_param, decoderData->width, decoderData->height, vdppi, level); h264_translate_iq_matrix(&iq_matrix, vdppi); glx_context_lock(); status = vaCreateBuffer(va_dpy, decoderData->context_id, VAPictureParameterBufferType, sizeof(VAPictureParameterBufferH264), 1, &pic_param, &pic_param_buf); if (VA_STATUS_SUCCESS != status) { glx_context_unlock(); err_code = VDP_STATUS_ERROR; goto quit; } status = vaCreateBuffer(va_dpy, decoderData->context_id, VAIQMatrixBufferType, sizeof(VAIQMatrixBufferH264), 1, &iq_matrix, &iq_matrix_buf); if (VA_STATUS_SUCCESS != status) { glx_context_unlock(); err_code = VDP_STATUS_ERROR; goto quit; } // send data to decoding hardware status = vaBeginPicture(va_dpy, decoderData->context_id, dstSurfData->va_surf); if (VA_STATUS_SUCCESS != status) { glx_context_unlock(); err_code = 
VDP_STATUS_ERROR; goto quit; } status = vaRenderPicture(va_dpy, decoderData->context_id, &pic_param_buf, 1); if (VA_STATUS_SUCCESS != status) { glx_context_unlock(); err_code = VDP_STATUS_ERROR; goto quit; } status = vaRenderPicture(va_dpy, decoderData->context_id, &iq_matrix_buf, 1); if (VA_STATUS_SUCCESS != status) { glx_context_unlock(); err_code = VDP_STATUS_ERROR; goto quit; } vaDestroyBuffer(va_dpy, pic_param_buf); vaDestroyBuffer(va_dpy, iq_matrix_buf); glx_context_unlock(); // merge bitstream buffers int total_bitstream_bytes = 0; for (unsigned int k = 0; k < bitstream_buffer_count; k ++) total_bitstream_bytes += bitstream_buffers[k].bitstream_bytes; uint8_t *merged_bitstream = malloc(total_bitstream_bytes); if (NULL == merged_bitstream) { err_code = VDP_STATUS_RESOURCES; goto quit; } do { unsigned char *ptr = merged_bitstream; for (unsigned int k = 0; k < bitstream_buffer_count; k ++) { memcpy(ptr, bitstream_buffers[k].bitstream, bitstream_buffers[k].bitstream_bytes); ptr += bitstream_buffers[k].bitstream_bytes; } } while(0); // Slice parameters // All slice data have been merged into one continuous buffer. But we must supply // slices one by one to the hardware decoder, so we need to delimit them. VDPAU // requires bitstream buffers to include slice start code (0x00 0x00 0x01). Those // will be used to calculate offsets and sizes of slice data in code below. 
rbsp_state_t st_g; // reference, global state rbsp_attach_buffer(&st_g, merged_bitstream, total_bitstream_bytes); int nal_offset = rbsp_navigate_to_nal_unit(&st_g); if (nal_offset < 0) { traceError("error (softVdpDecoderRender): no NAL header\n"); err_code = VDP_STATUS_ERROR; goto quit; } do { VASliceParameterBufferH264 sp_h264; memset(&sp_h264, 0, sizeof(VASliceParameterBufferH264)); // make a copy of global rbsp state for using in slice header parser rbsp_state_t st = rbsp_copy_state(&st_g); rbsp_reset_bit_counter(&st); int nal_offset_next = rbsp_navigate_to_nal_unit(&st_g); // calculate end of current slice. Note (-3). It's slice start code length. const unsigned int end_pos = (nal_offset_next > 0) ? (nal_offset_next - 3) : total_bitstream_bytes; sp_h264.slice_data_size = end_pos - nal_offset; sp_h264.slice_data_offset = 0; sp_h264.slice_data_flag = VA_SLICE_DATA_FLAG_ALL; // TODO: this may be not entirely true for YUV444 // but if we limiting to YUV420, that's ok int ChromaArrayType = pic_param.seq_fields.bits.chroma_format_idc; // parse slice header and use its data to fill slice parameter buffer parse_slice_header(&st, &pic_param, ChromaArrayType, vdppi->num_ref_idx_l0_active_minus1, vdppi->num_ref_idx_l1_active_minus1, &sp_h264); VABufferID slice_parameters_buf; glx_context_lock(); status = vaCreateBuffer(va_dpy, decoderData->context_id, VASliceParameterBufferType, sizeof(VASliceParameterBufferH264), 1, &sp_h264, &slice_parameters_buf); if (VA_STATUS_SUCCESS != status) { glx_context_unlock(); err_code = VDP_STATUS_ERROR; goto quit; } status = vaRenderPicture(va_dpy, decoderData->context_id, &slice_parameters_buf, 1); if (VA_STATUS_SUCCESS != status) { glx_context_unlock(); err_code = VDP_STATUS_ERROR; goto quit; } VABufferID slice_buf; status = vaCreateBuffer(va_dpy, decoderData->context_id, VASliceDataBufferType, sp_h264.slice_data_size, 1, merged_bitstream + nal_offset, &slice_buf); if (VA_STATUS_SUCCESS != status) { glx_context_unlock(); err_code = 
VDP_STATUS_ERROR; goto quit; } status = vaRenderPicture(va_dpy, decoderData->context_id, &slice_buf, 1); if (VA_STATUS_SUCCESS != status) { glx_context_unlock(); err_code = VDP_STATUS_ERROR; goto quit; } vaDestroyBuffer(va_dpy, slice_parameters_buf); vaDestroyBuffer(va_dpy, slice_buf); glx_context_unlock(); if (nal_offset_next < 0) // nal_offset_next equals -1 when there is no slice break; // start code found. Thus that was the final slice. nal_offset = nal_offset_next; } while (1); glx_context_lock(); status = vaEndPicture(va_dpy, decoderData->context_id); glx_context_unlock(); if (VA_STATUS_SUCCESS != status) { err_code = VDP_STATUS_ERROR; goto quit; } free(merged_bitstream); err_code = VDP_STATUS_OK; quit: return err_code; } VdpStatus softVdpDecoderRender(VdpDecoder decoder, VdpVideoSurface target, VdpPictureInfo const *picture_info, uint32_t bitstream_buffer_count, VdpBitstreamBuffer const *bitstream_buffers) { VdpStatus err_code; if (!picture_info || !bitstream_buffers) return VDP_STATUS_INVALID_POINTER; VdpDecoderData *decoderData = handle_acquire(decoder, HANDLETYPE_DECODER); VdpVideoSurfaceData *dstSurfData = handle_acquire(target, HANDLETYPE_VIDEO_SURFACE); if (NULL == decoderData || NULL == dstSurfData) { err_code = VDP_STATUS_INVALID_HANDLE; goto quit; } if (VDP_DECODER_PROFILE_H264_BASELINE == decoderData->profile || VDP_DECODER_PROFILE_H264_MAIN == decoderData->profile || VDP_DECODER_PROFILE_H264_HIGH == decoderData->profile) { // TODO: check exit code softVdpDecoderRender_h264(decoder, decoderData, dstSurfData, picture_info, bitstream_buffer_count, bitstream_buffers); } else { traceError("error (softVdpDecoderRender): no implementation for profile %s\n", reverse_decoder_profile(decoderData->profile)); err_code = VDP_STATUS_NO_IMPLEMENTATION; goto quit; } err_code = VDP_STATUS_OK; quit: handle_release(decoder); handle_release(target); return err_code; } 
libvdpau-va-gl-0.2.1/vdpau-entry.c000066400000000000000000000112721224142723700167550ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ #define _XOPEN_SOURCE 500 #define _GNU_SOURCE #include #include #include #include #include #include "handle-storage.h" #include "vdpau-soft.h" #include "vdpau-trace.h" #include "globals.h" #include #include void trc_hk(void *longterm_param, void *shortterm_param, int origin, int after) { (void)longterm_param; (void)origin; int before = !after; if (global.quirks.log_call_duration) { static __thread struct timespec start_ts = {0, 0}; if (before) { clock_gettime(CLOCK_MONOTONIC, &start_ts); } if (after) { struct timespec end_ts; clock_gettime(CLOCK_MONOTONIC, &end_ts); double diff = (end_ts.tv_sec - start_ts.tv_sec) + (end_ts.tv_nsec - start_ts.tv_nsec) / 1.0e9; printf("Duration %7.5f secs, %s, %s\n", diff, reverse_func_id(origin), reverse_status((VdpStatus)shortterm_param)); } } if (before && global.quirks.log_timestamp) { struct timespec now; clock_gettime(CLOCK_REALTIME, &now); printf("%d.%03d ", (int)now.tv_sec, (int)now.tv_nsec/1000000); } if (before && global.quirks.log_thread_id) { printf("[%5d] ", (pid_t)syscall(__NR_gettid)); } } static void initialize_quirks(void) { global.quirks.buggy_XCloseDisplay = 0; global.quirks.show_watermark = 0; global.quirks.log_thread_id = 0; global.quirks.log_call_duration = 0; global.quirks.log_pq_delay = 0; global.quirks.log_timestamp = 0; global.quirks.avoid_va = 0; const char *value = getenv("VDPAU_QUIRKS"); if (!value) return; char *value_lc = strdup(value); if (NULL == value_lc) return; for (int k = 0; value_lc[k] != 0; k ++) value_lc[k] = tolower(value_lc[k]); // tokenize string const char delimiter = ','; char *item_start = value_lc; char *ptr = item_start; while (1) { int last = (0 == *ptr); if (delimiter == *ptr || 0 == *ptr) { *ptr = 0; if 
(!strcmp("xclosedisplay", item_start)) { global.quirks.buggy_XCloseDisplay = 1; } else if (!strcmp("showwatermark", item_start)) { global.quirks.show_watermark = 1; } else if (!strcmp("logthreadid", item_start)) { global.quirks.log_thread_id = 1; } else if (!strcmp("logcallduration", item_start)) { global.quirks.log_call_duration = 1; } else if (!strcmp("logpqdelay", item_start)) { global.quirks.log_pq_delay = 1; } else if (!strcmp("logtimestamp", item_start)) { global.quirks.log_timestamp = 1; } else if (!strcmp("avoidva", item_start)) { global.quirks.avoid_va = 1; } item_start = ptr + 1; } ptr ++; if (last) break; } free(value_lc); } __attribute__((constructor)) static void library_constructor(void) { handle_initialize_storage(); // Initialize global data pthread_mutex_init(&global.glx_ctx_stack_mutex, NULL); initialize_quirks(); // initialize tracer traceSetTarget(stdout); traceSetHook(trc_hk, NULL); traceInfo("Software VDPAU backend library initialized\n"); #ifdef NDEBUG traceEnableTracing(0); #else traceEnableTracing(1); #endif const char *value = getenv("VDPAU_LOG"); if (value) { // enable tracing when variable present traceEnableTracing(1); char *value_lc = strdup(value); // convert to lowercase for (int k = 0; value_lc[k] != 0; k ++) value_lc[k] = tolower(value_lc[k]); // and disable tracing when variable value equals one of the following values if (!strcmp(value_lc, "0") || !strcmp(value_lc, "false") || !strcmp(value_lc, "off") || !strcmp(value_lc, "disable") || !strcmp(value_lc, "disabled")) { traceEnableTracing(0); } free(value_lc); } } __attribute__((destructor)) static void library_destructor(void) { handle_destory_storage(); } __attribute__ ((visibility("default"))) VdpStatus vdp_imp_device_create_x11(Display *display, int screen, VdpDevice *device, VdpGetProcAddress **get_proc_address) { return traceVdpDeviceCreateX11(display, screen, device, get_proc_address); } 
libvdpau-va-gl-0.2.1/vdpau-presentation.c000066400000000000000000000447611224142723700203400ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ #define GL_GLEXT_PROTOTYPES #define _XOPEN_SOURCE 500 #include #include #include #include #include #include #include #include "ctx-stack.h" #include "globals.h" #include "handle-storage.h" #include "vdpau-soft.h" #include "vdpau-trace.h" #include "watermark.h" static VdpTime timespec2vdptime(struct timespec t) { return (uint64_t)t.tv_sec * 1000 * 1000 * 1000 + t.tv_nsec; } static struct timespec vdptime2timespec(VdpTime t) { struct timespec res; res.tv_sec = t / (1000*1000*1000); res.tv_nsec = t % (1000*1000*1000); return res; } VdpStatus softVdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_queue, VdpOutputSurface surface, VdpTime *first_presentation_time) { if (!first_presentation_time) return VDP_STATUS_INVALID_POINTER; VdpPresentationQueueData *pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE); if (NULL == pqData) return VDP_STATUS_INVALID_HANDLE; handle_release(presentation_queue); VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE); if (NULL == surfData) return VDP_STATUS_INVALID_HANDLE; // TODO: use locking instead of busy loop while (surfData->status != VDP_PRESENTATION_QUEUE_STATUS_IDLE) { handle_release(surface); usleep(1000); surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE); if (!surfData) return VDP_STATUS_ERROR; } *first_presentation_time = surfData->first_presentation_time; handle_release(surface); return VDP_STATUS_OK; } VdpStatus softVdpPresentationQueueQuerySurfaceStatus(VdpPresentationQueue presentation_queue, VdpOutputSurface surface, VdpPresentationQueueStatus *status, VdpTime *first_presentation_time) { if (!status || !first_presentation_time) return 
VDP_STATUS_INVALID_POINTER; VdpPresentationQueueData *pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE); if (NULL == pqData) return VDP_STATUS_INVALID_HANDLE; VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE); if (NULL == surfData) { handle_release(presentation_queue); return VDP_STATUS_INVALID_HANDLE; } *status = surfData->status; *first_presentation_time = surfData->first_presentation_time; handle_release(presentation_queue); handle_release(surface); return VDP_STATUS_OK; } static void do_presentation_queue_display(VdpPresentationQueueData *pqData) { pthread_mutex_lock(&pqData->queue_mutex); assert(pqData->queue.used > 0); const int entry = pqData->queue.head; VdpDeviceData *deviceData = pqData->device; VdpOutputSurface surface = pqData->queue.item[entry].surface; const uint32_t clip_width = pqData->queue.item[entry].clip_width; const uint32_t clip_height = pqData->queue.item[entry].clip_height; // remove first entry from queue pqData->queue.used --; pqData->queue.freelist[pqData->queue.head] = pqData->queue.firstfree; pqData->queue.firstfree = pqData->queue.head; pqData->queue.head = pqData->queue.item[pqData->queue.head].next; pthread_mutex_unlock(&pqData->queue_mutex); VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE); if (surfData == NULL) return; glx_context_push_global(deviceData->display, pqData->target->drawable, pqData->target->glc); const uint32_t target_width = (clip_width > 0) ? clip_width : surfData->width; const uint32_t target_height = (clip_height > 0) ? 
clip_height : surfData->height; glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0, target_width, target_height, 0, -1.0, 1.0); glViewport(0, 0, target_width, target_height); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glMatrixMode(GL_TEXTURE); glLoadIdentity(); glScalef(1.0f/surfData->width, 1.0f/surfData->height, 1.0f); glEnable(GL_TEXTURE_2D); glDisable(GL_BLEND); glBindTexture(GL_TEXTURE_2D, surfData->tex_id); glColor4f(1, 1, 1, 1); glBegin(GL_QUADS); glTexCoord2i(0, 0); glVertex2i(0, 0); glTexCoord2i(target_width, 0); glVertex2i(target_width, 0); glTexCoord2i(target_width, target_height); glVertex2i(target_width, target_height); glTexCoord2i(0, target_height); glVertex2i(0, target_height); glEnd(); if (global.quirks.show_watermark) { glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glBlendEquation(GL_FUNC_ADD); glBindTexture(GL_TEXTURE_2D, deviceData->watermark_tex_id); glMatrixMode(GL_TEXTURE); glLoadIdentity(); glColor3f(0.8, 0.08, 0.35); glBegin(GL_QUADS); glTexCoord2i(0, 0); glVertex2i(target_width - watermark_width, target_height - watermark_height); glTexCoord2i(1, 0); glVertex2i(target_width, target_height - watermark_height); glTexCoord2i(1, 1); glVertex2i(target_width, target_height); glTexCoord2i(0, 1); glVertex2i(target_width - watermark_width, target_height); glEnd(); } glXSwapBuffers(deviceData->display, pqData->target->drawable); struct timespec now; clock_gettime(CLOCK_REALTIME, &now); surfData->first_presentation_time = timespec2vdptime(now); surfData->status = VDP_PRESENTATION_QUEUE_STATUS_IDLE; if (global.quirks.log_pq_delay) { const int64_t delta = timespec2vdptime(now) - surfData->queued_at; const struct timespec delta_ts = vdptime2timespec(delta); traceInfo("pqdelay %d.%09d %d.%09d\n", (int)now.tv_sec, (int)now.tv_nsec, delta_ts.tv_sec, delta_ts.tv_nsec); } GLenum gl_error = glGetError(); glx_context_pop(); handle_release(surface); if (GL_NO_ERROR != gl_error) { traceError("error (VdpPresentationQueueDisplay): 
gl error %d\n", gl_error); } } static void * presentation_thread(void *param) { pthread_mutex_t cond_mutex = PTHREAD_MUTEX_INITIALIZER; pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL); VdpPresentationQueue presentation_queue = (VdpPresentationQueue)(size_t)param; VdpPresentationQueueData *pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE); if (NULL == pqData) return NULL; pthread_mutex_lock(&cond_mutex); while (1) { struct timespec now; clock_gettime(CLOCK_REALTIME, &now); struct timespec target_time = now; while (1) { int ret; handle_release(presentation_queue); pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); ret = pthread_cond_timedwait(&pqData->new_work_available, &cond_mutex, &target_time); if (ret != 0 && ret != ETIMEDOUT) { traceError("%s: pthread_cond_timedwait failed with code %d\n", __func__, ret); goto quit; } struct timespec now; clock_gettime(CLOCK_REALTIME, &now); pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE); if (!pqData) goto quit; pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL); pthread_mutex_lock(&pqData->queue_mutex); if (pqData->queue.head != -1) { struct timespec ht = vdptime2timespec(pqData->queue.item[pqData->queue.head].t); if (now.tv_sec > ht.tv_sec || (now.tv_sec == ht.tv_sec && now.tv_nsec > ht.tv_nsec)) { // break loop and process event pthread_mutex_unlock(&pqData->queue_mutex); break; } else { // sleep until next event target_time = ht; } } else { // queue empty, no work to do. 
Wait for next event target_time = now; target_time.tv_sec += 1; } pthread_mutex_unlock(&pqData->queue_mutex); } // do event processing pthread_mutex_unlock(&pqData->queue_mutex); do_presentation_queue_display(pqData); } quit: return NULL; } VdpStatus softVdpPresentationQueueCreate(VdpDevice device, VdpPresentationQueueTarget presentation_queue_target, VdpPresentationQueue *presentation_queue) { if (!presentation_queue) return VDP_STATUS_INVALID_POINTER; VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == deviceData) return VDP_STATUS_INVALID_HANDLE; VdpPresentationQueueTargetData *targetData = handle_acquire(presentation_queue_target, HANDLETYPE_PRESENTATION_QUEUE_TARGET); if (NULL == targetData) { handle_release(device); return VDP_STATUS_INVALID_HANDLE; } VdpPresentationQueueData *data = calloc(1, sizeof(VdpPresentationQueueData)); if (NULL == data) { handle_release(device); handle_release(presentation_queue_target); return VDP_STATUS_RESOURCES; } data->type = HANDLETYPE_PRESENTATION_QUEUE; data->device = deviceData; data->target = targetData; data->bg_color.red = 0.0; data->bg_color.green = 0.0; data->bg_color.blue = 0.0; data->bg_color.alpha = 0.0; deviceData->refcount ++; targetData->refcount ++; *presentation_queue = handle_insert(data); // initialize queue data->queue.head = -1; data->queue.used = 0; for (unsigned int k = 0; k < PRESENTATION_QUEUE_LENGTH; k ++) { data->queue.item[k].next = -1; // other fields are zero due to calloc } for (unsigned int k = 0; k < PRESENTATION_QUEUE_LENGTH - 1; k ++) data->queue.freelist[k] = k + 1; data->queue.freelist[PRESENTATION_QUEUE_LENGTH - 1] = -1; data->queue.firstfree = 0; pthread_mutex_init(&data->queue_mutex, NULL); pthread_cond_init(&data->new_work_available, NULL); // launch worker thread pthread_create(&data->worker_thread, NULL, presentation_thread, (void *)(size_t)(*presentation_queue)); handle_release(device); handle_release(presentation_queue_target); return VDP_STATUS_OK; } 
VdpStatus softVdpPresentationQueueDestroy(VdpPresentationQueue presentation_queue) { VdpPresentationQueueData *pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE); if (NULL == pqData) return VDP_STATUS_INVALID_HANDLE; pthread_cancel(pqData->worker_thread); if (0 != pthread_join(pqData->worker_thread, NULL)) { traceError("VdpPresentationQueueDestroy: failed to stop worker thread"); handle_release(presentation_queue); return VDP_STATUS_ERROR; } handle_expunge(presentation_queue); pqData->device->refcount --; pqData->target->refcount --; free(pqData); return VDP_STATUS_OK; } VdpStatus softVdpPresentationQueueSetBackgroundColor(VdpPresentationQueue presentation_queue, VdpColor *const background_color) { VdpPresentationQueueData *pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE); if (NULL == pqData) return VDP_STATUS_INVALID_HANDLE; if (background_color) { pqData->bg_color = *background_color; } else { pqData->bg_color.red = 0.0; pqData->bg_color.green = 0.0; pqData->bg_color.blue = 0.0; pqData->bg_color.alpha = 0.0; } handle_release(presentation_queue); return VDP_STATUS_OK; } VdpStatus softVdpPresentationQueueGetBackgroundColor(VdpPresentationQueue presentation_queue, VdpColor *background_color) { if (!background_color) return VDP_STATUS_INVALID_POINTER; VdpPresentationQueueData *pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE); if (NULL == pqData) return VDP_STATUS_INVALID_HANDLE; *background_color = pqData->bg_color; handle_release(presentation_queue); return VDP_STATUS_OK; } VdpStatus softVdpPresentationQueueGetTime(VdpPresentationQueue presentation_queue, VdpTime *current_time) { if (!current_time) return VDP_STATUS_INVALID_POINTER; (void)presentation_queue; struct timespec now; clock_gettime(CLOCK_REALTIME, &now); *current_time = timespec2vdptime(now); return VDP_STATUS_OK; } VdpStatus softVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue, VdpOutputSurface surface, 
uint32_t clip_width, uint32_t clip_height, VdpTime earliest_presentation_time) { VdpPresentationQueueData *pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE); if (NULL == pqData) return VDP_STATUS_INVALID_HANDLE; // push work to queue pthread_mutex_lock(&pqData->queue_mutex); while (pqData->queue.used >= PRESENTATION_QUEUE_LENGTH) { // wait while queue is full // TODO: check for deadlock here // TODO: is there a way to drop pqData->queue_mutex, and use only pqData->lock? pthread_mutex_unlock(&pqData->queue_mutex); handle_release(presentation_queue); usleep(10*1000); pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE); pthread_mutex_lock(&pqData->queue_mutex); } VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE); if (NULL == surfData) { pthread_mutex_unlock(&pqData->queue_mutex); handle_release(presentation_queue); return VDP_STATUS_INVALID_HANDLE; } if (pqData->device != surfData->device) { handle_release(surface); handle_release(presentation_queue); return VDP_STATUS_HANDLE_DEVICE_MISMATCH; } pqData->queue.used ++; int new_item = pqData->queue.firstfree; assert(new_item != -1); pqData->queue.firstfree = pqData->queue.freelist[new_item]; pqData->queue.item[new_item].t = earliest_presentation_time; pqData->queue.item[new_item].clip_width = clip_width; pqData->queue.item[new_item].clip_height = clip_height; pqData->queue.item[new_item].surface = surface; surfData->first_presentation_time = 0; surfData->status = VDP_PRESENTATION_QUEUE_STATUS_QUEUED; // keep queue sorted if (pqData->queue.head == -1 || earliest_presentation_time < pqData->queue.item[pqData->queue.head].t) { pqData->queue.item[new_item].next = pqData->queue.head; pqData->queue.head = new_item; } else { int ptr = pqData->queue.head; int prev = ptr; while (ptr != -1 && pqData->queue.item[ptr].t <= earliest_presentation_time) { prev = ptr; ptr = pqData->queue.item[ptr].next; } pqData->queue.item[new_item].next = ptr; 
pqData->queue.item[prev].next = new_item; } pthread_mutex_unlock(&pqData->queue_mutex); if (global.quirks.log_pq_delay) { struct timespec now; clock_gettime(CLOCK_REALTIME, &now); surfData->queued_at = timespec2vdptime(now); } pthread_cond_broadcast(&pqData->new_work_available); handle_release(presentation_queue); handle_release(surface); return VDP_STATUS_OK; } VdpStatus softVdpPresentationQueueTargetCreateX11(VdpDevice device, Drawable drawable, VdpPresentationQueueTarget *target) { if (!target) return VDP_STATUS_INVALID_POINTER; VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == deviceData) return VDP_STATUS_INVALID_HANDLE; VdpPresentationQueueTargetData *data = calloc(1, sizeof(VdpPresentationQueueTargetData)); if (NULL == data) { handle_release(device); return VDP_STATUS_RESOURCES; } data->type = HANDLETYPE_PRESENTATION_QUEUE_TARGET; data->device = deviceData; data->drawable = drawable; data->refcount = 0; pthread_mutex_lock(&global.glx_ctx_stack_mutex); GLint att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None }; XVisualInfo *vi; vi = glXChooseVisual(deviceData->display, deviceData->screen, att); if (NULL == vi) { traceError("error (softVdpPresentationQueueTargetCreateX11): glXChooseVisual failed\n"); free(data); pthread_mutex_unlock(&global.glx_ctx_stack_mutex); handle_release(device); return VDP_STATUS_ERROR; } // create context for dislaying result (can share display lists with deviceData->glc data->glc = glXCreateContext(deviceData->display, vi, deviceData->root_glc, GL_TRUE); deviceData->refcount ++; *target = handle_insert(data); pthread_mutex_unlock(&global.glx_ctx_stack_mutex); handle_release(device); return VDP_STATUS_OK; } VdpStatus softVdpPresentationQueueTargetDestroy(VdpPresentationQueueTarget presentation_queue_target) { VdpPresentationQueueTargetData *pqTargetData = handle_acquire(presentation_queue_target, HANDLETYPE_PRESENTATION_QUEUE_TARGET); if (NULL == pqTargetData) return 
VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = pqTargetData->device; if (0 != pqTargetData->refcount) { traceError("warning (softVdpPresentationQueueTargetDestroy): non-zero reference" "count (%d)\n", pqTargetData->refcount); handle_release(presentation_queue_target); return VDP_STATUS_ERROR; } // drawable may be destroyed already, so one should activate global context glx_context_push_thread_local(deviceData); glXDestroyContext(deviceData->display, pqTargetData->glc); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpPresentationQueueTargetDestroy): gl error %d\n", gl_error); handle_release(presentation_queue_target); return VDP_STATUS_ERROR; } deviceData->refcount --; handle_expunge(presentation_queue_target); free(pqTargetData); return VDP_STATUS_OK; } libvdpau-va-gl-0.2.1/vdpau-soft.c000066400000000000000000002463011224142723700165720ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #define _XOPEN_SOURCE #define GL_GLEXT_PROTOTYPES #include #include #include #include #include #include #include #include #include #include #include #include "bitstream.h" #include "ctx-stack.h" #include "h264-parse.h" #include "reverse-constant.h" #include "handle-storage.h" #include "vdpau-trace.h" #include "watermark.h" #include "globals.h" #define DESCRIBE(xparam, format) fprintf(stderr, #xparam " = %" #format "\n", xparam) static char const * implemetation_description_string = "OpenGL/VAAPI/libswscale backend for VDPAU"; static uint32_t chroma_storage_size_divider(VdpChromaType chroma_type) { switch (chroma_type) { case VDP_CHROMA_TYPE_420: return 4; case VDP_CHROMA_TYPE_422: return 2; case VDP_CHROMA_TYPE_444: return 1; default: return 1; } } static const char * softVdpGetErrorString(VdpStatus status) { return reverse_status(status); } VdpStatus softVdpGetApiVersion(uint32_t *api_version) { if (!api_version) return VDP_STATUS_INVALID_POINTER; *api_version = VDPAU_VERSION; return VDP_STATUS_OK; } VdpStatus softVdpOutputSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height) { VdpStatus err_code; if (!is_supported || !max_width || !max_height) return VDP_STATUS_INVALID_POINTER; VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == deviceData) return VDP_STATUS_INVALID_HANDLE; switch (surface_rgba_format) { case VDP_RGBA_FORMAT_B8G8R8A8: case VDP_RGBA_FORMAT_R8G8B8A8: case VDP_RGBA_FORMAT_R10G10B10A2: case VDP_RGBA_FORMAT_B10G10R10A2: case VDP_RGBA_FORMAT_A8: *is_supported = 1; // All these formats should be supported by OpenGL break; // implementation. 
default: *is_supported = 0; break; } GLint max_texture_size; glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size); GLenum gl_error = glGetError(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpOutputSurfaceQueryCapabilities): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } *max_width = max_texture_size; *max_height = max_texture_size; err_code = VDP_STATUS_OK; quit: handle_release(device); return err_code; } VdpStatus softVdpOutputSurfaceQueryGetPutBitsNativeCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpBool *is_supported) { (void)device; (void)surface_rgba_format; (void)is_supported; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpOutputSurfaceQueryPutBitsIndexedCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpIndexedFormat bits_indexed_format, VdpColorTableFormat color_table_format, VdpBool *is_supported) { (void)device; (void)surface_rgba_format; (void)bits_indexed_format; (void)color_table_format; (void)is_supported; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpOutputSurfaceQueryPutBitsYCbCrCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpYCbCrFormat bits_ycbcr_format, VdpBool *is_supported) { (void)device; (void)surface_rgba_format; (void)bits_ycbcr_format; (void)is_supported; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpOutputSurfaceCreate(VdpDevice device, VdpRGBAFormat rgba_format, uint32_t width, uint32_t height, VdpOutputSurface *surface) { VdpStatus err_code; if (!surface) return VDP_STATUS_INVALID_POINTER; VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == deviceData) return VDP_STATUS_INVALID_HANDLE; //TODO: figure out reasonable limits if (width > 4096 || height > 4096) { err_code = VDP_STATUS_INVALID_SIZE; goto quit; } VdpOutputSurfaceData *data = calloc(1, sizeof(VdpOutputSurfaceData)); if (NULL == data) { err_code = VDP_STATUS_RESOURCES; goto quit; } switch (rgba_format) { case 
VDP_RGBA_FORMAT_B8G8R8A8: data->gl_internal_format = GL_RGBA; data->gl_format = GL_BGRA; data->gl_type = GL_UNSIGNED_BYTE; data->bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_R8G8B8A8: data->gl_internal_format = GL_RGBA; data->gl_format = GL_RGBA; data->gl_type = GL_UNSIGNED_BYTE; data->bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_R10G10B10A2: data->gl_internal_format = GL_RGB10_A2; data->gl_format = GL_RGBA; data->gl_type = GL_UNSIGNED_INT_10_10_10_2; data->bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_B10G10R10A2: data->gl_internal_format = GL_RGB10_A2; data->gl_format = GL_BGRA; data->gl_type = GL_UNSIGNED_INT_10_10_10_2; data->bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_A8: data->gl_internal_format = GL_RGBA; data->gl_format = GL_RED; data->gl_type = GL_UNSIGNED_BYTE; data->bytes_per_pixel = 1; break; default: traceError("error (VdpOutputSurfaceCreate): %s is not implemented\n", reverse_rgba_format(rgba_format)); free(data); err_code = VDP_STATUS_INVALID_RGBA_FORMAT; goto quit; } data->type = HANDLETYPE_OUTPUT_SURFACE; data->width = width; data->height = height; data->device = deviceData; data->rgba_format = rgba_format; glx_context_push_thread_local(deviceData); glGenTextures(1, &data->tex_id); glBindTexture(GL_TEXTURE_2D, data->tex_id); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // reserve texture glTexImage2D(GL_TEXTURE_2D, 0, data->gl_internal_format, width, height, 0, data->gl_format, data->gl_type, NULL); glGenFramebuffers(1, &data->fbo_id); glBindFramebuffer(GL_FRAMEBUFFER, data->fbo_id); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, data->tex_id, 0); GLenum gl_status = glCheckFramebufferStatus(GL_FRAMEBUFFER); if (GL_FRAMEBUFFER_COMPLETE != gl_status) { traceError("error 
(VdpOutputSurfaceCreate): " "framebuffer not ready, %d, %s\n", gl_status, gluErrorString(gl_status)); glx_context_pop(); free(data); err_code = VDP_STATUS_ERROR; goto quit; } glClearColor(0.0, 0.0, 0.0, 0.0); glClear(GL_COLOR_BUFFER_BIT); glFinish(); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpOutputSurfaceCreate): gl error %d\n", gl_error); free(data); err_code = VDP_STATUS_ERROR; goto quit; } deviceData->refcount ++; *surface = handle_insert(data); err_code = VDP_STATUS_OK; quit: handle_release(device); return err_code; } VdpStatus softVdpOutputSurfaceDestroy(VdpOutputSurface surface) { VdpStatus err_code; VdpOutputSurfaceData *data = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE); if (NULL == data) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = data->device; glx_context_push_thread_local(deviceData); glDeleteTextures(1, &data->tex_id); glDeleteFramebuffers(1, &data->fbo_id); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpOutputSurfaceDestroy): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } handle_expunge(surface); deviceData->refcount --; free(data); return VDP_STATUS_OK; quit: handle_release(surface); return err_code; } VdpStatus softVdpOutputSurfaceGetParameters(VdpOutputSurface surface, VdpRGBAFormat *rgba_format, uint32_t *width, uint32_t *height) { if (!rgba_format || !width || !height) return VDP_STATUS_INVALID_POINTER; VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE); if (NULL == surfData) return VDP_STATUS_INVALID_HANDLE; // TODO: check surfData validity again *rgba_format = surfData->rgba_format; *width = surfData->width; *height = surfData->height; handle_release(surface); return VDP_STATUS_OK; } VdpStatus softVdpOutputSurfaceGetBitsNative(VdpOutputSurface surface, VdpRect const *source_rect, void *const *destination_data, uint32_t const *destination_pitches) { 
VdpStatus err_code; if (!destination_data || !destination_pitches) return VDP_STATUS_INVALID_POINTER; VdpOutputSurfaceData *srcSurfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE); if (NULL == srcSurfData) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = srcSurfData->device; VdpRect srcRect = {0, 0, srcSurfData->width, srcSurfData->height}; if (source_rect) srcRect = *source_rect; glx_context_push_thread_local(deviceData); glBindFramebuffer(GL_FRAMEBUFFER, srcSurfData->fbo_id); glReadBuffer(GL_COLOR_ATTACHMENT0); glPixelStorei(GL_UNPACK_ROW_LENGTH, destination_pitches[0] / srcSurfData->bytes_per_pixel); if (4 != srcSurfData->bytes_per_pixel) glPixelStorei(GL_PACK_ALIGNMENT, 1); glReadPixels(srcRect.x0, srcRect.y0, srcRect.x1 - srcRect.x0, srcRect.y1 - srcRect.y0, srcSurfData->gl_format, srcSurfData->gl_type, destination_data[0]); glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); if (4 != srcSurfData->bytes_per_pixel) glPixelStorei(GL_PACK_ALIGNMENT, 4); glFinish(); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpOutputSurfaceGetBitsNative): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } err_code = VDP_STATUS_OK; quit: handle_release(surface); return err_code; } VdpStatus softVdpOutputSurfacePutBitsNative(VdpOutputSurface surface, void const *const *source_data, uint32_t const *source_pitches, VdpRect const *destination_rect) { VdpStatus err_code; if (!source_data || !source_pitches) return VDP_STATUS_INVALID_POINTER; VdpOutputSurfaceData *dstSurfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE); if (NULL == dstSurfData) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = dstSurfData->device; VdpRect dstRect = {0, 0, dstSurfData->width, dstSurfData->height}; if (destination_rect) dstRect = *destination_rect; glx_context_push_thread_local(deviceData); glBindTexture(GL_TEXTURE_2D, dstSurfData->tex_id); glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0] / 
dstSurfData->bytes_per_pixel); if (4 != dstSurfData->bytes_per_pixel) glPixelStorei(GL_UNPACK_ALIGNMENT, 1); glTexSubImage2D(GL_TEXTURE_2D, 0, dstRect.x0, dstRect.y0, dstRect.x1 - dstRect.x0, dstRect.y1 - dstRect.y0, dstSurfData->gl_format, dstSurfData->gl_type, source_data[0]); glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); if (4 != dstSurfData->bytes_per_pixel) glPixelStorei(GL_UNPACK_ALIGNMENT, 4); glFinish(); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpOutputSurfacePutBitsNative): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } err_code = VDP_STATUS_OK; quit: handle_release(surface); return err_code; } VdpStatus softVdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface, VdpIndexedFormat source_indexed_format, void const *const *source_data, uint32_t const *source_pitch, VdpRect const *destination_rect, VdpColorTableFormat color_table_format, void const *color_table) { VdpStatus err_code; if (!source_data || !source_pitch || !color_table) return VDP_STATUS_INVALID_POINTER; VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE); if (NULL == surfData) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = surfData->device; VdpRect dstRect = {0, 0, surfData->width, surfData->height}; if (destination_rect) dstRect = *destination_rect; // there is no other formats anyway if (VDP_COLOR_TABLE_FORMAT_B8G8R8X8 != color_table_format) { err_code = VDP_STATUS_INVALID_COLOR_TABLE_FORMAT; goto quit; } const uint32_t *color_table32 = color_table; glx_context_push_thread_local(deviceData); switch (source_indexed_format) { case VDP_INDEXED_FORMAT_I8A8: // TODO: use shader? 
do { const uint32_t dstRectWidth = dstRect.x1 - dstRect.x0; const uint32_t dstRectHeight = dstRect.y1 - dstRect.y0; uint32_t *unpacked_buf = malloc(4 * dstRectWidth * dstRectHeight); if (NULL == unpacked_buf) { err_code = VDP_STATUS_RESOURCES; goto quit; } for (unsigned int y = 0; y < dstRectHeight; y ++) { const uint8_t *src_ptr = source_data[0]; src_ptr += y * source_pitch[0]; uint32_t *dst_ptr = unpacked_buf + y * dstRectWidth; for (unsigned int x = 0; x < dstRectWidth; x ++) { const uint8_t i = *src_ptr++; const uint32_t a = (*src_ptr++) << 24; dst_ptr[x] = (color_table32[i] & 0x00ffffff) + a; } } glBindTexture(GL_TEXTURE_2D, surfData->tex_id); glTexSubImage2D(GL_TEXTURE_2D, 0, dstRect.x0, dstRect.y0, dstRect.x1 - dstRect.x0, dstRect.y1 - dstRect.y0, GL_BGRA, GL_UNSIGNED_BYTE, unpacked_buf); glFinish(); free(unpacked_buf); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpOutputSurfacePutBitsIndexed): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } err_code = VDP_STATUS_OK; goto quit; } while (0); break; default: traceError("error (VdpOutputSurfacePutBitsIndexed): unsupported indexed format %s\n", reverse_indexed_format(source_indexed_format)); err_code = VDP_STATUS_INVALID_INDEXED_FORMAT; goto quit; } quit: handle_release(surface); return err_code; } VdpStatus softVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface, VdpYCbCrFormat source_ycbcr_format, void const *const *source_data, uint32_t const *source_pitches, VdpRect const *destination_rect, VdpCSCMatrix const *csc_matrix) { (void)surface; (void)source_ycbcr_format; (void)source_data; (void)source_pitches; (void)destination_rect; (void)csc_matrix; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpVideoMixerQueryFeatureSupport(VdpDevice device, VdpVideoMixerFeature feature, VdpBool *is_supported) { (void)device; (void)feature; (void)is_supported; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus 
softVdpVideoMixerQueryParameterSupport(VdpDevice device, VdpVideoMixerParameter parameter, VdpBool *is_supported) { (void)device; (void)parameter; (void)is_supported; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpVideoMixerQueryAttributeSupport(VdpDevice device, VdpVideoMixerAttribute attribute, VdpBool *is_supported) { (void)device; (void)attribute; (void)is_supported; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpVideoMixerQueryParameterValueRange(VdpDevice device, VdpVideoMixerParameter parameter, void *min_value, void *max_value) { (void)device; (void)parameter; (void)min_value; (void)max_value; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpVideoMixerQueryAttributeValueRange(VdpDevice device, VdpVideoMixerAttribute attribute, void *min_value, void *max_value) { (void)device; (void)attribute; (void)min_value; (void)max_value; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpVideoMixerCreate(VdpDevice device, uint32_t feature_count, VdpVideoMixerFeature const *features, uint32_t parameter_count, VdpVideoMixerParameter const *parameters, void const *const *parameter_values, VdpVideoMixer *mixer) { VdpStatus err_code; if (!mixer) return VDP_STATUS_INVALID_POINTER; (void)feature_count; (void)features; // TODO: mixer features (void)parameter_count; (void)parameters; (void)parameter_values; // TODO: mixer parameters VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == deviceData) return VDP_STATUS_INVALID_HANDLE; VdpVideoMixerData *data = calloc(1, sizeof(VdpVideoMixerData)); if (NULL == data) { err_code = VDP_STATUS_RESOURCES; goto quit; } data->type = HANDLETYPE_VIDEO_MIXER; data->device = deviceData; deviceData->refcount ++; *mixer = handle_insert(data); err_code = VDP_STATUS_OK; quit: handle_release(device); return err_code; } VdpStatus softVdpVideoMixerSetFeatureEnables(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool const *feature_enables) { (void)mixer; 
(void)feature_count; (void)features; (void)feature_enables; return VDP_STATUS_OK; } VdpStatus softVdpVideoMixerSetAttributeValues(VdpVideoMixer mixer, uint32_t attribute_count, VdpVideoMixerAttribute const *attributes, void const *const *attribute_values) { (void)mixer; (void)attribute_count; (void)attributes; (void)attribute_values; return VDP_STATUS_OK; } VdpStatus softVdpVideoMixerGetFeatureSupport(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool *feature_supports) { (void)mixer; (void)feature_count; (void)features; (void)feature_supports; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpVideoMixerGetFeatureEnables(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool *feature_enables) { (void)mixer; (void)feature_count; (void)features; (void)feature_enables; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpVideoMixerGetParameterValues(VdpVideoMixer mixer, uint32_t parameter_count, VdpVideoMixerParameter const *parameters, void *const *parameter_values) { (void)mixer; (void)parameter_count; (void)parameters; (void)parameter_values; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpVideoMixerGetAttributeValues(VdpVideoMixer mixer, uint32_t attribute_count, VdpVideoMixerAttribute const *attributes, void *const *attribute_values) { (void)mixer; (void)attribute_count; (void)attributes; (void)attribute_values; return VDP_STATUS_NO_IMPLEMENTATION; } VdpStatus softVdpVideoMixerDestroy(VdpVideoMixer mixer) { VdpVideoMixerData *videoMixerData = handle_acquire(mixer, HANDLETYPE_VIDEO_MIXER); if (NULL == videoMixerData) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = videoMixerData->device; deviceData->refcount --; handle_expunge(mixer); free(videoMixerData); return VDP_STATUS_OK; } VdpStatus softVdpVideoMixerRender(VdpVideoMixer mixer, VdpOutputSurface background_surface, VdpRect const *background_source_rect, VdpVideoMixerPictureStructure 
current_picture_structure, uint32_t video_surface_past_count, VdpVideoSurface const *video_surface_past, VdpVideoSurface video_surface_current, uint32_t video_surface_future_count, VdpVideoSurface const *video_surface_future, VdpRect const *video_source_rect, VdpOutputSurface destination_surface, VdpRect const *destination_rect, VdpRect const *destination_video_rect, uint32_t layer_count, VdpLayer const *layers) { VdpStatus err_code; (void)mixer; // TODO: mixer should be used to get mixing parameters // TODO: current implementation ignores previous and future surfaces, using only current. // Is that acceptable for interlaced video? Will VAAPI handle deinterlacing? (void)background_surface; // TODO: background_surface. Is it safe to just ignore it? (void)background_source_rect; (void)current_picture_structure; (void)video_surface_past_count; (void)video_surface_past; (void)video_surface_future_count; (void)video_surface_future; (void)layer_count; (void)layers; VdpVideoSurfaceData *srcSurfData = handle_acquire(video_surface_current, HANDLETYPE_VIDEO_SURFACE); VdpOutputSurfaceData *dstSurfData = handle_acquire(destination_surface, HANDLETYPE_OUTPUT_SURFACE); if (NULL == srcSurfData || NULL == dstSurfData) { err_code = VDP_STATUS_INVALID_HANDLE; goto quit; } if (srcSurfData->device != dstSurfData->device) { err_code = VDP_STATUS_HANDLE_DEVICE_MISMATCH; goto quit; } VdpDeviceData *deviceData = srcSurfData->device; VdpRect srcVideoRect = {0, 0, srcSurfData->width, srcSurfData->height}; if (video_source_rect) srcVideoRect = *video_source_rect; VdpRect dstRect = {0, 0, dstSurfData->width, dstSurfData->height}; if (destination_rect) dstRect = *destination_rect; VdpRect dstVideoRect = srcVideoRect; if (destination_video_rect) dstVideoRect = *destination_video_rect; // TODO: dstRect should clip dstVideoRect glx_context_push_thread_local(deviceData); if (deviceData->va_available) { VAStatus status; if (NULL == srcSurfData->va_glx) { status = 
vaCreateSurfaceGLX(deviceData->va_dpy, GL_TEXTURE_2D, srcSurfData->tex_id, &srcSurfData->va_glx); if (VA_STATUS_SUCCESS != status) { glx_context_pop(); err_code = VDP_STATUS_ERROR; goto quit; } } status = vaCopySurfaceGLX(deviceData->va_dpy, srcSurfData->va_glx, srcSurfData->va_surf, 0); // TODO: check result of previous call glBindFramebuffer(GL_FRAMEBUFFER, dstSurfData->fbo_id); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0, dstSurfData->width, 0, dstSurfData->height, -1.0f, 1.0f); glViewport(0, 0, dstSurfData->width, dstSurfData->height); glDisable(GL_BLEND); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glMatrixMode(GL_TEXTURE); glLoadIdentity(); glScalef(1.0f/srcSurfData->width, 1.0f/srcSurfData->height, 1.0f); // Clear dstRect area glDisable(GL_TEXTURE_2D); glColor4f(0, 0, 0, 1); glBegin(GL_QUADS); glVertex2f(dstRect.x0, dstRect.y0); glVertex2f(dstRect.x1, dstRect.y0); glVertex2f(dstRect.x1, dstRect.y1); glVertex2f(dstRect.x0, dstRect.y1); glEnd(); // Render (maybe scaled) data from video surface glEnable(GL_TEXTURE_2D); glBindTexture(GL_TEXTURE_2D, srcSurfData->tex_id); glColor4f(1, 1, 1, 1); glBegin(GL_QUADS); glTexCoord2i(srcVideoRect.x0, srcVideoRect.y0); glVertex2f(dstVideoRect.x0, dstVideoRect.y0); glTexCoord2i(srcVideoRect.x1, srcVideoRect.y0); glVertex2f(dstVideoRect.x1, dstVideoRect.y0); glTexCoord2i(srcVideoRect.x1, srcVideoRect.y1); glVertex2f(dstVideoRect.x1, dstVideoRect.y1); glTexCoord2i(srcVideoRect.x0, srcVideoRect.y1); glVertex2f(dstVideoRect.x0, dstVideoRect.y1); glEnd(); } else { // fall back to software convertion // TODO: make sure not to do scaling in software, only colorspace conversion // TODO: use GL shaders to do colorspace conversion job // TODO: handle all three kind of rectangles and clipping const uint32_t dstVideoWidth = dstVideoRect.x1 - dstVideoRect.x0; const uint32_t dstVideoHeight = dstVideoRect.y1 - dstVideoRect.y0; const uint32_t dstVideoStride = (dstVideoWidth & 3) ? 
(dstVideoWidth & ~3u) + 4 : dstVideoWidth; uint8_t *img_buf = malloc(dstVideoStride * dstVideoHeight * 4); if (NULL == img_buf) { err_code = VDP_STATUS_RESOURCES; goto quit; } struct SwsContext *sws_ctx = sws_getContext(srcSurfData->width, srcSurfData->height, PIX_FMT_YUV420P, dstVideoWidth, dstVideoHeight, PIX_FMT_RGBA, SWS_POINT, NULL, NULL, NULL); uint8_t const * const src_planes[] = { srcSurfData->y_plane, srcSurfData->v_plane, srcSurfData->u_plane, NULL }; int src_strides[] = {srcSurfData->stride, srcSurfData->stride/2, srcSurfData->stride/2, 0}; uint8_t *dst_planes[] = {img_buf, NULL, NULL, NULL}; int dst_strides[] = {dstVideoStride * 4, 0, 0, 0}; int res = sws_scale(sws_ctx, src_planes, src_strides, 0, srcSurfData->height, dst_planes, dst_strides); sws_freeContext(sws_ctx); if (res != (int)dstVideoHeight) { traceError("error (softVdpVideoMixerRender): libswscale scaling failed\n"); glx_context_pop(); err_code = VDP_STATUS_ERROR; goto quit; } // copy converted image to texture glPixelStorei(GL_UNPACK_ROW_LENGTH, dstVideoStride); glBindTexture(GL_TEXTURE_2D, dstSurfData->tex_id); glTexSubImage2D(GL_TEXTURE_2D, 0, dstVideoRect.x0, dstVideoRect.y0, dstVideoRect.x1 - dstVideoRect.x0, dstVideoRect.y1 - dstVideoRect.y0, GL_BGRA, GL_UNSIGNED_BYTE, img_buf); glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); free(img_buf); } glFinish(); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpVideoMixerRender): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } err_code = VDP_STATUS_OK; quit: handle_release(video_surface_current); handle_release(destination_surface); return err_code; } VdpStatus softVdpVideoSurfaceQueryCapabilities(VdpDevice device, VdpChromaType surface_chroma_type, VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height) { if (!is_supported || !max_width || !max_height) return VDP_STATUS_INVALID_POINTER; (void)device; (void)surface_chroma_type; // TODO: implement *is_supported = 1; 
*max_width = 1920; *max_height = 1080; return VDP_STATUS_OK; } VdpStatus softVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaType surface_chroma_type, VdpYCbCrFormat bits_ycbcr_format, VdpBool *is_supported) { if (!is_supported) return VDP_STATUS_INVALID_POINTER; (void)device; (void)surface_chroma_type; (void)bits_ycbcr_format; // TODO: implement *is_supported = 1; return VDP_STATUS_OK; } VdpStatus softVdpVideoSurfaceCreate(VdpDevice device, VdpChromaType chroma_type, uint32_t width, uint32_t height, VdpVideoSurface *surface) { VdpStatus err_code; if (!surface) return VDP_STATUS_INVALID_POINTER; VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == deviceData) return VDP_STATUS_INVALID_HANDLE; VdpVideoSurfaceData *data = calloc(1, sizeof(VdpVideoSurfaceData)); if (NULL == data) { err_code = VDP_STATUS_RESOURCES; goto quit; } uint32_t const stride = (width % 4 == 0) ? width : (width & ~0x3UL) + 4; data->type = HANDLETYPE_VIDEO_SURFACE; data->device = deviceData; data->chroma_type = chroma_type; data->width = width; data->stride = stride; data->height = height; data->va_surf = VA_INVALID_SURFACE; data->va_glx = NULL; data->tex_id = 0; data->decoder = VDP_INVALID_HANDLE; glx_context_push_thread_local(deviceData); glGenTextures(1, &data->tex_id); glBindTexture(GL_TEXTURE_2D, data->tex_id); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, data->width, data->height, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); glFinish(); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpVideoSurfaceCreate): gl error %d\n", gl_error); free(data); err_code = VDP_STATUS_ERROR; goto quit; } if (deviceData->va_available) { // no VA 
surface creation here. Actual pool of VA surfaces should be allocated already // by VdpDecoderCreate. VdpDecoderCreate will update ->va_surf field as needed. data->y_plane = NULL; data->v_plane = NULL; data->u_plane = NULL; } else { //TODO: find valid storage size for chroma_type data->y_plane = malloc(stride * height); data->v_plane = malloc(stride * height / chroma_storage_size_divider(chroma_type)); data->u_plane = malloc(stride * height / chroma_storage_size_divider(chroma_type)); if (NULL == data->y_plane || NULL == data->v_plane || NULL == data->u_plane) { if (data->y_plane) free(data->y_plane); if (data->v_plane) free(data->v_plane); if (data->u_plane) free(data->u_plane); free(data); err_code = VDP_STATUS_RESOURCES; goto quit; } } deviceData->refcount ++; *surface = handle_insert(data); err_code = VDP_STATUS_OK; quit: handle_release(device); return err_code; } VdpStatus softVdpVideoSurfaceDestroy(VdpVideoSurface surface) { VdpVideoSurfaceData *videoSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE); if (NULL == videoSurfData) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = videoSurfData->device; glx_context_push_thread_local(deviceData); glDeleteTextures(1, &videoSurfData->tex_id); GLenum gl_error = glGetError(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpVideoSurfaceDestroy): gl error %d\n", gl_error); glx_context_pop(); handle_release(surface); return VDP_STATUS_ERROR; } if (videoSurfData->va_glx) { vaDestroySurfaceGLX(deviceData->va_dpy, videoSurfData->va_glx); } if (deviceData->va_available) { // return VA surface to the free list if (videoSurfData->decoder != VDP_INVALID_HANDLE) { VdpDecoderData *dd = handle_acquire(videoSurfData->decoder, HANDLETYPE_DECODER); if (NULL != dd) { free_list_push(dd->free_list, &dd->free_list_head, videoSurfData->rt_idx); handle_release(videoSurfData->decoder); } } // .va_surf will be freed in VdpDecoderDestroy } else { free(videoSurfData->y_plane); free(videoSurfData->v_plane); 
free(videoSurfData->u_plane); } glx_context_pop(); deviceData->refcount --; handle_expunge(surface); free(videoSurfData); return VDP_STATUS_OK; } VdpStatus softVdpVideoSurfaceGetParameters(VdpVideoSurface surface, VdpChromaType *chroma_type, uint32_t *width, uint32_t *height) { if (!chroma_type || !width || !height) return VDP_STATUS_INVALID_POINTER; VdpVideoSurfaceData *videoSurf = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE); if (NULL == videoSurf) return VDP_STATUS_INVALID_HANDLE; *chroma_type = videoSurf->chroma_type; *width = videoSurf->width; *height = videoSurf->height; handle_release(surface); return VDP_STATUS_OK; } VdpStatus softVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface, VdpYCbCrFormat destination_ycbcr_format, void *const *destination_data, uint32_t const *destination_pitches) { VdpStatus err_code; if (!destination_data || !destination_pitches) return VDP_STATUS_INVALID_POINTER; VdpVideoSurfaceData *srcSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE); if (NULL == srcSurfData) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = srcSurfData->device; VADisplay va_dpy = deviceData->va_dpy; if (deviceData->va_available) { VAImage q; vaDeriveImage(va_dpy, srcSurfData->va_surf, &q); if (VA_FOURCC('N', 'V', '1', '2') == q.format.fourcc && VDP_YCBCR_FORMAT_NV12 == destination_ycbcr_format) { uint8_t *img_data; vaMapBuffer(va_dpy, q.buf, (void **)&img_data); if (destination_pitches[0] == q.pitches[0] && destination_pitches[1] == q.pitches[1]) { memcpy(destination_data[0], img_data + q.offsets[0], q.width * q.height); memcpy(destination_data[1], img_data + q.offsets[1], q.width * q.height / 2); } else { uint8_t *src = img_data + q.offsets[0]; uint8_t *dst = destination_data[0]; for (unsigned int y = 0; y < q.height; y ++) { // Y plane memcpy (dst, src, q.width); src += q.pitches[0]; dst += destination_pitches[0]; } src = img_data + q.offsets[1]; dst = destination_data[1]; for (unsigned int y = 0; y < q.height / 2; y ++) { // UV 
plane memcpy(dst, src, q.width); // q.width/2 samples of U and V each, hence q.width src += q.pitches[1]; dst += destination_pitches[1]; } } vaUnmapBuffer(va_dpy, q.buf); } else if (VA_FOURCC('N', 'V', '1', '2') == q.format.fourcc && VDP_YCBCR_FORMAT_YV12 == destination_ycbcr_format) { uint8_t *img_data; vaMapBuffer(va_dpy, q.buf, (void **)&img_data); // Y plane if (destination_pitches[0] == q.pitches[0]) { memcpy(destination_data[0], img_data + q.offsets[0], q.width * q.height); } else { uint8_t *src = img_data + q.offsets[0]; uint8_t *dst = destination_data[0]; for (unsigned int y = 0; y < q.height; y ++) { memcpy (dst, src, q.width); src += q.pitches[0]; dst += destination_pitches[0]; } } // unpack mixed UV to separate planes for (unsigned int y = 0; y < q.height/2; y ++) { uint8_t *src = img_data + q.offsets[1] + y * q.pitches[1]; uint8_t *dst_u = destination_data[1] + y * destination_pitches[1]; uint8_t *dst_v = destination_data[2] + y * destination_pitches[2]; for (unsigned int x = 0; x < q.width/2; x++) { *dst_v++ = *src++; *dst_u++ = *src++; } } vaUnmapBuffer(va_dpy, q.buf); } else { const char *c = (const char *)&q.format.fourcc; traceError("error (softVdpVideoSurfaceGetBitsYCbCr): not implemented conversion " "VA FOURCC %c%c%c%c -> %s\n", *c, *(c+1), *(c+2), *(c+3), reverse_ycbcr_format(destination_ycbcr_format)); vaDestroyImage(va_dpy, q.image_id); err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT; goto quit; } vaDestroyImage(va_dpy, q.image_id); } else { // software fallback traceError("error (softVdpVideoSurfaceGetBitsYCbCr): not implemented software fallback\n"); err_code = VDP_STATUS_ERROR; goto quit; } GLenum gl_error = glGetError(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpVideoSurfaceGetBitsYCbCr): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } err_code = VDP_STATUS_OK; quit: handle_release(surface); return err_code; } VdpStatus softVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface, VdpYCbCrFormat 
source_ycbcr_format, void const *const *source_data, uint32_t const *source_pitches) { VdpStatus err_code; if (!source_data || !source_pitches) return VDP_STATUS_INVALID_POINTER; //TODO: figure out what to do with other formats VdpVideoSurfaceData *dstSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE); if (NULL == dstSurfData) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = dstSurfData->device; glx_context_push_thread_local(deviceData); if (deviceData->va_available) { if (VDP_YCBCR_FORMAT_YV12 != source_ycbcr_format) { traceError("error (softVdpVideoSurfacePutBitsYCbCr): not supported source_ycbcr_format " "%s\n", reverse_ycbcr_format(source_ycbcr_format)); err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT; goto quit; } // libswscale likes aligned data int stride = (dstSurfData->width + 7) & ~0x7; void *bgra_buf = memalign(16, stride * dstSurfData->height * 4); if (NULL == bgra_buf) { traceError("error (softVdpVideoSurfacePutBitsYCbCr): can not allocate memory\n"); err_code = VDP_STATUS_RESOURCES; goto quit; } // TODO: other source formats struct SwsContext *sws_ctx = sws_getContext(dstSurfData->width, dstSurfData->height, PIX_FMT_YUV420P, dstSurfData->width, dstSurfData->height, PIX_FMT_BGRA, SWS_POINT, NULL, NULL, NULL); if (NULL == sws_ctx) { traceError("error (softVdpVideoSurfacePutBitsYCbCr): can not create SwsContext\n"); free(bgra_buf); err_code = VDP_STATUS_RESOURCES; goto quit; } const uint8_t * const srcSlice[] = { source_data[0], source_data[2], source_data[1], NULL }; const int srcStride[] = { source_pitches[0], source_pitches[2], source_pitches[1], 0 }; uint8_t * const dst[] = { bgra_buf, NULL, NULL, NULL }; const int dstStride[] = { stride * 4, 0, 0, 0 }; int res = sws_scale(sws_ctx, srcSlice, srcStride, 0, dstSurfData->height, dst, dstStride); if (res != (int)dstSurfData->height) { traceError("error (softVdpVideoSurfacePutBitsYCbCr): sws_scale returned %d while " "%d expected\n", res, dstSurfData->height); free(bgra_buf); 
sws_freeContext(sws_ctx); err_code = VDP_STATUS_ERROR; goto quit; } sws_freeContext(sws_ctx); glBindTexture(GL_TEXTURE_2D, dstSurfData->tex_id); glPixelStorei(GL_UNPACK_ROW_LENGTH, stride); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, dstSurfData->width, dstSurfData->height, GL_BGRA, GL_UNSIGNED_BYTE, bgra_buf); glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); free(bgra_buf); } else { if (VDP_YCBCR_FORMAT_YV12 != source_ycbcr_format) { traceError("error (softVdpVideoSurfacePutBitsYCbCr): not supported source_ycbcr_format " "%s\n", reverse_ycbcr_format(source_ycbcr_format)); err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT; goto quit; } uint8_t const *src; uint8_t *dst; dst = dstSurfData->y_plane; src = source_data[0]; for (uint32_t k = 0; k < dstSurfData->height; k ++) { memcpy(dst, src, dstSurfData->width); dst += dstSurfData->stride; src += source_pitches[0]; } dst = dstSurfData->v_plane; src = source_data[1]; for (uint32_t k = 0; k < dstSurfData->height / 2; k ++) { memcpy(dst, src, dstSurfData->width / 2); dst += dstSurfData->stride / 2; src += source_pitches[1]; } dst = dstSurfData->u_plane; src = source_data[2]; for (uint32_t k = 0; k < dstSurfData->height / 2; k ++) { memcpy(dst, src, dstSurfData->width/2); dst += dstSurfData->stride / 2; src += source_pitches[2]; } } GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpVideoSurfacePutBitsYCbCr): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } err_code = VDP_STATUS_OK; quit: handle_release(surface); return err_code; } VdpStatus softVdpBitmapSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height) { VdpStatus err_code; VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == deviceData) return VDP_STATUS_INVALID_HANDLE; if (NULL == is_supported || NULL == max_width || NULL == max_height) { err_code = VDP_STATUS_INVALID_POINTER; goto quit; } 
switch (surface_rgba_format) { case VDP_RGBA_FORMAT_B8G8R8A8: case VDP_RGBA_FORMAT_R8G8B8A8: case VDP_RGBA_FORMAT_R10G10B10A2: case VDP_RGBA_FORMAT_B10G10R10A2: case VDP_RGBA_FORMAT_A8: *is_supported = 1; // All these formats should be supported by OpenGL break; // implementation. default: *is_supported = 0; break; } glx_context_push_thread_local(deviceData); GLint max_texture_size; glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpBitmapSurfaceQueryCapabilities): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } *max_width = max_texture_size; *max_height = max_texture_size; err_code = VDP_STATUS_OK; quit: handle_release(device); return err_code; } VdpStatus softVdpBitmapSurfaceCreate(VdpDevice device, VdpRGBAFormat rgba_format, uint32_t width, uint32_t height, VdpBool frequently_accessed, VdpBitmapSurface *surface) { VdpStatus err_code; if (!surface) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == deviceData) return VDP_STATUS_INVALID_HANDLE; VdpBitmapSurfaceData *data = calloc(1, sizeof(VdpBitmapSurfaceData)); if (NULL == data) { err_code = VDP_STATUS_RESOURCES; goto quit; } switch (rgba_format) { case VDP_RGBA_FORMAT_B8G8R8A8: data->gl_internal_format = GL_RGBA; data->gl_format = GL_BGRA; data->gl_type = GL_UNSIGNED_BYTE; data->bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_R8G8B8A8: data->gl_internal_format = GL_RGBA; data->gl_format = GL_RGBA; data->gl_type = GL_UNSIGNED_BYTE; data->bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_R10G10B10A2: data->gl_internal_format = GL_RGB10_A2; data->gl_format = GL_RGBA; data->gl_type = GL_UNSIGNED_INT_10_10_10_2; data->bytes_per_pixel = 4; break; case VDP_RGBA_FORMAT_B10G10R10A2: data->gl_internal_format = GL_RGB10_A2; data->gl_format = GL_BGRA; data->gl_type = GL_UNSIGNED_INT_10_10_10_2; data->bytes_per_pixel = 4; break; 
case VDP_RGBA_FORMAT_A8: data->gl_internal_format = GL_RGBA; data->gl_format = GL_RED; data->gl_type = GL_UNSIGNED_BYTE; data->bytes_per_pixel = 1; break; default: traceError("error (VdpBitmapSurfaceCreate): %s not implemented\n", reverse_rgba_format(rgba_format)); free(data); err_code = VDP_STATUS_INVALID_RGBA_FORMAT; goto quit; } data->type = HANDLETYPE_BITMAP_SURFACE; data->device = deviceData; data->rgba_format = rgba_format; data->width = width; data->height = height; data->frequently_accessed = frequently_accessed; // Frequently accessed bitmaps reside in system memory rather that in GPU texture. data->dirty = 0; if (frequently_accessed) { data->bitmap_data = calloc(width * height, data->bytes_per_pixel); if (NULL == data->bitmap_data) { traceError("error (VdpBitmapSurfaceCreate): calloc returned NULL\n"); free(data); err_code = VDP_STATUS_RESOURCES; goto quit; } } else { data->bitmap_data = NULL; } glx_context_push_thread_local(deviceData); glGenTextures(1, &data->tex_id); glBindTexture(GL_TEXTURE_2D, data->tex_id); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexImage2D(GL_TEXTURE_2D, 0, data->gl_internal_format, width, height, 0, data->gl_format, data->gl_type, NULL); glFinish(); GLuint gl_error = glGetError(); if (GL_NO_ERROR != gl_error) { // Requested RGBA format was wrong traceError("error (VdpBitmapSurfaceCreate): texture failure, gl error (%d, %s)\n", gl_error, gluErrorString(gl_error)); free(data); glx_context_pop(); err_code = VDP_STATUS_ERROR; goto quit; } if (VDP_RGBA_FORMAT_A8 == rgba_format) { // map red channel to alpha GLint swizzle_mask[] = {GL_ONE, GL_ONE, GL_ONE, GL_RED}; glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzle_mask); } gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) 
{ free(data); traceError("error (VdpBitmapSurfaceCreate): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } deviceData->refcount ++; *surface = handle_insert(data); err_code = VDP_STATUS_OK; quit: handle_release(device); return err_code; } VdpStatus softVdpBitmapSurfaceDestroy(VdpBitmapSurface surface) { VdpBitmapSurfaceData *data = handle_acquire(surface, HANDLETYPE_BITMAP_SURFACE); if (NULL == data) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = data->device; if (data->frequently_accessed) { free(data->bitmap_data); data->bitmap_data = NULL; } glx_context_push_thread_local(deviceData); glDeleteTextures(1, &data->tex_id); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpBitmapSurfaceDestroy): gl error %d\n", gl_error); handle_release(surface); return VDP_STATUS_ERROR; } handle_expunge(surface); deviceData->refcount --; free(data); return VDP_STATUS_OK; } VdpStatus softVdpBitmapSurfaceGetParameters(VdpBitmapSurface surface, VdpRGBAFormat *rgba_format, uint32_t *width, uint32_t *height, VdpBool *frequently_accessed) { VdpBitmapSurfaceData *srcSurfData = handle_acquire(surface, HANDLETYPE_BITMAP_SURFACE); if (NULL == srcSurfData) return VDP_STATUS_INVALID_HANDLE; if (NULL == rgba_format || NULL == width || NULL == height || NULL == frequently_accessed) { handle_release(surface); return VDP_STATUS_INVALID_POINTER; } *rgba_format = srcSurfData->rgba_format; *width = srcSurfData->width; *height = srcSurfData->height; *frequently_accessed = srcSurfData->frequently_accessed; handle_release(surface); return VDP_STATUS_OK; } VdpStatus softVdpBitmapSurfacePutBitsNative(VdpBitmapSurface surface, void const *const *source_data, uint32_t const *source_pitches, VdpRect const *destination_rect) { VdpStatus err_code; if (!source_data || !source_pitches) return VDP_STATUS_INVALID_POINTER; VdpBitmapSurfaceData *dstSurfData = handle_acquire(surface, HANDLETYPE_BITMAP_SURFACE); if (NULL == 
dstSurfData) return VDP_STATUS_INVALID_HANDLE; VdpDeviceData *deviceData = dstSurfData->device; VdpRect d_rect = {0, 0, dstSurfData->width, dstSurfData->height}; if (destination_rect) d_rect = *destination_rect; if (dstSurfData->frequently_accessed) { if (0 == d_rect.x0 && dstSurfData->width == d_rect.x1 && source_pitches[0] == d_rect.x1) { // full width const int bytes_to_copy = (d_rect.x1 - d_rect.x0) * (d_rect.y1 - d_rect.y0) * dstSurfData->bytes_per_pixel; memcpy(dstSurfData->bitmap_data + d_rect.y0 * dstSurfData->width * dstSurfData->bytes_per_pixel, source_data[0], bytes_to_copy); } else { const unsigned int bytes_in_line = (d_rect.x1-d_rect.x0)*dstSurfData->bytes_per_pixel; for (unsigned int y = d_rect.y0; y < d_rect.y1; y ++) { memcpy(dstSurfData->bitmap_data + (y * dstSurfData->width + d_rect.x0) * dstSurfData->bytes_per_pixel, source_data[0] + (y - d_rect.y0) * source_pitches[0], bytes_in_line); } } dstSurfData->dirty = 1; } else { glx_context_push_thread_local(deviceData); glBindTexture(GL_TEXTURE_2D, dstSurfData->tex_id); glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0]/dstSurfData->bytes_per_pixel); if (4 != dstSurfData->bytes_per_pixel) glPixelStorei(GL_UNPACK_ALIGNMENT, 1); glTexSubImage2D(GL_TEXTURE_2D, 0, d_rect.x0, d_rect.y0, d_rect.x1 - d_rect.x0, d_rect.y1 - d_rect.y0, dstSurfData->gl_format, dstSurfData->gl_type, source_data[0]); glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); if (4 != dstSurfData->bytes_per_pixel) glPixelStorei(GL_UNPACK_ALIGNMENT, 4); glFinish(); GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpBitmapSurfacePutBitsNative): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit; } } err_code = VDP_STATUS_OK; quit: handle_release(surface); return err_code; } void print_handle_type(int handle, void *item, void *p) { VdpGenericHandle *gh = item; struct { int cnt; int total_cnt; VdpDeviceData *deviceData; } *pp = p; pp->total_cnt ++; if (gh) { if (pp->deviceData == 
gh->parent) { traceError("handle %d type = %d\n", handle, gh->type); pp->cnt ++; } } } static void destroy_child_objects(int handle, void *item, void *p) { const void *parent = p; VdpGenericHandle *gh = item; if (gh) { if (parent == gh->parent) { switch (gh->type) { case HANDLETYPE_DEVICE: // do nothing break; case HANDLETYPE_PRESENTATION_QUEUE_TARGET: softVdpPresentationQueueDestroy(handle); break; case HANDLETYPE_PRESENTATION_QUEUE: softVdpPresentationQueueDestroy(handle); break; case HANDLETYPE_VIDEO_MIXER: softVdpVideoMixerDestroy(handle); break; case HANDLETYPE_OUTPUT_SURFACE: softVdpOutputSurfaceDestroy(handle); break; case HANDLETYPE_VIDEO_SURFACE: softVdpVideoSurfaceDestroy(handle); break; case HANDLETYPE_BITMAP_SURFACE: softVdpBitmapSurfaceDestroy(handle); break; case HANDLETYPE_DECODER: softVdpDecoderDestroy(handle); break; default: traceError("warning (destroy_child_objects): unknown handle type %d\n", gh->type); break; } } } } VdpStatus softVdpDeviceDestroy(VdpDevice device) { VdpStatus err_code; VdpDeviceData *data = handle_acquire(device, HANDLETYPE_DEVICE); if (NULL == data) return VDP_STATUS_INVALID_HANDLE; if (0 != data->refcount) { // Buggy client forgot to destroy dependend objects or decided that destroying // VdpDevice destroys all child object. Let's try to mitigate and prevent leakage. traceError("warning (softVdpDeviceDestroy): non-zero reference count (%d). 
" "Trying to free child objects.\n", data->refcount); void *parent_object = data; handle_execute_for_all(destroy_child_objects, parent_object); } if (0 != data->refcount) { traceError("error (softVdpDeviceDestroy): still non-zero reference count (%d)\n", data->refcount); traceError("Here is the list of objects:\n"); struct { int cnt; int total_cnt; VdpDeviceData *deviceData; } state = { .cnt = 0, .total_cnt = 0, .deviceData = data }; handle_execute_for_all(print_handle_type, &state); traceError("Objects leaked: %d\n", state.cnt); traceError("Objects visited during scan: %d\n", state.total_cnt); err_code = VDP_STATUS_ERROR; goto quit; } // cleaup libva if (data->va_available) vaTerminate(data->va_dpy); glx_context_push_thread_local(data); glDeleteTextures(1, &data->watermark_tex_id); glBindFramebuffer(GL_FRAMEBUFFER, 0); glx_context_pop(); pthread_mutex_lock(&global.glx_ctx_stack_mutex); glXMakeCurrent(data->display, None, NULL); pthread_mutex_unlock(&global.glx_ctx_stack_mutex); glx_context_unref_glc_hash_table(data->display); handle_xdpy_unref(data->display_orig); handle_expunge(device); free(data); GLenum gl_error = glGetError(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpDeviceDestroy): gl error %d\n", gl_error); err_code = VDP_STATUS_ERROR; goto quit_skip_release; } return VDP_STATUS_OK; quit: handle_release(device); quit_skip_release: return err_code; } VdpStatus softVdpGetInformationString(char const **information_string) { if (!information_string) return VDP_STATUS_INVALID_POINTER; *information_string = implemetation_description_string; return VDP_STATUS_OK; } VdpStatus softVdpGenerateCSCMatrix(VdpProcamp *procamp, VdpColorStandard standard, VdpCSCMatrix *csc_matrix) { if (!csc_matrix) return VDP_STATUS_INVALID_POINTER; if (procamp && VDP_PROCAMP_VERSION != procamp->struct_version) return VDP_STATUS_INVALID_VALUE; // TODO: do correct matricies calculation VdpCSCMatrix *m = csc_matrix; switch (standard) { case VDP_COLOR_STANDARD_ITUR_BT_601: 
(*m)[0][0] = 1.164f; (*m)[0][1] = 0.0f; (*m)[0][2] = 1.596f; (*m)[0][3] = -222.9f; (*m)[1][0] = 1.164f; (*m)[1][1] = -0.392f; (*m)[1][2] = -0.813f; (*m)[1][3] = 135.6f; (*m)[2][0] = 1.164f; (*m)[2][1] = 2.017f; (*m)[2][2] = 0.0f; (*m)[2][3] = -276.8f; break; case VDP_COLOR_STANDARD_ITUR_BT_709: (*m)[0][0] = 1.0f; (*m)[0][1] = 0.0f; (*m)[0][2] = 1.402f; (*m)[0][3] = -179.4f; (*m)[1][0] = 1.0f; (*m)[1][1] = -0.344f; (*m)[1][2] = -0.714f; (*m)[1][3] = 135.5f; (*m)[2][0] = 1.0f; (*m)[2][1] = 1.772f; (*m)[2][2] = 0.0f; (*m)[2][3] = -226.8f; break; case VDP_COLOR_STANDARD_SMPTE_240M: (*m)[0][0] = 0.581f; (*m)[0][1] = -0.764f; (*m)[0][2] = 1.576f; (*m)[0][3] = 0.0f; (*m)[1][0] = 0.581f; (*m)[1][1] = -0.991f; (*m)[1][2] = -0.477f; (*m)[1][3] = 0.0f; (*m)[2][0] = 0.581f; (*m)[2][1] = 1.062f; (*m)[2][2] = 0.000f; (*m)[2][3] = 0.0f; break; default: return VDP_STATUS_INVALID_COLOR_STANDARD; } return VDP_STATUS_OK; } static GLuint vdpBlendFuncToGLBlendFunc(VdpOutputSurfaceRenderBlendFactor blend_factor) { switch (blend_factor) { case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO: return GL_ZERO; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE: return GL_ONE; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_COLOR: return GL_SRC_COLOR; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_COLOR: return GL_ONE_MINUS_SRC_COLOR; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA: return GL_SRC_ALPHA; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA: return GL_ONE_MINUS_SRC_ALPHA; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_ALPHA: return GL_DST_ALPHA; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_ALPHA: return GL_ONE_MINUS_DST_ALPHA; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_COLOR: return GL_DST_COLOR; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_COLOR: return GL_ONE_MINUS_DST_COLOR; case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA_SATURATE: return GL_SRC_ALPHA_SATURATE; case 
VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_COLOR:
        return GL_CONSTANT_COLOR;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
        return GL_ONE_MINUS_CONSTANT_COLOR;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_ALPHA:
        return GL_CONSTANT_ALPHA;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
        return GL_ONE_MINUS_CONSTANT_ALPHA;
    default:
        // unknown factor; caller treats GL_INVALID_VALUE as an error marker
        return GL_INVALID_VALUE;
    }
}

// Maps a VDPAU blend equation to the GL equivalent; GL_INVALID_VALUE serves
// as an "unknown equation" sentinel.
static GLenum
vdpBlendEquationToGLEquation(VdpOutputSurfaceRenderBlendEquation blend_equation)
{
    switch (blend_equation) {
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_SUBTRACT:
        return GL_FUNC_SUBTRACT;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_REVERSE_SUBTRACT:
        return GL_FUNC_REVERSE_SUBTRACT;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD:
        return GL_FUNC_ADD;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MIN:
        return GL_MIN;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MAX:
        return GL_MAX;
    default:
        return GL_INVALID_VALUE;
    }
}

// Translated GL blend configuration plus validity flags for the incoming
// VDPAU blend state.
struct blend_state_struct {
    GLuint srcFuncRGB;
    GLuint srcFuncAlpha;
    GLuint dstFuncRGB;
    GLuint dstFuncAlpha;
    GLuint modeRGB;
    GLuint modeAlpha;
    int invalid_func;   // set when any blend factor failed to translate
    int invalid_eq;     // set when any blend equation failed to translate
};

// Converts an optional VdpOutputSurfaceRenderBlendState into GL blend
// functions and equations. A NULL blend_state selects the VDPAU defaults
// (source ONE, destination ZERO, equation ADD).
static struct blend_state_struct
vdpBlendStateToGLBlendState(VdpOutputSurfaceRenderBlendState const *blend_state)
{
    struct blend_state_struct bs;
    bs.invalid_func = 0;
    bs.invalid_eq = 0;

    // it's ok to pass NULL as blend_state
    if (blend_state) {
        bs.srcFuncRGB = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_source_color);
        bs.srcFuncAlpha = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_source_alpha);
        bs.dstFuncRGB = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_destination_color);
        bs.dstFuncAlpha = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_destination_alpha);
    } else {
        bs.srcFuncRGB = bs.srcFuncAlpha = GL_ONE;
        bs.dstFuncRGB = bs.dstFuncAlpha = GL_ZERO;
    }

    if (GL_INVALID_VALUE == bs.srcFuncRGB || GL_INVALID_VALUE == bs.srcFuncAlpha ||
        GL_INVALID_VALUE == bs.dstFuncRGB || GL_INVALID_VALUE == bs.dstFuncAlpha)
    {
        bs.invalid_func = 1;
    }

    if (blend_state) {
        bs.modeRGB = vdpBlendEquationToGLEquation(blend_state->blend_equation_color);
        bs.modeAlpha = vdpBlendEquationToGLEquation(blend_state->blend_equation_alpha);
    } else {
        bs.modeRGB = bs.modeAlpha = GL_FUNC_ADD;
    }
    if (GL_INVALID_VALUE == bs.modeRGB || GL_INVALID_VALUE == bs.modeAlpha)
        bs.invalid_eq = 1;

    return bs;
}

// Emits one textured/colored GL quad composing srcRect onto dstRect, honoring
// the rotation encoded in the low two bits of `flags` and optional per-vertex
// colors. Caller must have the framebuffer, texture and matrices set up;
// when has_src_surf is zero no texture coordinates are emitted.
static void
compose_surfaces(struct blend_state_struct bs, VdpRect srcRect, VdpRect dstRect,
                 VdpColor const *colors, int flags, int has_src_surf)
{
    glBlendFuncSeparate(bs.srcFuncRGB, bs.dstFuncRGB, bs.srcFuncAlpha, bs.dstFuncAlpha);
    glBlendEquationSeparate(bs.modeRGB, bs.modeAlpha);

    glColor4f(1, 1, 1, 1);
    glBegin(GL_QUADS);
    // vertex 1: destination top-left; source corner depends on rotation
    if (has_src_surf) {
        switch (flags & 3) {
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_0:
            glTexCoord2i(srcRect.x0, srcRect.y0);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_90:
            glTexCoord2i(srcRect.x0, srcRect.y1);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_180:
            glTexCoord2i(srcRect.x1, srcRect.y1);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_270:
            glTexCoord2i(srcRect.x1, srcRect.y0);
            break;
        }
    }
    // colors[0] applies to the whole quad unless COLOR_PER_VERTEX is set
    if (colors)
        glColor4f(colors[0].red, colors[0].green, colors[0].blue, colors[0].alpha);
    glVertex2f(dstRect.x0, dstRect.y0);
    // vertex 2: destination top-right
    if (has_src_surf) {
        switch (flags & 3) {
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_0:
            glTexCoord2i(srcRect.x1, srcRect.y0);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_90:
            glTexCoord2i(srcRect.x0, srcRect.y0);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_180:
            glTexCoord2i(srcRect.x0, srcRect.y1);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_270:
            glTexCoord2i(srcRect.x1, srcRect.y1);
            break;
        }
    }
    if (colors && (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX))
        glColor4f(colors[1].red, colors[1].green, colors[1].blue, colors[1].alpha);
    glVertex2f(dstRect.x1, dstRect.y0);
    // vertex 3: destination bottom-right
    if (has_src_surf) {
        switch (flags & 3) {
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_0:
            glTexCoord2i(srcRect.x1, srcRect.y1);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_90:
            glTexCoord2i(srcRect.x1, srcRect.y0);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_180:
            glTexCoord2i(srcRect.x0, srcRect.y0);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_270:
            glTexCoord2i(srcRect.x0, srcRect.y1);
            break;
        }
    }
    if (colors && (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX))
        glColor4f(colors[2].red, colors[2].green, colors[2].blue, colors[2].alpha);
    glVertex2f(dstRect.x1, dstRect.y1);
    // vertex 4: destination bottom-left
    if (has_src_surf) {
        switch (flags & 3) {
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_0:
            glTexCoord2i(srcRect.x0, srcRect.y1);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_90:
            glTexCoord2i(srcRect.x1, srcRect.y1);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_180:
            glTexCoord2i(srcRect.x1, srcRect.y0);
            break;
        case VDP_OUTPUT_SURFACE_RENDER_ROTATE_270:
            glTexCoord2i(srcRect.x0, srcRect.y0);
            break;
        }
    }
    if (colors && (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX))
        glColor4f(colors[3].red, colors[3].green, colors[3].blue, colors[3].alpha);
    glVertex2f(dstRect.x0, dstRect.y1);
    glEnd();
    // restore default color so later draws are unaffected
    glColor4f(1, 1, 1, 1);
}

// Blends a source output surface (or a solid fill when source_surface is
// invalid) into the destination output surface's FBO.
VdpStatus
softVdpOutputSurfaceRenderOutputSurface(VdpOutputSurface destination_surface,
                                        VdpRect const *destination_rect,
                                        VdpOutputSurface source_surface,
                                        VdpRect const *source_rect, VdpColor const *colors,
                                        VdpOutputSurfaceRenderBlendState const *blend_state,
                                        uint32_t flags)
{
    VdpStatus err_code;
    if (blend_state) {
        if (VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION != blend_state->struct_version) {
            err_code = VDP_STATUS_INVALID_VALUE;
            goto quit_skip_release;
        }
    }
    VdpOutputSurfaceData *dstSurfData =
        handle_acquire(destination_surface, HANDLETYPE_OUTPUT_SURFACE);
    // source may legitimately be an invalid handle (srcSurfData == NULL)
    VdpOutputSurfaceData *srcSurfData =
        handle_acquire(source_surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == dstSurfData) {
        err_code = VDP_STATUS_INVALID_HANDLE;
        goto quit;
    }
    if (srcSurfData && srcSurfData->device != dstSurfData->device) {
        err_code = VDP_STATUS_HANDLE_DEVICE_MISMATCH;
        goto quit;
    }
    VdpDeviceData *deviceData = dstSurfData->device;

    VdpRect s_rect = {0, 0, 0, 0};
    VdpRect d_rect = {0, 0, dstSurfData->width, dstSurfData->height};
    s_rect.x1 = srcSurfData ? srcSurfData->width : 1;
    s_rect.y1 = srcSurfData ? srcSurfData->height : 1;
    if (source_rect)
        s_rect = *source_rect;
    if (destination_rect)
        d_rect = *destination_rect;

    // select blend functions
    struct blend_state_struct bs = vdpBlendStateToGLBlendState(blend_state);
    if (bs.invalid_func) {
        err_code = VDP_STATUS_INVALID_BLEND_FACTOR;
        goto quit;
    }
    if (bs.invalid_eq) {
        err_code = VDP_STATUS_INVALID_BLEND_EQUATION;
        goto quit;
    }

    glx_context_push_thread_local(deviceData);
    glBindFramebuffer(GL_FRAMEBUFFER, dstSurfData->fbo_id);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, dstSurfData->width, 0, dstSurfData->height, -1.0f, 1.0f);
    glViewport(0, 0, dstSurfData->width, dstSurfData->height);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    if (srcSurfData) {
        glBindTexture(GL_TEXTURE_2D, srcSurfData->tex_id);
        // texture matrix maps pixel coordinates to normalized texture coordinates
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glScalef(1.0f/srcSurfData->width, 1.0f/srcSurfData->height, 1.0f);
    }

    compose_surfaces(bs, s_rect, d_rect, colors, flags, !!srcSurfData);
    glFinish();

    GLenum gl_error = glGetError();
    glx_context_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (VdpOutputSurfaceRenderOutputSurface): gl error %d\n", gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    err_code = VDP_STATUS_OK;
quit:
    handle_release(source_surface);
    handle_release(destination_surface);
quit_skip_release:
    return err_code;
}

// Blends a bitmap surface into the destination output surface's FBO,
// uploading any pending system-memory changes (dirty flag) first.
VdpStatus
softVdpOutputSurfaceRenderBitmapSurface(VdpOutputSurface destination_surface,
                                        VdpRect const *destination_rect,
                                        VdpBitmapSurface source_surface,
                                        VdpRect const *source_rect, VdpColor const *colors,
                                        VdpOutputSurfaceRenderBlendState const *blend_state,
                                        uint32_t flags)
{
    VdpStatus err_code;
    if (blend_state) {
        if (VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION != blend_state->struct_version) {
            err_code = VDP_STATUS_INVALID_VALUE;
            goto quit_skip_release;
        }
    }
    VdpOutputSurfaceData *dstSurfData =
        handle_acquire(destination_surface, HANDLETYPE_OUTPUT_SURFACE);
    // source may legitimately be an invalid handle (srcSurfData == NULL)
    VdpBitmapSurfaceData *srcSurfData =
        handle_acquire(source_surface, HANDLETYPE_BITMAP_SURFACE);
    if (NULL == dstSurfData) {
        err_code = VDP_STATUS_INVALID_HANDLE;
        goto quit;
    }
    if (srcSurfData && srcSurfData->device != dstSurfData->device) {
        err_code = VDP_STATUS_HANDLE_DEVICE_MISMATCH;
        goto quit;
    }
    VdpDeviceData *deviceData = dstSurfData->device;

    VdpRect s_rect = {0, 0, 0, 0};
    VdpRect d_rect = {0, 0, dstSurfData->width, dstSurfData->height};
    s_rect.x1 = srcSurfData ? srcSurfData->width : 1;
    s_rect.y1 = srcSurfData ? srcSurfData->height : 1;
    if (source_rect)
        s_rect = *source_rect;
    if (destination_rect)
        d_rect = *destination_rect;

    // select blend functions
    struct blend_state_struct bs = vdpBlendStateToGLBlendState(blend_state);
    if (bs.invalid_func) {
        err_code = VDP_STATUS_INVALID_BLEND_FACTOR;
        goto quit;
    }
    if (bs.invalid_eq) {
        err_code = VDP_STATUS_INVALID_BLEND_EQUATION;
        goto quit;
    }

    glx_context_push_thread_local(deviceData);
    glBindFramebuffer(GL_FRAMEBUFFER, dstSurfData->fbo_id);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, dstSurfData->width, 0, dstSurfData->height, -1.0f, 1.0f);
    glViewport(0, 0, dstSurfData->width, dstSurfData->height);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    if (srcSurfData) {
        glBindTexture(GL_TEXTURE_2D, srcSurfData->tex_id);
        if (srcSurfData->dirty) {
            // frequently-accessed bitmap has pending system-memory changes; upload now
            if (4 != srcSurfData->bytes_per_pixel)
                glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, srcSurfData->width, srcSurfData->height,
                            srcSurfData->gl_format, srcSurfData->gl_type,
                            srcSurfData->bitmap_data);
            if (4 != srcSurfData->bytes_per_pixel)
                glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
            srcSurfData->dirty = 0;
        }
        // texture matrix maps pixel coordinates to normalized texture coordinates
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glScalef(1.0f/srcSurfData->width, 1.0f/srcSurfData->height, 1.0f);
    }

    compose_surfaces(bs, s_rect, d_rect, colors, flags, !!srcSurfData);
    glFinish();

    GLenum gl_error = glGetError();
    glx_context_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (VdpOutputSurfaceRenderBitmapSurface): gl error %d\n", gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    err_code = VDP_STATUS_OK;
quit:
    handle_release(source_surface);
    handle_release(destination_surface);
quit_skip_release:
    return err_code;
}

// Preemption never happens in this implementation, so the callback is
// accepted and ignored.
VdpStatus
softVdpPreemptionCallbackRegister(VdpDevice device, VdpPreemptionCallback callback,
                                  void *context)
{
    (void)device; (void)callback; (void)context;
    return VDP_STATUS_OK;
}

// =========================

// Returns the entry point for the given VDPAU function id. Most functions
// are returned via their trace wrappers; GetErrorString and GetProcAddress
// are returned directly (soft variants).
VdpStatus
softVdpGetProcAddress(VdpDevice device, VdpFuncId function_id, void **function_pointer)
{
    (void)device;   // there is no difference between various devices. All have same procedures
    if (!function_pointer)
        return VDP_STATUS_INVALID_POINTER;
    switch (function_id) {
    case VDP_FUNC_ID_GET_ERROR_STRING:
        *function_pointer = &softVdpGetErrorString;
        break;
    case VDP_FUNC_ID_GET_PROC_ADDRESS:
        *function_pointer = &softVdpGetProcAddress;
        break;
    case VDP_FUNC_ID_GET_API_VERSION:
        *function_pointer = &traceVdpGetApiVersion;
        break;
    case VDP_FUNC_ID_GET_INFORMATION_STRING:
        *function_pointer = &traceVdpGetInformationString;
        break;
    case VDP_FUNC_ID_DEVICE_DESTROY:
        *function_pointer = &traceVdpDeviceDestroy;
        break;
    case VDP_FUNC_ID_GENERATE_CSC_MATRIX:
        *function_pointer = &traceVdpGenerateCSCMatrix;
        break;
    case VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES:
        *function_pointer = &traceVdpVideoSurfaceQueryCapabilities;
        break;
    case VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES:
        *function_pointer = &traceVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities;
        break;
    case VDP_FUNC_ID_VIDEO_SURFACE_CREATE:
        *function_pointer = &traceVdpVideoSurfaceCreate;
        break;
    case VDP_FUNC_ID_VIDEO_SURFACE_DESTROY:
        *function_pointer = &traceVdpVideoSurfaceDestroy;
        break;
    case VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS:
        *function_pointer = &traceVdpVideoSurfaceGetParameters;
        break;
    case VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR:
        *function_pointer = &traceVdpVideoSurfaceGetBitsYCbCr;
        break;
    case VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR:
        *function_pointer =
&traceVdpVideoSurfacePutBitsYCbCr; break; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES: *function_pointer = &traceVdpOutputSurfaceQueryCapabilities; break; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES: *function_pointer = &traceVdpOutputSurfaceQueryGetPutBitsNativeCapabilities; break; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES: *function_pointer = &traceVdpOutputSurfaceQueryPutBitsIndexedCapabilities; break; case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES: *function_pointer = &traceVdpOutputSurfaceQueryPutBitsYCbCrCapabilities; break; case VDP_FUNC_ID_OUTPUT_SURFACE_CREATE: *function_pointer = &traceVdpOutputSurfaceCreate; break; case VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY: *function_pointer = &traceVdpOutputSurfaceDestroy; break; case VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS: *function_pointer = &traceVdpOutputSurfaceGetParameters; break; case VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE: *function_pointer = &traceVdpOutputSurfaceGetBitsNative; break; case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE: *function_pointer = &traceVdpOutputSurfacePutBitsNative; break; case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED: *function_pointer = &traceVdpOutputSurfacePutBitsIndexed; break; case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR: *function_pointer = &traceVdpOutputSurfacePutBitsYCbCr; break; case VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES: *function_pointer = &traceVdpBitmapSurfaceQueryCapabilities; break; case VDP_FUNC_ID_BITMAP_SURFACE_CREATE: *function_pointer = &traceVdpBitmapSurfaceCreate; break; case VDP_FUNC_ID_BITMAP_SURFACE_DESTROY: *function_pointer = &traceVdpBitmapSurfaceDestroy; break; case VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS: *function_pointer = &traceVdpBitmapSurfaceGetParameters; break; case VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE: *function_pointer = &traceVdpBitmapSurfacePutBitsNative; break; case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE: *function_pointer = 
&traceVdpOutputSurfaceRenderOutputSurface; break; case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE: *function_pointer = &traceVdpOutputSurfaceRenderBitmapSurface; break; case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_VIDEO_SURFACE_LUMA: // *function_pointer = &traceVdpOutputSurfaceRenderVideoSurfaceLuma; *function_pointer = NULL; break; case VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES: *function_pointer = &traceVdpDecoderQueryCapabilities; break; case VDP_FUNC_ID_DECODER_CREATE: *function_pointer = &traceVdpDecoderCreate; break; case VDP_FUNC_ID_DECODER_DESTROY: *function_pointer = &traceVdpDecoderDestroy; break; case VDP_FUNC_ID_DECODER_GET_PARAMETERS: *function_pointer = &traceVdpDecoderGetParameters; break; case VDP_FUNC_ID_DECODER_RENDER: *function_pointer = &traceVdpDecoderRender; break; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT: *function_pointer = &traceVdpVideoMixerQueryFeatureSupport; break; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT: *function_pointer = &traceVdpVideoMixerQueryParameterSupport; break; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT: *function_pointer = &traceVdpVideoMixerQueryAttributeSupport; break; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE: *function_pointer = &traceVdpVideoMixerQueryParameterValueRange; break; case VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE: *function_pointer = &traceVdpVideoMixerQueryAttributeValueRange; break; case VDP_FUNC_ID_VIDEO_MIXER_CREATE: *function_pointer = &traceVdpVideoMixerCreate; break; case VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES: *function_pointer = &traceVdpVideoMixerSetFeatureEnables; break; case VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES: *function_pointer = &traceVdpVideoMixerSetAttributeValues; break; case VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT: *function_pointer = &traceVdpVideoMixerGetFeatureSupport; break; case VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES: *function_pointer = &traceVdpVideoMixerGetFeatureEnables; break; case 
VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES: *function_pointer = &traceVdpVideoMixerGetParameterValues; break; case VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES: *function_pointer = &traceVdpVideoMixerGetAttributeValues; break; case VDP_FUNC_ID_VIDEO_MIXER_DESTROY: *function_pointer = &traceVdpVideoMixerDestroy; break; case VDP_FUNC_ID_VIDEO_MIXER_RENDER: *function_pointer = &traceVdpVideoMixerRender; break; case VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY: *function_pointer = &traceVdpPresentationQueueTargetDestroy; break; case VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE: *function_pointer = &traceVdpPresentationQueueCreate; break; case VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY: *function_pointer = &traceVdpPresentationQueueDestroy; break; case VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR: *function_pointer = &traceVdpPresentationQueueSetBackgroundColor; break; case VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR: *function_pointer = &traceVdpPresentationQueueGetBackgroundColor; break; case VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME: *function_pointer = &traceVdpPresentationQueueGetTime; break; case VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY: *function_pointer = &traceVdpPresentationQueueDisplay; break; case VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE: *function_pointer = &traceVdpPresentationQueueBlockUntilSurfaceIdle; break; case VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS: *function_pointer = &traceVdpPresentationQueueQuerySurfaceStatus; break; case VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER: *function_pointer = &traceVdpPreemptionCallbackRegister; break; case VDP_FUNC_ID_BASE_WINSYS: *function_pointer = &traceVdpPresentationQueueTargetCreateX11; break; default: *function_pointer = NULL; break; } // switch if (NULL == *function_pointer) return VDP_STATUS_INVALID_FUNC_ID; return VDP_STATUS_OK; } VdpStatus softVdpDeviceCreateX11(Display *display_orig, int screen, VdpDevice *device, VdpGetProcAddress **get_proc_address) { if (!display_orig || 
!device || !get_proc_address) return VDP_STATUS_INVALID_POINTER; // Let's get own connection to the X server Display *display = handle_xdpy_ref(display_orig); if (NULL == display) return VDP_STATUS_ERROR; if (global.quirks.buggy_XCloseDisplay) { // XCloseDisplay could segfault on fglrx. To avoid calling XCloseDisplay, // make one more reference to xdpy copy. handle_xdpy_ref(display_orig); } VdpDeviceData *data = calloc(1, sizeof(VdpDeviceData)); if (NULL == data) return VDP_STATUS_RESOURCES; data->type = HANDLETYPE_DEVICE; data->display = display; data->display_orig = display_orig; // save supplied pointer too data->screen = screen; data->refcount = 0; data->root = DefaultRootWindow(display); // create master GLX context to share data between further created ones glx_context_ref_glc_hash_table(display, screen); data->root_glc = glx_context_get_root_context(); glx_context_push_thread_local(data); glClearColor(0.0f, 0.0f, 0.0f, 0.0f); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); // initialize VAAPI if (global.quirks.avoid_va) { // pretend there is no VA-API available data->va_available = 0; } else { data->va_dpy = vaGetDisplayGLX(display); data->va_available = 0; VAStatus status = vaInitialize(data->va_dpy, &data->va_version_major, &data->va_version_minor); if (VA_STATUS_SUCCESS == status) { data->va_available = 1; traceInfo("libva (version %d.%d) library initialized\n", data->va_version_major, data->va_version_minor); } else { data->va_available = 0; traceInfo("warning: failed to initialize libva. 
" "No video decode acceleration available.\n"); } } glGenTextures(1, &data->watermark_tex_id); glBindTexture(GL_TEXTURE_2D, data->watermark_tex_id); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_R, GL_ONE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_G, GL_ONE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, GL_ONE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_A, GL_RED); glPixelStorei(GL_UNPACK_ALIGNMENT, 1); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, watermark_width, watermark_height, 0, GL_RED, GL_UNSIGNED_BYTE, watermark_data); glPixelStorei(GL_UNPACK_ALIGNMENT, 4); glFinish(); *device = handle_insert(data); *get_proc_address = &softVdpGetProcAddress; GLenum gl_error = glGetError(); glx_context_pop(); if (GL_NO_ERROR != gl_error) { traceError("error (VdpDeviceCreateX11): gl error %d\n", gl_error); return VDP_STATUS_ERROR; } return VDP_STATUS_OK; } libvdpau-va-gl-0.2.1/vdpau-soft.h000066400000000000000000000252071224142723700165770ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ #ifndef VDPAU_SOFT_H_ #define VDPAU_SOFT_H_ #include #include #include #include #include "handle-storage.h" #define MAX_RENDER_TARGETS 21 #define NUM_RENDER_TARGETS_H264 21 #define PRESENTATION_QUEUE_LENGTH 10 /** @brief VdpDevice object parameters */ typedef struct { HandleType type; ///< common type field void *self; ///< link to device. 
For VdpDeviceData this is link to itself pthread_mutex_t lock; int refcount; Display *display; ///< own X display connection Display *display_orig; ///< supplied X display connection int screen; ///< X screen GLXContext root_glc; ///< master GL context Window root; ///< X drawable (root window) used for offscreen drawing VADisplay va_dpy; ///< VA display int va_available; ///< 1 if VA-API available int va_version_major; int va_version_minor; GLuint watermark_tex_id; ///< GL texture id for watermark } VdpDeviceData; /** @brief VdpVideoMixer object parameters */ typedef struct { HandleType type; ///< handle type VdpDeviceData *device; ///< link to parent pthread_mutex_t lock; } VdpVideoMixerData; /** @brief VdpOutputSurface object parameters */ typedef struct { HandleType type; ///< handle type VdpDeviceData *device; ///< link to parent pthread_mutex_t lock; VdpRGBAFormat rgba_format; ///< RGBA format of data stored GLuint tex_id; ///< associated GL texture id GLuint fbo_id; ///< framebuffer object id uint32_t width; uint32_t height; GLuint gl_internal_format; ///< GL texture format: internal format GLuint gl_format; ///< GL texture format: preferred external format GLuint gl_type; ///< GL texture format: pixel type unsigned int bytes_per_pixel; ///< number of bytes per pixel VdpTime first_presentation_time; ///< first displayed time in queue VdpPresentationQueueStatus status; ///< status in presentation queue VdpTime queued_at; } VdpOutputSurfaceData; /** @brief VdpPresentationQueueTarget object parameters */ typedef struct { HandleType type; ///< handle type VdpDeviceData *device; ///< link to parent pthread_mutex_t lock; int refcount; Drawable drawable; ///< X drawable to output to GLXContext glc; ///< GL context used for output } VdpPresentationQueueTargetData; /** @brief VdpPresentationQueue object parameters */ typedef struct { HandleType type; ///< handle type VdpDeviceData *device; ///< link to parent pthread_mutex_t lock; VdpPresentationQueueTargetData 
*target; VdpColor bg_color; ///< background color struct { int head; int used; int firstfree; int freelist[PRESENTATION_QUEUE_LENGTH]; struct { VdpTime t; ///< earliest_presentation_time int next; uint32_t clip_width; uint32_t clip_height; VdpOutputSurface surface; } item[PRESENTATION_QUEUE_LENGTH]; } queue; pthread_t worker_thread; pthread_mutex_t queue_mutex; pthread_cond_t new_work_available; } VdpPresentationQueueData; /** @brief VdpVideoSurface object parameters */ typedef struct { HandleType type; ///< handle type VdpDeviceData *device; ///< link to parent pthread_mutex_t lock; VdpChromaType chroma_type; ///< video chroma type uint32_t width; uint32_t stride; ///< distance between first pixels of two consecutive rows ///< in pixels uint32_t height; void *y_plane; ///< luma data (software) void *v_plane; ///< chroma data (software) void *u_plane; ///< chroma data (software) VASurfaceID va_surf; ///< VA-API surface void *va_glx; ///< handle for VA-API/GLX interaction GLuint tex_id; ///< GL texture id (RGBA) VdpDecoder decoder; ///< associated VdpDecoder int32_t rt_idx; ///< index in VdpDecoder's render_targets } VdpVideoSurfaceData; /** @brief VdpBitmapSurface object parameters */ typedef struct { HandleType type; ///< handle type VdpDeviceData *device; ///< link to parent pthread_mutex_t lock; VdpRGBAFormat rgba_format; ///< RGBA format of data stored GLuint tex_id; ///< GL texture id uint32_t width; uint32_t height; VdpBool frequently_accessed;///< 1 if surface should be optimized for frequent access unsigned int bytes_per_pixel; ///< number of bytes per bitmap pixel GLuint gl_internal_format; ///< GL texture format: internal format GLuint gl_format; ///< GL texture format: preferred external format GLuint gl_type; ///< GL texture format: pixel type char *bitmap_data; ///< system-memory buffer for frequently accessed bitmaps int dirty; ///< dirty flag. 
True if system-memory buffer contains data ///< newer than GPU texture contents } VdpBitmapSurfaceData; /** @brief VdpDecoder object parameters */ typedef struct { HandleType type; ///< handle type VdpDeviceData *device; ///< link to parent pthread_mutex_t lock; VdpDecoderProfile profile; ///< decoder profile uint32_t width; uint32_t height; uint32_t max_references; ///< maximum count of reference frames VAConfigID config_id; ///< VA-API config id VASurfaceID render_targets[MAX_RENDER_TARGETS]; ///< spare VA surfaces int32_t free_list_head; int32_t free_list[MAX_RENDER_TARGETS]; uint32_t num_render_targets; VAContextID context_id; ///< VA-API context id } VdpDecoderData; VdpStatus softVdpDeviceCreateX11(Display *display, int screen, VdpDevice *device, VdpGetProcAddress **get_proc_address); VdpGetApiVersion softVdpGetApiVersion; VdpDecoderQueryCapabilities softVdpDecoderQueryCapabilities; VdpDecoderCreate softVdpDecoderCreate; VdpDecoderDestroy softVdpDecoderDestroy; VdpDecoderGetParameters softVdpDecoderGetParameters; VdpDecoderRender softVdpDecoderRender; VdpOutputSurfaceQueryCapabilities softVdpOutputSurfaceQueryCapabilities; VdpOutputSurfaceQueryGetPutBitsNativeCapabilities softVdpOutputSurfaceQueryGetPutBitsNativeCapabilities; VdpOutputSurfaceQueryPutBitsIndexedCapabilities softVdpOutputSurfaceQueryPutBitsIndexedCapabilities; VdpOutputSurfaceQueryPutBitsYCbCrCapabilities softVdpOutputSurfaceQueryPutBitsYCbCrCapabilities; VdpOutputSurfaceCreate softVdpOutputSurfaceCreate; VdpOutputSurfaceDestroy softVdpOutputSurfaceDestroy; VdpOutputSurfaceGetParameters softVdpOutputSurfaceGetParameters; VdpOutputSurfaceGetBitsNative softVdpOutputSurfaceGetBitsNative; VdpOutputSurfacePutBitsNative softVdpOutputSurfacePutBitsNative; VdpOutputSurfacePutBitsIndexed softVdpOutputSurfacePutBitsIndexed; VdpOutputSurfacePutBitsYCbCr softVdpOutputSurfacePutBitsYCbCr; VdpVideoMixerQueryFeatureSupport softVdpVideoMixerQueryFeatureSupport; VdpVideoMixerQueryParameterSupport 
softVdpVideoMixerQueryParameterSupport; VdpVideoMixerQueryAttributeSupport softVdpVideoMixerQueryAttributeSupport; VdpVideoMixerQueryParameterValueRange softVdpVideoMixerQueryParameterValueRange; VdpVideoMixerQueryAttributeValueRange softVdpVideoMixerQueryAttributeValueRange; VdpVideoMixerCreate softVdpVideoMixerCreate; VdpVideoMixerSetFeatureEnables softVdpVideoMixerSetFeatureEnables; VdpVideoMixerSetAttributeValues softVdpVideoMixerSetAttributeValues; VdpVideoMixerGetFeatureSupport softVdpVideoMixerGetFeatureSupport; VdpVideoMixerGetFeatureEnables softVdpVideoMixerGetFeatureEnables; VdpVideoMixerGetParameterValues softVdpVideoMixerGetParameterValues; VdpVideoMixerGetAttributeValues softVdpVideoMixerGetAttributeValues; VdpVideoMixerDestroy softVdpVideoMixerDestroy; VdpVideoMixerRender softVdpVideoMixerRender; VdpPresentationQueueTargetDestroy softVdpPresentationQueueTargetDestroy; VdpPresentationQueueCreate softVdpPresentationQueueCreate; VdpPresentationQueueDestroy softVdpPresentationQueueDestroy; VdpPresentationQueueSetBackgroundColor softVdpPresentationQueueSetBackgroundColor; VdpPresentationQueueGetBackgroundColor softVdpPresentationQueueGetBackgroundColor; VdpPresentationQueueGetTime softVdpPresentationQueueGetTime; VdpPresentationQueueDisplay softVdpPresentationQueueDisplay; VdpPresentationQueueBlockUntilSurfaceIdle softVdpPresentationQueueBlockUntilSurfaceIdle; VdpPresentationQueueQuerySurfaceStatus softVdpPresentationQueueQuerySurfaceStatus; VdpVideoSurfaceQueryCapabilities softVdpVideoSurfaceQueryCapabilities; VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities softVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities; VdpVideoSurfaceCreate softVdpVideoSurfaceCreate; VdpVideoSurfaceDestroy softVdpVideoSurfaceDestroy; VdpVideoSurfaceGetParameters softVdpVideoSurfaceGetParameters; VdpVideoSurfaceGetBitsYCbCr softVdpVideoSurfaceGetBitsYCbCr; VdpVideoSurfacePutBitsYCbCr softVdpVideoSurfacePutBitsYCbCr; VdpBitmapSurfaceQueryCapabilities 
softVdpBitmapSurfaceQueryCapabilities; VdpBitmapSurfaceCreate softVdpBitmapSurfaceCreate; VdpBitmapSurfaceDestroy softVdpBitmapSurfaceDestroy; VdpBitmapSurfaceGetParameters softVdpBitmapSurfaceGetParameters; VdpBitmapSurfacePutBitsNative softVdpBitmapSurfacePutBitsNative; VdpDeviceDestroy softVdpDeviceDestroy; VdpGetInformationString softVdpGetInformationString; VdpGenerateCSCMatrix softVdpGenerateCSCMatrix; VdpOutputSurfaceRenderOutputSurface softVdpOutputSurfaceRenderOutputSurface; VdpOutputSurfaceRenderBitmapSurface softVdpOutputSurfaceRenderBitmapSurface; VdpPreemptionCallbackRegister softVdpPreemptionCallbackRegister; VdpPresentationQueueTargetCreateX11 softVdpPresentationQueueTargetCreateX11; VdpGetProcAddress softVdpGetProcAddress; #endif /* VDPAU_SOFT_H_ */ libvdpau-va-gl-0.2.1/vdpau-trace.c000066400000000000000000001612101224142723700167100ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #include #include #include #include "vdpau-soft.h" #include "vdpau-trace.h" #include "reverse-constant.h" static FILE *tlog = NULL; ///< trace target static const char *trace_header = "[VS] "; static const char *trace_header_blank = " "; static int trace_enabled = 1; static void (*trace_hook)(void *, void *, int, int); static void *trace_hook_longterm_param = NULL; void traceEnableTracing(int flag) { trace_enabled = !!flag; } void traceSetTarget(FILE *target) { tlog = target; } void traceResetTarget(void) { tlog = stdout; } void traceSetHook(void (*hook)(void *param1, void *param2, int origin, int after), void *param) { trace_hook = hook; trace_hook_longterm_param = param; } void traceCallHook(int origin, int after, void *shortterm_param) { if (!trace_enabled) return; if (trace_hook) trace_hook(trace_hook_longterm_param, shortterm_param, origin, after); } void traceSetHeader(const char *header, const char *header_blank) { trace_header = header; trace_header_blank = header_blank; } void traceInfo(const char *fmt, ...) { if (!trace_enabled) return; va_list args; traceCallHook(-2, 0, NULL); fprintf(tlog, "%s", trace_header); va_start(args, fmt); vfprintf(tlog, fmt, args); va_end(args); } void traceError(const char *fmt, ...) 
{ va_list args; fprintf(stderr, "%s", trace_header); va_start(args, fmt); vfprintf(stderr, fmt, args); va_end(args); } static const char * rect2string(VdpRect const *rect) { // use buffer pool to enable printing many rects in one printf expression static char bufs[8][100]; static int i_ptr = 0; i_ptr = (i_ptr + 1) % 8; char *buf = &bufs[i_ptr][0]; if (NULL == rect) { snprintf(buf, 100, "NULL"); } else { snprintf(buf, 100, "(%d,%d,%d,%d)", rect->x0, rect->y0, rect->x1, rect->y1); } return buf; } VdpStatus traceVdpGetApiVersion(uint32_t *api_version) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_GET_API_VERSION, 0, NULL); fprintf(tlog, "%s%s VdpGetApiVersion\n", trace_header, impl_state); skip:; VdpStatus ret = softVdpGetApiVersion(api_version); traceCallHook(VDP_FUNC_ID_GET_API_VERSION, 1, (void *)ret); return ret; } VdpStatus traceVdpDecoderQueryCapabilities(VdpDevice device, VdpDecoderProfile profile, VdpBool *is_supported, uint32_t *max_level, uint32_t *max_macroblocks, uint32_t *max_width, uint32_t *max_height) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES, 0, NULL); fprintf(tlog, "%s%s VdpDecoderQueryCapabilities device=%d, profile=%s\n", trace_header, impl_state, device, reverse_decoder_profile(profile)); skip:; VdpStatus ret = softVdpDecoderQueryCapabilities(device, profile, is_supported, max_level, max_macroblocks, max_width, max_height); traceCallHook(VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES, 1, (void*)ret); return ret; } VdpStatus traceVdpDecoderCreate(VdpDevice device, VdpDecoderProfile profile, uint32_t width, uint32_t height, uint32_t max_references, VdpDecoder *decoder) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_DECODER_CREATE, 0, NULL); fprintf(tlog, "%s%s VdpDecoderCreate device=%d, profile=%s, width=%d, height=%d, " "max_references=%d\n", trace_header, impl_state, device, 
reverse_decoder_profile(profile), width, height, max_references); skip:; VdpStatus ret = softVdpDecoderCreate(device, profile, width, height, max_references, decoder); traceCallHook(VDP_FUNC_ID_DECODER_CREATE, 1, (void*)ret); return ret; } VdpStatus traceVdpDecoderDestroy(VdpDecoder decoder) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_DECODER_DESTROY, 0, NULL); fprintf(tlog, "%s%s VdpDecoderDestroy decoder=%d\n", trace_header, impl_state, decoder); skip:; VdpStatus ret = softVdpDecoderDestroy(decoder); traceCallHook(VDP_FUNC_ID_DECODER_DESTROY, 1, (void*)ret); return ret; } VdpStatus traceVdpDecoderGetParameters(VdpDecoder decoder, VdpDecoderProfile *profile, uint32_t *width, uint32_t *height) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_DECODER_GET_PARAMETERS, 0, NULL); fprintf(tlog, "%s%s VdpDecoderGetParameters decoder=%d\n", trace_header, impl_state, decoder); skip:; VdpStatus ret = softVdpDecoderGetParameters(decoder, profile, width, height); traceCallHook(VDP_FUNC_ID_DECODER_GET_PARAMETERS, 1, (void*)ret); return ret; } VdpStatus traceVdpDecoderRender(VdpDecoder decoder, VdpVideoSurface target, VdpPictureInfo const *picture_info, uint32_t bitstream_buffer_count, VdpBitstreamBuffer const *bitstream_buffers) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_DECODER_RENDER, 0, NULL); fprintf(tlog, "%s%s VdpDecoderRender decoder=%d, target=%d, picture_info=%p, " "bitstream_buffer_count=%d\n", trace_header, impl_state, decoder, target, picture_info, bitstream_buffer_count); skip:; VdpStatus ret = softVdpDecoderRender(decoder, target, picture_info, bitstream_buffer_count, bitstream_buffers); traceCallHook(VDP_FUNC_ID_DECODER_RENDER, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpBool *is_supported, uint32_t *max_width, uint32_t 
*max_height) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfaceQueryCapabilities device=%d, surface_rgba_format=%s\n", trace_header, impl_state, device, reverse_rgba_format(surface_rgba_format)); skip:; VdpStatus ret = softVdpOutputSurfaceQueryCapabilities(device, surface_rgba_format, is_supported, max_width, max_height); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfaceQueryGetPutBitsNativeCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpBool *is_supported) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfaceQueryGetPutBitsNativeCapabilities device=%d, " "surface_rgba_format=%s\n", trace_header, impl_state, device, reverse_rgba_format(surface_rgba_format)); skip:; VdpStatus ret = softVdpOutputSurfaceQueryGetPutBitsNativeCapabilities(device, surface_rgba_format, is_supported); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfaceQueryPutBitsIndexedCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpIndexedFormat bits_indexed_format, VdpColorTableFormat color_table_format, VdpBool *is_supported) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfaceQueryPutBitsIndexedCapabilities device=%d, " "surface_rgba_format=%s, bits_indexed_format=%s, color_table_format=%s\n", trace_header, impl_state, device, reverse_rgba_format(surface_rgba_format), reverse_indexed_format(bits_indexed_format), reverse_color_table_format(color_table_format)); skip:; VdpStatus ret = 
softVdpOutputSurfaceQueryPutBitsIndexedCapabilities(device, surface_rgba_format, bits_indexed_format, color_table_format, is_supported); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfaceQueryPutBitsYCbCrCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format, VdpYCbCrFormat bits_ycbcr_format, VdpBool *is_supported) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfaceQueryPutBitsYCbCrCapabilities device=%d, " "surface_rgba_format=%s, bits_ycbcr_format=%s\n", trace_header, impl_state, device, reverse_rgba_format(surface_rgba_format), reverse_ycbcr_format(bits_ycbcr_format)); skip:; VdpStatus ret = softVdpOutputSurfaceQueryPutBitsYCbCrCapabilities(device, surface_rgba_format, bits_ycbcr_format, is_supported); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfaceCreate(VdpDevice device, VdpRGBAFormat rgba_format, uint32_t width, uint32_t height, VdpOutputSurface *surface) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_CREATE, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfaceCreate device=%d, rgba_format=%s, width=%d, height=%d\n", trace_header, impl_state, device, reverse_rgba_format(rgba_format), width, height); skip:; VdpStatus ret = softVdpOutputSurfaceCreate(device, rgba_format, width, height, surface); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_CREATE, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfaceDestroy(VdpOutputSurface surface) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfaceDestroy surface=%d\n", trace_header, impl_state, surface); skip:; VdpStatus ret = 
softVdpOutputSurfaceDestroy(surface); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfaceGetParameters(VdpOutputSurface surface, VdpRGBAFormat *rgba_format, uint32_t *width, uint32_t *height) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfaceGetParameters surface=%d\n", trace_header, impl_state, surface); skip:; VdpStatus ret = softVdpOutputSurfaceGetParameters(surface, rgba_format, width, height); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfaceGetBitsNative(VdpOutputSurface surface, VdpRect const *source_rect, void *const *destination_data, uint32_t const *destination_pitches) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfaceGetBitsNative surface=%d, source_rect=%s\n", trace_header, impl_state, surface, rect2string(source_rect)); skip:; VdpStatus ret = softVdpOutputSurfaceGetBitsNative(surface, source_rect, destination_data, destination_pitches); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfacePutBitsNative(VdpOutputSurface surface, void const *const *source_data, uint32_t const *source_pitches, VdpRect const *destination_rect) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfacePutBitsNative surface=%d, destination_rect=%s\n", trace_header, impl_state, surface, rect2string(destination_rect)); skip:; VdpStatus ret = softVdpOutputSurfacePutBitsNative(surface, source_data, source_pitches, destination_rect); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE, 1, (void*)ret); return ret; } VdpStatus 
traceVdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface, VdpIndexedFormat source_indexed_format, void const *const *source_data, uint32_t const *source_pitch, VdpRect const *destination_rect, VdpColorTableFormat color_table_format, void const *color_table) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfacePutBitsIndexed surface=%d, source_indexed_format=%s, " "destination_rect=%s, color_table_format=%s\n", trace_header, impl_state, surface, reverse_indexed_format(source_indexed_format), rect2string(destination_rect), reverse_color_table_format(color_table_format)); skip:; VdpStatus ret = softVdpOutputSurfacePutBitsIndexed(surface, source_indexed_format, source_data, source_pitch, destination_rect, color_table_format, color_table); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface, VdpYCbCrFormat source_ycbcr_format, void const *const *source_data, uint32_t const *source_pitches, VdpRect const *destination_rect, VdpCSCMatrix const *csc_matrix) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfacePutBitsYCbCr surface=%d, source_ycbcr_format=%s, " "destination_rect=%s, csc_matrix=%p\n", trace_header, impl_state, surface, reverse_ycbcr_format(source_ycbcr_format), rect2string(destination_rect), csc_matrix); skip:; VdpStatus ret = softVdpOutputSurfacePutBitsYCbCr(surface, source_ycbcr_format, source_data, source_pitches, destination_rect, csc_matrix); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerQueryFeatureSupport(VdpDevice device, VdpVideoMixerFeature feature, VdpBool *is_supported) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto 
skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerQueryFeatureSupport device=%d, feature=%s\n", trace_header, impl_state, device, reverse_video_mixer_feature(feature)); skip:; VdpStatus ret = softVdpVideoMixerQueryFeatureSupport(device, feature, is_supported); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerQueryParameterSupport(VdpDevice device, VdpVideoMixerParameter parameter, VdpBool *is_supported) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerQueryParameterSupport device=%d, parameter=%s\n", trace_header, impl_state, device, reverse_video_mixer_parameter(parameter)); skip:; VdpStatus ret = softVdpVideoMixerQueryParameterSupport(device, parameter, is_supported); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerQueryAttributeSupport(VdpDevice device, VdpVideoMixerAttribute attribute, VdpBool *is_supported) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerQueryAttributeSupport device=%d, attribute=%s\n", trace_header, impl_state, device, reverse_video_mixer_attribute(attribute)); skip:; VdpStatus ret = softVdpVideoMixerQueryAttributeSupport(device, attribute, is_supported); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerQueryParameterValueRange(VdpDevice device, VdpVideoMixerParameter parameter, void *min_value, void *max_value) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerQueryParameterValueRange device=%d, 
parameter=%s\n", trace_header, impl_state, device, reverse_video_mixer_parameter(parameter)); skip:; VdpStatus ret = softVdpVideoMixerQueryParameterValueRange(device, parameter, min_value, max_value); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerQueryAttributeValueRange(VdpDevice device, VdpVideoMixerAttribute attribute, void *min_value, void *max_value) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerQueryAttributeValueRange device=%d, attribute=%s\n", trace_header, impl_state, device, reverse_video_mixer_attribute(attribute)); skip:; VdpStatus ret = softVdpVideoMixerQueryAttributeValueRange(device, attribute, min_value, max_value); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerCreate(VdpDevice device, uint32_t feature_count, VdpVideoMixerFeature const *features, uint32_t parameter_count, VdpVideoMixerParameter const *parameters, void const *const *parameter_values, VdpVideoMixer *mixer) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_CREATE, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerCreate device=%d, feature_count=%d, parameter_count=%d\n", trace_header, impl_state, device, feature_count, parameter_count); for (uint32_t k = 0; k < feature_count; k ++) fprintf(tlog, "%s feature %s\n", trace_header_blank, reverse_video_mixer_feature(features[k])); for (uint32_t k = 0; k < parameter_count; k ++) { fprintf(tlog, "%s parameter ", trace_header_blank); switch (parameters[k]) { case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH: fprintf(tlog, "video surface width = %d\n", *(uint32_t*)parameter_values[k]); break; case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT: fprintf(tlog, "video surface height = %d\n", 
*(uint32_t*)parameter_values[k]); break; case VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE: fprintf(tlog, "chroma type = %s\n", reverse_chroma_type(*(uint32_t*)parameter_values[k])); break; case VDP_VIDEO_MIXER_PARAMETER_LAYERS: fprintf(tlog, "layers = %d\n", *(uint32_t*)parameter_values[k]); break; default: fprintf(tlog, "invalid\n"); break; } } skip:; VdpStatus ret = softVdpVideoMixerCreate(device, feature_count, features, parameter_count, parameters, parameter_values, mixer); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_CREATE, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerSetFeatureEnables(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool const *feature_enables) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerSetFeatureEnables mixer=%d, feature_count=%d\n", trace_header, impl_state, mixer, feature_count); for (uint32_t k = 0; k < feature_count; k ++) { fprintf(tlog, "%s feature %d (%s) %s\n", trace_header_blank, features[k], reverse_video_mixer_feature(features[k]), feature_enables[k] ? 
"enabled" : "disabled"); } skip:; VdpStatus ret = softVdpVideoMixerSetFeatureEnables(mixer, feature_count, features, feature_enables); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerSetAttributeValues(VdpVideoMixer mixer, uint32_t attribute_count, VdpVideoMixerAttribute const *attributes, void const *const *attribute_values) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerSetAttributeValues mixer=%d, attribute_count=%d\n", trace_header, impl_state, mixer, attribute_count); for (uint32_t k = 0; k < attribute_count; k ++) { fprintf(tlog, "%s attribute %d (%s)\n", trace_header_blank, attributes[k], reverse_video_mixer_attribute(attributes[k])); if (VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX == attributes[k]) { VdpCSCMatrix *matrix = (VdpCSCMatrix *)(attribute_values[k]); for (uint32_t j1 = 0; j1 < 3; j1 ++) { fprintf(tlog, "%s ", trace_header_blank); for (uint32_t j2 = 0; j2 < 4; j2 ++) { fprintf(tlog, "%11f", (double)((*matrix)[j1][j2])); } fprintf(tlog, "\n"); } } } skip:; VdpStatus ret = softVdpVideoMixerSetAttributeValues(mixer, attribute_count, attributes, attribute_values); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerGetFeatureSupport(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool *feature_supports) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerGetFeatureSupport mixer=%d, feature_count=%d\n", trace_header, impl_state, mixer, feature_count); for (unsigned int k = 0; k < feature_count; k ++) fprintf(tlog, "%s feature %s\n", trace_header_blank, reverse_video_mixer_feature(features[k])); skip:; VdpStatus ret = 
softVdpVideoMixerGetFeatureSupport(mixer, feature_count, features, feature_supports); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerGetFeatureEnables(VdpVideoMixer mixer, uint32_t feature_count, VdpVideoMixerFeature const *features, VdpBool *feature_enables) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerGetFeatureEnables mixer=%d, feature_count=%d\n", trace_header, impl_state, mixer, feature_count); for (unsigned int k = 0; k < feature_count; k ++) fprintf(tlog, "%s feature %s\n", trace_header_blank, reverse_video_mixer_feature(features[k])); skip:; VdpStatus ret = softVdpVideoMixerGetFeatureEnables(mixer, feature_count, features, feature_enables); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerGetParameterValues(VdpVideoMixer mixer, uint32_t parameter_count, VdpVideoMixerParameter const *parameters, void *const *parameter_values) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerGetParameterValues mixer=%d, parameter_count=%d\n", trace_header, impl_state, mixer, parameter_count); for (unsigned int k = 0; k < parameter_count; k ++) fprintf(tlog, "%s parameter %s\n", trace_header_blank, reverse_video_mixer_parameter(parameters[k])); skip:; VdpStatus ret = softVdpVideoMixerGetParameterValues(mixer, parameter_count, parameters, parameter_values); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerGetAttributeValues(VdpVideoMixer mixer, uint32_t attribute_count, VdpVideoMixerAttribute const *attributes, void *const *attribute_values) { const char *impl_state = "{zilch}"; if (!trace_enabled) goto skip; 
traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerGetAttributeValues mixer=%d, attribute_count=%d\n", trace_header, impl_state, mixer, attribute_count); for (unsigned int k = 0; k < attribute_count; k ++) fprintf(tlog, "%s attribute %s\n", trace_header_blank, reverse_video_mixer_attribute(attributes[k])); skip:; VdpStatus ret = softVdpVideoMixerGetAttributeValues(mixer, attribute_count, attributes, attribute_values); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerDestroy(VdpVideoMixer mixer) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_DESTROY, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerDestroy mixer=%d\n", trace_header, impl_state, mixer); skip:; VdpStatus ret = softVdpVideoMixerDestroy(mixer); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_DESTROY, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoMixerRender(VdpVideoMixer mixer, VdpOutputSurface background_surface, VdpRect const *background_source_rect, VdpVideoMixerPictureStructure current_picture_structure, uint32_t video_surface_past_count, VdpVideoSurface const *video_surface_past, VdpVideoSurface video_surface_current, uint32_t video_surface_future_count, VdpVideoSurface const *video_surface_future, VdpRect const *video_source_rect, VdpOutputSurface destination_surface, VdpRect const *destination_rect, VdpRect const *destination_video_rect, uint32_t layer_count, VdpLayer const *layers) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_RENDER, 0, NULL); fprintf(tlog, "%s%s VdpVideoMixerRender mixer=%d, background_surface=%d, " "background_source_rect=%s,\n", trace_header, impl_state, mixer, background_surface, rect2string(background_source_rect)); fprintf(tlog, "%s current_picture_structure=%s, video_surface_past=[", trace_header_blank, 
reverser_video_mixer_picture_structure(current_picture_structure)); for (uint32_t k = 0; k < video_surface_past_count; k ++) { if (0 != k) fprintf(tlog, ","); fprintf(tlog, "%d", video_surface_past[k]); } fprintf(tlog, "],\n%s video_surface_current=%d, video_surface_future=[", trace_header_blank, video_surface_current); for (uint32_t k = 0; k < video_surface_future_count; k ++) { if (0 != k) fprintf(tlog, ","); fprintf(tlog, "%d", video_surface_future[k]); } fprintf(tlog, "],\n%s video_source_rect=%s, destination_surface=%d, destination_rect=%s, " "destination_video_rect=%s, layers=[", trace_header_blank, rect2string(video_source_rect), destination_surface, rect2string(destination_rect), rect2string(destination_video_rect)); for (uint32_t k = 0; k < layer_count; k ++) { if (0 != k) fprintf(tlog, ","); fprintf(tlog, "{%d,src:%s,dst:%s}", layers[k].source_surface, rect2string(layers[k].source_rect), rect2string(layers[k].destination_rect)); } fprintf(tlog, "]\n"); skip:; VdpStatus ret = softVdpVideoMixerRender(mixer, background_surface, background_source_rect, current_picture_structure, video_surface_past_count, video_surface_past, video_surface_current, video_surface_future_count, video_surface_future, video_source_rect, destination_surface, destination_rect, destination_video_rect, layer_count, layers); traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_RENDER, 1, (void*)ret); return ret; } VdpStatus traceVdpPresentationQueueTargetDestroy(VdpPresentationQueueTarget presentation_queue_target) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY, 0, NULL); fprintf(tlog, "%s%s VdpPresentationQueueTargetDestroy presentation_queue_target=%d\n", trace_header, impl_state, presentation_queue_target); skip:; VdpStatus ret = softVdpPresentationQueueTargetDestroy(presentation_queue_target); traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY, 1, (void*)ret); return ret; } VdpStatus 
traceVdpPresentationQueueCreate(VdpDevice device, VdpPresentationQueueTarget presentation_queue_target, VdpPresentationQueue *presentation_queue) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE, 0, NULL); fprintf(tlog, "%s%s VdpPresentationQueueCreate device=%d, presentation_queue_target=%d\n", trace_header, impl_state, device, presentation_queue_target); skip:; VdpStatus ret = softVdpPresentationQueueCreate(device, presentation_queue_target, presentation_queue); traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE, 1, (void*)ret); return ret; } VdpStatus traceVdpPresentationQueueDestroy(VdpPresentationQueue presentation_queue) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY, 0, NULL); fprintf(tlog, "%s%s VdpPresentationQueueDestroy presentation_queue=%d\n", trace_header, impl_state, presentation_queue); skip:; VdpStatus ret = softVdpPresentationQueueDestroy(presentation_queue); traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY, 1, (void*)ret); return ret; } VdpStatus traceVdpPresentationQueueSetBackgroundColor(VdpPresentationQueue presentation_queue, VdpColor *const background_color) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR, 0, NULL); fprintf(tlog, "%s%s VdpPresentationQueueSetBackgroundColor presentation_queue=%d, " "background_color=", trace_header, impl_state, presentation_queue); if (background_color) { fprintf(tlog, "(%.2f,%.2f,%.2f,%.2f)\n", background_color->red, background_color->green, background_color->blue, background_color->alpha); } else { fprintf(tlog, "NULL\n"); } skip:; VdpStatus ret = softVdpPresentationQueueSetBackgroundColor(presentation_queue, background_color); traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR, 1, (void*)ret); return ret; } VdpStatus 
traceVdpPresentationQueueGetBackgroundColor(VdpPresentationQueue presentation_queue, VdpColor *background_color) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR, 0, NULL); fprintf(tlog, "%s%s VdpPresentationQueueGetBackgroundColor presentation_queue=%d\n", trace_header, impl_state, presentation_queue); skip:; VdpStatus ret = softVdpPresentationQueueGetBackgroundColor(presentation_queue, background_color); traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR, 1, (void*)ret); return ret; } VdpStatus traceVdpPresentationQueueGetTime(VdpPresentationQueue presentation_queue, VdpTime *current_time) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME, 0, NULL); fprintf(tlog, "%s%s VdpPresentationQueueGetTime presentation_queue=%d\n", trace_header, impl_state, presentation_queue); skip:; VdpStatus ret = softVdpPresentationQueueGetTime(presentation_queue, current_time); traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME, 1, (void*)ret); return ret; } VdpStatus traceVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue, VdpOutputSurface surface, uint32_t clip_width, uint32_t clip_height, VdpTime earliest_presentation_time) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY, 0, NULL); fprintf(tlog, "%s%s VdpPresentationQueueDisplay presentation_queue=%d, surface=%d, " "clip_width=%d, clip_height=%d,\n", trace_header, impl_state, presentation_queue, surface, clip_width, clip_height); fprintf(tlog, "%s earliest_presentation_time=%"PRIu64"\n", trace_header_blank, earliest_presentation_time); skip:; VdpStatus ret = softVdpPresentationQueueDisplay(presentation_queue, surface, clip_width, clip_height, earliest_presentation_time); traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY, 1, (void*)ret); return ret; } VdpStatus 
traceVdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_queue, VdpOutputSurface surface, VdpTime *first_presentation_time) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE, 0, NULL); fprintf(tlog, "%s%s VdpPresentationQueueBlockUntilSurfaceIdle presentation_queue=%d, " "surface=%d\n", trace_header, impl_state, presentation_queue, surface); skip:; VdpStatus ret = softVdpPresentationQueueBlockUntilSurfaceIdle(presentation_queue, surface, first_presentation_time); traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE, 1, (void*)ret); return ret; } VdpStatus traceVdpPresentationQueueQuerySurfaceStatus(VdpPresentationQueue presentation_queue, VdpOutputSurface surface, VdpPresentationQueueStatus *status, VdpTime *first_presentation_time) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS, 0, NULL); fprintf(tlog, "%s%s VdpPresentationQueueQuerySurfaceStatus presentation_queue=%d, " "surface=%d\n", trace_header, impl_state, presentation_queue, surface); skip:; VdpStatus ret = softVdpPresentationQueueQuerySurfaceStatus(presentation_queue, surface, status, first_presentation_time); traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoSurfaceQueryCapabilities(VdpDevice device, VdpChromaType surface_chroma_type, VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES, 0, NULL); fprintf(tlog, "%s%s VdpVideoSurfaceQueryCapabilities device=%d, surface_chroma_type=%s\n", trace_header, impl_state, device, reverse_chroma_type(surface_chroma_type)); skip:; VdpStatus ret = softVdpVideoSurfaceQueryCapabilities(device, surface_chroma_type, is_supported, max_width, max_height); 
traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaType surface_chroma_type, VdpYCbCrFormat bits_ycbcr_format, VdpBool *is_supported) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES, 0, NULL); fprintf(tlog, "%s%s VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities device=%d, " "surface_chroma_type=%s, bits_ycbcr_format=%s\n", trace_header, impl_state, device, reverse_chroma_type(surface_chroma_type), reverse_ycbcr_format(bits_ycbcr_format)); skip:; VdpStatus ret = softVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(device, surface_chroma_type, bits_ycbcr_format, is_supported); traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoSurfaceCreate(VdpDevice device, VdpChromaType chroma_type, uint32_t width, uint32_t height, VdpVideoSurface *surface) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, 0, NULL); fprintf(tlog, "%s%s VdpVideoSurfaceCreate, device=%d, chroma_type=%s, width=%d, height=%d\n", trace_header, impl_state, device, reverse_chroma_type(chroma_type), width, height); skip:; VdpStatus ret = softVdpVideoSurfaceCreate(device, chroma_type, width, height, surface); traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoSurfaceDestroy(VdpVideoSurface surface) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, 0, NULL); fprintf(tlog, "%s%s VdpVideoSurfaceDestroy surface=%d\n", trace_header, impl_state, surface); skip:; VdpStatus ret = softVdpVideoSurfaceDestroy(surface); traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, 1, (void*)ret); return ret; } VdpStatus 
traceVdpVideoSurfaceGetParameters(VdpVideoSurface surface, VdpChromaType *chroma_type, uint32_t *width, uint32_t *height) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS, 0, NULL); fprintf(tlog, "%s%s VdpVideoSurfaceGetParameters surface=%d\n", trace_header, impl_state, surface); skip:; VdpStatus ret = softVdpVideoSurfaceGetParameters(surface, chroma_type, width, height); traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface, VdpYCbCrFormat destination_ycbcr_format, void *const *destination_data, uint32_t const *destination_pitches) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, 0, NULL); fprintf(tlog, "%s%s VdpVideoSurfaceGetBitsYCbCr surface=%d, destination_ycbcr_format=%s\n", trace_header, impl_state, surface, reverse_ycbcr_format(destination_ycbcr_format)); skip:; VdpStatus ret = softVdpVideoSurfaceGetBitsYCbCr(surface, destination_ycbcr_format, destination_data, destination_pitches); traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, 1, (void*)ret); return ret; } VdpStatus traceVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface, VdpYCbCrFormat source_ycbcr_format, void const *const *source_data, uint32_t const *source_pitches) { const char *impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR, 0, NULL); fprintf(tlog, "%s%s VdpVideoSurfacePutBitsYCbCr surface=%d, source_ycbcr_format=%s\n", trace_header, impl_state, surface, reverse_ycbcr_format(source_ycbcr_format)); skip:; VdpStatus ret = softVdpVideoSurfacePutBitsYCbCr(surface, source_ycbcr_format, source_data, source_pitches); traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR, 1, (void*)ret); return ret; } VdpStatus traceVdpBitmapSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat 
surface_rgba_format, VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES, 0, NULL); fprintf(tlog, "%s%s VdpBitmapSurfaceQueryCapabilities device=%d, surface_rgba_format=%s\n", trace_header, impl_state, device, reverse_rgba_format(surface_rgba_format)); skip:; VdpStatus ret = softVdpBitmapSurfaceQueryCapabilities(device, surface_rgba_format, is_supported, max_width, max_height); traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES, 1, (void*)ret); return ret; } VdpStatus traceVdpBitmapSurfaceCreate(VdpDevice device, VdpRGBAFormat rgba_format, uint32_t width, uint32_t height, VdpBool frequently_accessed, VdpBitmapSurface *surface) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_CREATE, 0, NULL); fprintf(tlog, "%s%s VdpBitmapSurfaceCreate device=%d, rgba_format=%s, width=%d, height=%d,\n" "%s frequently_accessed=%d\n", trace_header, impl_state, device, reverse_rgba_format(rgba_format), width, height, trace_header_blank, frequently_accessed); skip:; VdpStatus ret = softVdpBitmapSurfaceCreate(device, rgba_format, width, height, frequently_accessed, surface); traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_CREATE, 1, (void*)ret); return ret; } VdpStatus traceVdpBitmapSurfaceDestroy(VdpBitmapSurface surface) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_DESTROY, 0, NULL); fprintf(tlog, "%s%s VdpBitmapSurfaceDestroy surface=%d\n", trace_header, impl_state, surface); skip:; VdpStatus ret = softVdpBitmapSurfaceDestroy(surface); traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_DESTROY, 1, (void*)ret); return ret; } VdpStatus traceVdpBitmapSurfaceGetParameters(VdpBitmapSurface surface, VdpRGBAFormat *rgba_format, uint32_t *width, uint32_t *height, VdpBool *frequently_accessed) { const char *impl_state = "{full}"; if 
(!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS, 0, NULL); fprintf(tlog, "%s%s VdpBitmapSurfaceGetParameters surface=%d\n", trace_header, impl_state, surface); skip:; VdpStatus ret = softVdpBitmapSurfaceGetParameters(surface, rgba_format, width, height, frequently_accessed); traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS, 1, (void*)ret); return ret; } VdpStatus traceVdpBitmapSurfacePutBitsNative(VdpBitmapSurface surface, void const *const *source_data, uint32_t const *source_pitches, VdpRect const *destination_rect) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE, 0, NULL); fprintf(tlog, "%s%s VdpBitmapSurfacePutBitsNative surface=%d, destination_rect=%s\n", trace_header, impl_state, surface, rect2string(destination_rect)); skip:; VdpStatus ret = softVdpBitmapSurfacePutBitsNative(surface, source_data, source_pitches, destination_rect); traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE, 1, (void*)ret); return ret; } VdpStatus traceVdpDeviceDestroy(VdpDevice device) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_DEVICE_DESTROY, 0, NULL); fprintf(tlog, "%s%s VdpDeviceDestroy device=%d\n", trace_header, impl_state, device); skip:; VdpStatus ret = softVdpDeviceDestroy(device); traceCallHook(VDP_FUNC_ID_DEVICE_DESTROY, 1, (void*)ret); return ret; } VdpStatus traceVdpGetInformationString(char const **information_string) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_GET_INFORMATION_STRING, 0, NULL); fprintf(tlog, "%s%s VdpGetInformationString\n", trace_header, impl_state); skip:; VdpStatus ret = softVdpGetInformationString(information_string); traceCallHook(VDP_FUNC_ID_GET_INFORMATION_STRING, 1, (void*)ret); return ret; } VdpStatus traceVdpGenerateCSCMatrix(VdpProcamp *procamp, VdpColorStandard standard, VdpCSCMatrix *csc_matrix) { const char 
*impl_state = "{part}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_GENERATE_CSC_MATRIX, 0, NULL); fprintf(tlog, "%s%s VdpGenerateCSCMatrix ", trace_header, impl_state); if (procamp) { fprintf(tlog, "brightness=%f, contrast=%f, saturation=%f, ", procamp->brightness, procamp->contrast, procamp->saturation); } fprintf(tlog, "standard=%s\n", reverse_color_standard(standard)); skip:; VdpStatus ret = softVdpGenerateCSCMatrix(procamp, standard, csc_matrix); traceCallHook(VDP_FUNC_ID_GENERATE_CSC_MATRIX, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfaceRenderOutputSurface(VdpOutputSurface destination_surface, VdpRect const *destination_rect, VdpOutputSurface source_surface, VdpRect const *source_rect, VdpColor const *colors, VdpOutputSurfaceRenderBlendState const *blend_state, uint32_t flags) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfaceRenderOutputSurface destination_surface=%d, " "destination_rect=%s,\n", trace_header, impl_state, destination_surface, rect2string(destination_rect)); fprintf(tlog, "%s source_surface=%d, source_rect=%s\n", trace_header_blank, source_surface, rect2string(source_rect)); if (blend_state) { fprintf(tlog, "%s blend_state.blend_factor_source_color=%s\n" "%s blend_state.blend_factor_destination_color=%s\n" "%s blend_state.blend_factor_source_alpha=%s\n" "%s blend_state.blend_factor_destination_alpha=%s\n" "%s blend_state.blend_equation_color=%s\n" "%s blend_state.blend_equation_alpha=%s\n" "%s blend_constant = (%11f, %11f, %11f, %11f)\n", trace_header_blank, reverse_blend_factor(blend_state->blend_factor_source_color), trace_header_blank, reverse_blend_factor(blend_state->blend_factor_destination_color), trace_header_blank, reverse_blend_factor(blend_state->blend_factor_source_alpha), trace_header_blank, reverse_blend_factor(blend_state->blend_factor_destination_alpha), trace_header_blank, 
reverse_blend_equation(blend_state->blend_equation_color), trace_header_blank, reverse_blend_equation(blend_state->blend_equation_alpha), trace_header_blank, blend_state->blend_constant.red, blend_state->blend_constant.green, blend_state->blend_constant.blue, blend_state->blend_constant.alpha); } else { fprintf(tlog, "%s blend_state=NULL\n", trace_header_blank); } fprintf(tlog, "%s flags = %s", trace_header_blank, reverse_output_surface_render_rotate(flags)); if (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX) fprintf(tlog, "| VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX"); fprintf(tlog, "\n"); int color_count = 0; if (colors) { if (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX) color_count = 4; else color_count = 1; } fprintf(tlog, "%s colors=[", trace_header_blank); for (int k = 0; k < color_count; k ++) { if (k > 0) fprintf(tlog, ", "); fprintf(tlog, "(%f,%f,%f,%f)", colors[k].red, colors[k].green, colors[k].blue, colors[k].alpha); } fprintf(tlog, "]\n"); skip:; VdpStatus ret = softVdpOutputSurfaceRenderOutputSurface(destination_surface, destination_rect, source_surface, source_rect, colors, blend_state, flags); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE, 1, (void*)ret); return ret; } VdpStatus traceVdpOutputSurfaceRenderBitmapSurface(VdpOutputSurface destination_surface, VdpRect const *destination_rect, VdpBitmapSurface source_surface, VdpRect const *source_rect, VdpColor const *colors, VdpOutputSurfaceRenderBlendState const *blend_state, uint32_t flags) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE, 0, NULL); fprintf(tlog, "%s%s VdpOutputSurfaceRenderBitmapSurface destination_surface=%d, " "destination_rect=%s,\n", trace_header, impl_state, destination_surface, rect2string(destination_rect)); fprintf(tlog, "%s source_surface=%d, source_rect=%s\n", trace_header_blank, source_surface, rect2string(source_rect)); if (blend_state) { fprintf(tlog, "%s 
blend_state.blend_factor_source_color=%s\n" "%s blend_state.blend_factor_destination_color=%s\n" "%s blend_state.blend_factor_source_alpha=%s\n" "%s blend_state.blend_factor_destination_alpha=%s\n" "%s blend_state.blend_equation_color=%s\n" "%s blend_state.blend_equation_alpha=%s\n" "%s blend_constant = (%11f, %11f, %11f, %11f)\n", trace_header_blank, reverse_blend_factor(blend_state->blend_factor_source_color), trace_header_blank, reverse_blend_factor(blend_state->blend_factor_destination_color), trace_header_blank, reverse_blend_factor(blend_state->blend_factor_source_alpha), trace_header_blank, reverse_blend_factor(blend_state->blend_factor_destination_alpha), trace_header_blank, reverse_blend_equation(blend_state->blend_equation_color), trace_header_blank, reverse_blend_equation(blend_state->blend_equation_alpha), trace_header_blank, blend_state->blend_constant.red, blend_state->blend_constant.green, blend_state->blend_constant.blue, blend_state->blend_constant.alpha); } else { fprintf(tlog, "%s blend_state=NULL\n", trace_header_blank); } fprintf(tlog, "%s flags = %s", trace_header_blank, reverse_output_surface_render_rotate(flags)); if (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX) fprintf(tlog, "| VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX"); fprintf(tlog, "\n"); int color_count = 0; if (colors) { if (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX) color_count = 4; else color_count = 1; } fprintf(tlog, "%s colors=[", trace_header_blank); for (int k = 0; k < color_count; k ++) { if (k > 0) fprintf(tlog, ", "); fprintf(tlog, "(%f,%f,%f,%f)", colors[k].red, colors[k].green, colors[k].blue, colors[k].alpha); } fprintf(tlog, "]\n"); skip:; VdpStatus ret = softVdpOutputSurfaceRenderBitmapSurface(destination_surface, destination_rect, source_surface, source_rect, colors, blend_state, flags); traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE, 1, (void*)ret); return ret; } VdpStatus traceVdpPreemptionCallbackRegister(VdpDevice device, 
VdpPreemptionCallback callback, void *context) { const char *impl_state = "{zilch/fake success}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER, 0, NULL); fprintf(tlog, "%s%s VdpPreemptionCallbackRegister device=%d, callback=%p, context=%p\n", trace_header, impl_state, device, callback, context); skip:; VdpStatus ret = softVdpPreemptionCallbackRegister(device, callback, context); traceCallHook(VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER, 1, (void*)ret); return ret; } VdpStatus traceVdpPresentationQueueTargetCreateX11(VdpDevice device, Drawable drawable, VdpPresentationQueueTarget *target) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11, 0, NULL); fprintf(tlog, "%s%s VdpPresentationQueueTargetCreateX11, device=%d, drawable=%u\n", trace_header, impl_state, device, ((unsigned int)drawable)); skip:; VdpStatus ret = softVdpPresentationQueueTargetCreateX11(device, drawable, target); traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11, 1, (void*)ret); return ret; } VdpStatus traceVdpGetProcAddress(VdpDevice device, VdpFuncId function_id, void **function_pointer) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(VDP_FUNC_ID_GET_PROC_ADDRESS, 0, NULL); fprintf(tlog, "%s%s VdpGetProcAddress, device=%d, function_id=%s\n", trace_header, impl_state, device, reverse_func_id(function_id)); skip:; VdpStatus ret = softVdpGetProcAddress(device, function_id, function_pointer); traceCallHook(VDP_FUNC_ID_GET_PROC_ADDRESS, 1, (void*)ret); return ret; } VdpStatus traceVdpDeviceCreateX11(Display *display, int screen, VdpDevice *device, VdpGetProcAddress **get_proc_address) { const char *impl_state = "{full}"; if (!trace_enabled) goto skip; traceCallHook(-1, 0, NULL); fprintf(tlog, "%s%s vdp_imp_device_create_x11 display=%p, screen=%d\n", trace_header, impl_state, display, screen); skip:; VdpStatus ret = 
softVdpDeviceCreateX11(display, screen, device, get_proc_address); traceCallHook(-1, 1, (void*)ret); return ret; } libvdpau-va-gl-0.2.1/vdpau-trace.h000066400000000000000000000113011224142723700167100ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ #include #include #include #include "reverse-constant.h" void traceEnableTracing(int flag); void traceSetTarget(FILE *target); void traceResetTarget(void); void traceSetHeader(const char *header, const char *header_blank); void traceInfo(const char *buf, ...); void traceError(const char *buf, ...); void traceSetHook(void (*hook)(void *param1, void *param2, int origin, int after), void *param); void traceCallHook(int origin, int after, void *shortterm_param); VdpStatus traceVdpDeviceCreateX11(Display *display, int screen, VdpDevice *device, VdpGetProcAddress **get_proc_address); VdpGetApiVersion traceVdpGetApiVersion; VdpDecoderQueryCapabilities traceVdpDecoderQueryCapabilities; VdpDecoderCreate traceVdpDecoderCreate; VdpDecoderDestroy traceVdpDecoderDestroy; VdpDecoderGetParameters traceVdpDecoderGetParameters; VdpDecoderRender traceVdpDecoderRender; VdpOutputSurfaceQueryCapabilities traceVdpOutputSurfaceQueryCapabilities; VdpOutputSurfaceQueryGetPutBitsNativeCapabilities traceVdpOutputSurfaceQueryGetPutBitsNativeCapabilities; VdpOutputSurfaceQueryPutBitsIndexedCapabilities traceVdpOutputSurfaceQueryPutBitsIndexedCapabilities; VdpOutputSurfaceQueryPutBitsYCbCrCapabilities traceVdpOutputSurfaceQueryPutBitsYCbCrCapabilities; VdpOutputSurfaceCreate traceVdpOutputSurfaceCreate; VdpOutputSurfaceDestroy traceVdpOutputSurfaceDestroy; VdpOutputSurfaceGetParameters traceVdpOutputSurfaceGetParameters; VdpOutputSurfaceGetBitsNative traceVdpOutputSurfaceGetBitsNative; VdpOutputSurfacePutBitsNative traceVdpOutputSurfacePutBitsNative; VdpOutputSurfacePutBitsIndexed 
traceVdpOutputSurfacePutBitsIndexed; VdpOutputSurfacePutBitsYCbCr traceVdpOutputSurfacePutBitsYCbCr; VdpVideoMixerQueryFeatureSupport traceVdpVideoMixerQueryFeatureSupport; VdpVideoMixerQueryParameterSupport traceVdpVideoMixerQueryParameterSupport; VdpVideoMixerQueryAttributeSupport traceVdpVideoMixerQueryAttributeSupport; VdpVideoMixerQueryParameterValueRange traceVdpVideoMixerQueryParameterValueRange; VdpVideoMixerQueryAttributeValueRange traceVdpVideoMixerQueryAttributeValueRange; VdpVideoMixerCreate traceVdpVideoMixerCreate; VdpVideoMixerSetFeatureEnables traceVdpVideoMixerSetFeatureEnables; VdpVideoMixerSetAttributeValues traceVdpVideoMixerSetAttributeValues; VdpVideoMixerGetFeatureSupport traceVdpVideoMixerGetFeatureSupport; VdpVideoMixerGetFeatureEnables traceVdpVideoMixerGetFeatureEnables; VdpVideoMixerGetParameterValues traceVdpVideoMixerGetParameterValues; VdpVideoMixerGetAttributeValues traceVdpVideoMixerGetAttributeValues; VdpVideoMixerDestroy traceVdpVideoMixerDestroy; VdpVideoMixerRender traceVdpVideoMixerRender; VdpPresentationQueueTargetDestroy traceVdpPresentationQueueTargetDestroy; VdpPresentationQueueCreate traceVdpPresentationQueueCreate; VdpPresentationQueueDestroy traceVdpPresentationQueueDestroy; VdpPresentationQueueSetBackgroundColor traceVdpPresentationQueueSetBackgroundColor; VdpPresentationQueueGetBackgroundColor traceVdpPresentationQueueGetBackgroundColor; VdpPresentationQueueGetTime traceVdpPresentationQueueGetTime; VdpPresentationQueueDisplay traceVdpPresentationQueueDisplay; VdpPresentationQueueBlockUntilSurfaceIdle traceVdpPresentationQueueBlockUntilSurfaceIdle; VdpPresentationQueueQuerySurfaceStatus traceVdpPresentationQueueQuerySurfaceStatus; VdpVideoSurfaceQueryCapabilities traceVdpVideoSurfaceQueryCapabilities; VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities traceVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities; VdpVideoSurfaceCreate traceVdpVideoSurfaceCreate; VdpVideoSurfaceDestroy traceVdpVideoSurfaceDestroy; 
VdpVideoSurfaceGetParameters traceVdpVideoSurfaceGetParameters; VdpVideoSurfaceGetBitsYCbCr traceVdpVideoSurfaceGetBitsYCbCr; VdpVideoSurfacePutBitsYCbCr traceVdpVideoSurfacePutBitsYCbCr; VdpBitmapSurfaceQueryCapabilities traceVdpBitmapSurfaceQueryCapabilities; VdpBitmapSurfaceCreate traceVdpBitmapSurfaceCreate; VdpBitmapSurfaceDestroy traceVdpBitmapSurfaceDestroy; VdpBitmapSurfaceGetParameters traceVdpBitmapSurfaceGetParameters; VdpBitmapSurfacePutBitsNative traceVdpBitmapSurfacePutBitsNative; VdpDeviceDestroy traceVdpDeviceDestroy; VdpGetInformationString traceVdpGetInformationString; VdpGenerateCSCMatrix traceVdpGenerateCSCMatrix; VdpOutputSurfaceRenderOutputSurface traceVdpOutputSurfaceRenderOutputSurface; VdpOutputSurfaceRenderBitmapSurface traceVdpOutputSurfaceRenderBitmapSurface; VdpPreemptionCallbackRegister traceVdpPreemptionCallbackRegister; VdpPresentationQueueTargetCreateX11 traceVdpPresentationQueueTargetCreateX11; VdpGetProcAddress traceVdpGetProcAddress; libvdpau-va-gl-0.2.1/watermark.c000066400000000000000000000067201224142723700164760ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #include "watermark.h" // grayscale 50x24 image with text "va_gl" on it const char *watermark_data = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\xff\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xff\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xff\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xff\xff\0\0\0\0\0\x13\xf7\xfb\x17" "\0\0\0\0\x0a\xef\xe8\x04\0\x44\xb3\xef\xf5\xc5\x42\0\0\0\0\0\0\0\0\0\0\0\0\0\x22\xb1\xf3\xeb" "\x97\x09\xff\xff\0\0\0\xff\xff\0\0\0\0\0\0\xa6\xff\x70\0\0\0\0\x62\xff\x8a\0\0\xff\xff\xff" "\xff\xff\xf9\x32\0\0\0\0\0\0\0\0\0\0\0\x13\xe5\xff\xff\xff\xff\xac\xff\xff\0\0\0\xff\xff\0\0" "\0\0\0\0\x40\xff\xcc\0\0\0\0\xca\xff\x27\0\0\xaa\x31\x06\x21\xad\xff\xa6\0\0\0\0\0\0\0\0\0\0" "\0\x8a\xff\xde\x3e\x0b\x3e\xdf\xff\xff\0\0\0\xff\xff\0\0\0\0\0\0\0\xd9\xff\x28\0\0\x31\xff" "\xc4\0\0\0\0\0\0\0\x14\xff\xe1\0\0\0\0\0\0\0\0\0\0\0\xd7\xff\x4b\0\0\0\x4c\xff\xff\0\0\0\xff" "\xff\0\0\0\0\0\0\0\x74\xff\x83\0\0\x98\xff\x62\0\0\x02\x73\xce\xf4\xff\xff\xff\xf8\0\0\0\0\0" "\0\0\0\0\0\0\xf7\xff\x0f\0\0\0\x10\xff\xff\0\0\0\xff\xff\0\0\0\0\0\0\0\x14\xf8\xdd\x01\x0c" "\xf2\xf2\x0b\0\0\x8d\xff\xff\xff\xff\xff\xff\xff\0\0\0\0\0\0\0\0\0\0\0\xf7\xff\x0f\0\0\0\x10" "\xff\xff\0\0\0\xff\xff\0\0\0\0\0\0\0\0\xa8\xff\x3b\x67\xff\x9c\0\0\0\xec\xff\x5d\x11\x02\x11" "\xff\xff\0\0\0\0\0\0\0\0\0\0\0\xd7\xff\x4b\0\0\0\x4b\xff\xff\0\0\0\xff\xff\0\0\0\0\0\0\0\0" "\x42\xff\x97\xce\xff\x39\0\0\0\xed\xff\x4f\x06\x21\xab\xff\xff\0\0\0\0\0\0\0\0\0\0\0\x8b\xff" 
"\xdd\x3c\x0a\x3c\xdd\xff\xff\0\0\0\xff\xff\0\0\0\0\0\0\0\0\x01\xda\xf7\xff\xd6\0\0\0\0\xa4" "\xff\xff\xff\xff\xae\xff\xff\0\0\0\0\0\0\0\0\0\0\0\x15\xe6\xff\xff\xff\xff\xaf\xff\xfa\0\0\0" "\xff\xff\0\0\0\0\0\0\0\0\0\x76\xff\xff\x74\0\0\0\0\x11\xa3\xec\xee\x9d\x0b\xff\xff\0\0\0\0\0" "\0\0\0\0\0\0\0\x24\xb3\xf4\xec\x98\x28\xff\xe9\0\0\0\xff\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x6b\xff\xbe\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xaf\x3c\x0d\x18\x64\xf0\xff\x6b\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\0\0\xff\xff\xff\xff\xff\xff\xcb\x05\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\xff\xff\xff\xff\xff\xff\xff\xff\xff\0\0\x49\xb7\xe5\xf8\xdb\x89\x0d\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; const int watermark_width = 50; const int watermark_height = 24; libvdpau-va-gl-0.2.1/watermark.h000066400000000000000000000005371224142723700165030ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. */ #ifndef _WATERMARK_H #define _WATERMARK_H extern const char *watermark_data; extern const int watermark_width; extern const int watermark_height; #endif /* _WATERMARK_H */ libvdpau-va-gl-0.2.1/xinitthreads.c000066400000000000000000000005261224142723700172050ustar00rootroot00000000000000/* * Copyright 2013 Rinat Ibragimov * * This file is part of libvdpau-va-gl * * libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details. 
*/ #include #include __attribute__((constructor)) static void library_constructor(void) { XInitThreads(); printf("XInitThreads()\n"); }