pax_global_header 0000666 0000000 0000000 00000000064 12665616006 0014522 g ustar 00root root 0000000 0000000 52 comment=bec326670372ceb4bab64ef7b1862f09cf392920
libvdpau-va-gl-0.3.6/ 0000775 0000000 0000000 00000000000 12665616006 0014342 5 ustar 00root root 0000000 0000000 libvdpau-va-gl-0.3.6/.gitignore 0000664 0000000 0000000 00000000007 12665616006 0016327 0 ustar 00root root 0000000 0000000 build/
libvdpau-va-gl-0.3.6/CMakeLists.txt 0000664 0000000 0000000 00000002274 12665616006 0017107 0 ustar 00root root 0000000 0000000 project (libvdpau-va-gl)
# Top-level build configuration for libvdpau-va-gl, a VDPAU driver implemented
# on top of VA-API and OpenGL.  Actual targets are defined in the src/, glsl/
# and tests/ subdirectories; this file sets up dependencies and shared flags.
#
# NOTE(review): project() is invoked before cmake_minimum_required() (project()
# sits on the preceding line of the file); modern CMake expects the reverse
# order so policy defaults are established first.
cmake_minimum_required (VERSION 2.8.8)
# NOTE(review): legacy style — these are compiler *options*, not preprocessor
# definitions, and they are directory-scoped so they propagate to every target
# in the subdirectories below.  With CMake >= 2.8.12 this would be
# target_compile_options() / POSITION_INDEPENDENT_CODE on the targets in src/.
add_definitions(-std=gnu99 -Wall -fvisibility=hidden -fPIC)
# Build dependencies.  pkg-config locates everything except X11, which has a
# dedicated CMake find module.  All dependencies are hard requirements.
find_package(PkgConfig REQUIRED)
find_package(X11 REQUIRED)
pkg_check_modules(VDPAU vdpau REQUIRED)            # VDPAU API headers
pkg_check_modules(GLIB20 glib-2.0 REQUIRED)        # GLib utility library
pkg_check_modules(LIBVA libva-x11 REQUIRED)        # VA-API with X11 backend
pkg_check_modules(LIBSWSCALE libswscale REQUIRED)  # FFmpeg pixel format conversion
pkg_check_modules(GL gl glu REQUIRED)              # OpenGL + GLU
# User-tunable cache settings: the installed driver's name, an optional suffix
# for the library directory (e.g. "64"), and the final installation path.
set(DRIVER_NAME "vdpau_va_gl" CACHE STRING "driver name")
set(LIB_SUFFIX "" CACHE STRING "library path suffix (if needed)")
set(LIB_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}/vdpau" CACHE PATH "library installation path")
# Directory-scoped include paths, inherited by all subdirectories added below.
# ${CMAKE_BINARY_DIR} is included so generated headers can be found.
# NOTE(review): GENERATED_INCLUDE_DIRS is not set anywhere in this file —
# presumably stale, or expected from an enclosing build.  TODO confirm; if
# unset it expands to nothing and is harmless.
include_directories (
${X11_INCLUDE_DIRS}
${VDPAU_INCLUDE_DIRS}
${GLIB20_INCLUDE_DIRS}
${LIBVA_INCLUDE_DIRS}
${LIBSWSCALE_INCLUDE_DIRS}
${GL_INCLUDE_DIRS}
${GENERATED_INCLUDE_DIRS}
${CMAKE_BINARY_DIR}
src
)
# "make check" runs ctest; "build-tests" is a hook the tests/ subdirectory
# presumably attaches test binaries to (verify in tests/CMakeLists.txt), so
# the dependency below forces tests to be built before ctest runs.
add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND})
add_custom_target(build-tests)
add_dependencies(check build-tests)
add_subdirectory(glsl)
enable_testing()
add_subdirectory(tests)
# put compiled library to the build directory root
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
add_subdirectory(src)
libvdpau-va-gl-0.3.6/COPYING 0000664 0000000 0000000 00000001074 12665616006 0015377 0 ustar 00root root 0000000 0000000 libvdpau-va-gl is free software: you can redistribute it and/or modify
it under the terms of version 3 of the GNU Lesser General Public
License as published by the Free Software Foundation.
libvdpau-va-gl is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with libvdpau-va-gl. If not, see
<http://www.gnu.org/licenses/>.
libvdpau-va-gl-0.3.6/COPYING.GPLv3 0000664 0000000 0000000 00000104513 12665616006 0016273 0 ustar 00root root 0000000 0000000 GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
Copyright (C)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see .
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
Copyright (C)
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
.
libvdpau-va-gl-0.3.6/COPYING.LGPLv3 0000664 0000000 0000000 00000016743 12665616006 0016416 0 ustar 00root root 0000000 0000000 GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
libvdpau-va-gl-0.3.6/ChangeLog 0000664 0000000 0000000 00000004272 12665616006 0016121 0 ustar 00root root 0000000 0000000 2016-03-02 Rinat Ibragimov
* tag v0.3.6
* core: handle Constrained Baseline profile for H.264
* misc: bugfixes
2016-02-21 Rinat Ibragimov
* tag: v0.3.5
* misc: bugfixes
* misc: compatibility with some other ffmpeg versions
2014-01-19 Rinat Ibragimov
* tag: v0.3.0
* core: avoid NULL dereference on proprietary drivers
* core: minor refactoring
2014-01-13 Rinat Ibragimov
* core: mitigate memleaks on glx contexts by using own
vaCopySurfaceGLX implementation
2014-01-11 Rinat Ibragimov
* doc: add known-issues.md
* core: use glsl shaders for colorspace conversion and texture
components reordering
* core: split large source code file into smaller pieces relevant
to various parts of API
* tests: use static linking with core code. `make check` now
checks current source, not current VDPAU driver.
* core: render to pixmap with bitblitting afterwards
* core: mitigate GLX context leaks from per thread context pool
* core: fix some concurrency bugs
2013-11-15 Rinat Ibragimov
* tag: v0.2.1
* core: fix some NULL dereferences, memcpy warning, deadlock in
presentation thread
* core: implement rotation and coloring in surface rendering functions
* core: reuse render_target entries (VdpDecoder)
2013-09-21 Rinat Ibragimov
* core: start splitting vdpau-soft.c into smaller pieces
* core: use separate thread for presentation queue
* core: drop global locking, try to use fine grained locking instead
2013-07-06 Rinat Ibragimov
* tag: v0.1.0
* core: remove miscellaneous unused code
* core: add basic documentation on structures
* core: fix build system to simplify further packaging
* core: enable compiling against new libva 1.2.1 release
2013-05-14 Rinat Ibragimov
* libvdpau-va-gl use common GL context pool across several vdp
devices. Shrinks memory overhead from ~100% to ~50%
* libvdpau-va-gl add new quirk named AvoidVA, which disables using
VA-API even on systems that have it
* libvdpau-va-gl start writing changes to ChangeLog
libvdpau-va-gl-0.3.6/Doxyfile 0000664 0000000 0000000 00000225111 12665616006 0016052 0 ustar 00root root 0000000 0000000 # Doxyfile 1.8.1.2
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (" ").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the config file
# that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
# http://www.gnu.org/software/libiconv for the list of possible encodings.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or sequence of words) that should
# identify the project. Note that if you do not use Doxywizard you need
# to put quotes around the project name if it contains spaces.
PROJECT_NAME = "libvdpau-va-gl"
# The PROJECT_NUMBER tag can be used to enter a project or revision number.
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER =
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer
# a quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF = "VDPAU driver with OpenGL/VA-API backend"
# With the PROJECT_LOGO tag one can specify an logo or icon that is
# included in the documentation. The maximum height of the logo should not
# exceed 55 pixels and the maximum width should not exceed 200 pixels.
# Doxygen will copy the logo to the output directory.
PROJECT_LOGO =
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
# If a relative path is entered, it will be relative to the location
# where doxygen was started. If left blank the current directory will be used.
OUTPUT_DIRECTORY =
# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
# 4096 sub-directories (in 2 levels) under the output directory of each output
# format and will distribute the generated files over these directories.
# Enabling this option can be useful when feeding doxygen a huge amount of
# source files, where putting all generated files in the same directory would
# otherwise cause performance problems for the file system.
CREATE_SUBDIRS = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# The default language is English, other supported languages are:
# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
OUTPUT_LANGUAGE = English
# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
# include brief member descriptions after the members that are listed in
# the file and class documentation (similar to JavaDoc).
# Set to NO to disable this.
BRIEF_MEMBER_DESC = YES
# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
# the brief description of a member or function before the detailed description.
# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
REPEAT_BRIEF = YES
# This tag implements a quasi-intelligent brief description abbreviator
# that is used to form the text in various listings. Each string
# in this list, if found as the leading text of the brief description, will be
# stripped from the text and the result after processing the whole list, is
# used as the annotated text. Otherwise, the brief description is used as-is.
# If left blank, the following values are used ("$name" is automatically
# replaced with the name of the entity): "The $name class" "The $name widget"
# "The $name file" "is" "provides" "specifies" "contains"
# "represents" "a" "an" "the"
ABBREVIATE_BRIEF =
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# Doxygen will generate a detailed section even if there is only a brief
# description.
ALWAYS_DETAILED_SEC = NO
# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
INLINE_INHERITED_MEMB = NO
# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
# path before files name in the file list and in the header files. If set
# to NO the shortest path that makes the file name unique will be used.
FULL_PATH_NAMES = YES
# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
# can be used to strip a user-defined part of the path. Stripping is
# only done if one of the specified strings matches the left-hand part of
# the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the
# path to strip.
STRIP_FROM_PATH =
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
# the path mentioned in the documentation of a class, which tells
# the reader which header file to include in order to use a class.
# If left blank only the name of the header file containing the class
# definition is used. Otherwise one should specify the include paths that
# are normally passed to the compiler using the -I flag.
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
# (but less readable) file names. This can be useful if your file system
# doesn't support long names like on DOS, Mac, or CD-ROM.
SHORT_NAMES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
# will interpret the first line (until the first dot) of a JavaDoc-style
# comment as the brief description. If set to NO, the JavaDoc
# comments will behave just like regular Qt-style comments
# (thus requiring an explicit @brief command for a brief description.)
JAVADOC_AUTOBRIEF = NO
# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
# interpret the first line (until the first dot) of a Qt-style
# comment as the brief description. If set to NO, the comments
# will behave just like regular Qt-style comments (thus requiring
# an explicit \brief command for a brief description.)
QT_AUTOBRIEF = NO
# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
# treat a multi-line C++ special comment block (i.e. a block of //! or ///
# comments) as a brief description. This used to be the default behaviour.
# The new default is to treat a multi-line C++ comment block as a detailed
# description. Set this tag to YES if you prefer the old behaviour instead.
MULTILINE_CPP_IS_BRIEF = NO
# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
# member inherits the documentation from any documented member that it
# re-implements.
INHERIT_DOCS = YES
# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
# a new page for each member. If set to NO, the documentation of a member will
# be part of the file/class/namespace that contains it.
SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab.
# Doxygen uses this value to replace tabs by spaces in code fragments.
TAB_SIZE = 8
# This tag can be used to specify a number of aliases that acts
# as commands in the documentation. An alias has the form "name=value".
# For example adding "sideeffect=\par Side Effects:\n" will allow you to
# put the command \sideeffect (or @sideeffect) in the documentation, which
# will result in a user-defined paragraph with heading "Side Effects:".
# You can put \n's in the value part of an alias to insert newlines.
ALIASES =
# This tag can be used to specify a number of word-keyword mappings (TCL only).
# A mapping has the form "name=value". For example adding
# "class=itcl::class" will allow you to use the command class in the
# itcl::class meaning.
TCL_SUBST =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
# sources only. Doxygen will then generate output that is more tailored for C.
# For instance, some of the names that are used will be different. The list
# of all members will be omitted, etc.
OPTIMIZE_OUTPUT_FOR_C = NO
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
# sources only. Doxygen will then generate output that is more tailored for
# Java. For instance, namespaces will be presented as packages, qualified
# scopes will look different, etc.
OPTIMIZE_OUTPUT_JAVA = NO
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources only. Doxygen will then generate output that is more tailored for
# Fortran.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for
# VHDL.
OPTIMIZE_OUTPUT_VHDL = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given extension.
# Doxygen has a built-in mapping, but you can override or extend it using this
# tag. The format is ext=language, where ext is a file extension, and language
# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
EXTENSION_MAPPING =
# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
# comments according to the Markdown format, which allows for more readable
# documentation. See http://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you
# can mix doxygen, HTML, and XML commands with Markdown formatting.
# Disable only in case of backward compatibilities issues.
MARKDOWN_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should
# set this tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
# func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
# Doxygen will parse them like normal C++ but will assume all classes use public
# instead of private inheritance when no explicit protection keyword is present.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate getter
# and setter methods for a property. Setting this option to YES (the default)
# will make doxygen replace the get and set methods by a property in the
# documentation. This will only work if the methods are indeed getting or
# setting a simple type. If this is not the case, or you want to show the
# methods anyway, you should set this option to NO.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES, then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
DISTRIBUTE_GROUP_DOC = NO
# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
# the same type (for instance a group of public functions) to be put as a
# subgroup of that type (e.g. under the Public Functions section). Set it to
# NO to prevent subgrouping. Alternatively, this can be done per class using
# the \nosubgrouping command.
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
# unions are shown inside the group in which they are included (e.g. using
# @ingroup) instead of on a separate page (for HTML and Man pages) or
# section (for LaTeX and RTF).
INLINE_GROUPED_CLASSES = NO
# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
# unions with only public data fields will be shown inline in the documentation
# of the scope in which they are defined (i.e. file, namespace, or group
# documentation), provided this scope is documented. If set to NO (the default),
# structs, classes, and unions are shown on a separate page (for HTML and Man
# pages) or section (for LaTeX and RTF).
INLINE_SIMPLE_STRUCTS = NO
# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
# is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically
# be useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
TYPEDEF_HIDES_STRUCT = NO
# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
# determine which symbols to keep in memory and which to flush to disk.
# When the cache is full, less often used symbols will be written to disk.
# For small to medium size projects (<1000 input files) the default value is
# probably good enough. For larger projects a too small cache size can cause
# doxygen to be busy swapping symbols to and from disk most of the time
# causing a significant performance penalty.
# If the system has enough physical memory increasing the cache will improve the
# performance by keeping more symbols in memory. Note that the value works on
# a logarithmic scale so increasing the size by one will roughly double the
# memory usage. The cache size is given by this formula:
# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols.
SYMBOL_CACHE_SIZE = 0
# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
# their name and scope. Since this can be an expensive process and often the
# same symbol appear multiple times in the code, doxygen keeps a cache of
# pre-resolved symbols. If the cache is too small doxygen will become slower.
# If the cache is too large, memory is wasted. The cache size is given by this
# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols.
LOOKUP_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
# documentation are documented, even if no documentation was available.
# Private class members and static file members will be hidden unless
# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
EXTRACT_ALL = NO
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation.
EXTRACT_PRIVATE = NO
# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation.
EXTRACT_PACKAGE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
EXTRACT_STATIC = NO
# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
# defined locally in source files will be included in the documentation.
# If set to NO only classes defined in header files are included.
EXTRACT_LOCAL_CLASSES = YES
# This flag is only useful for Objective-C code. When set to YES local
# methods, which are defined in the implementation section but not in
# the interface are included in the documentation.
# If set to NO (the default) only methods in the interface are included.
EXTRACT_LOCAL_METHODS = NO
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base
# name of the file that contains the anonymous namespace. By default
# anonymous namespaces are hidden.
EXTRACT_ANON_NSPACES = NO
# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
# undocumented members of documented classes, files or namespaces.
# If set to NO (the default) these members will be included in the
# various overviews, but no documentation section is generated.
# This option has no effect if EXTRACT_ALL is enabled.
HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy.
# If set to NO (the default) these classes will be included in the various
# overviews. This option has no effect if EXTRACT_ALL is enabled.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
# friend (class|struct|union) declarations.
# If set to NO (the default) these declarations will be included in the
# documentation.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
# documentation blocks found inside the body of a function.
# If set to NO (the default) these blocks will be appended to the
# function's detailed documentation block.
HIDE_IN_BODY_DOCS = NO
# The INTERNAL_DOCS tag determines if documentation
# that is typed after a \internal command is included. If the tag is set
# to NO (the default) then the documentation will be excluded.
# Set it to YES to include the internal documentation.
INTERNAL_DOCS = NO
# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
# file names in lower-case letters. If set to YES upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# and Mac users are advised to set this option to NO.
CASE_SENSE_NAMES = YES
# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
# will show members with their full class and namespace scopes in the
# documentation. If set to YES the scope will be hidden.
HIDE_SCOPE_NAMES = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put a list of the files that are included by a file in the documentation
# of that file.
SHOW_INCLUDE_FILES = YES
# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
# will list include files with double quotes in the documentation
# rather than with sharp brackets.
FORCE_LOCAL_INCLUDES = NO
# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
# is inserted in the documentation for inline members.
INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
# will sort the (detailed) documentation of file and class members
# alphabetically by member name. If set to NO the members will appear in
# declaration order.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
# brief documentation of file, namespace and class members alphabetically
# by member name. If set to NO (the default) the members will appear in
# declaration order.
SORT_BRIEF_DOCS = NO
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
# will sort the (brief and detailed) documentation of class members so that
# constructors and destructors are listed first. If set to NO (the default)
# the constructors will appear in the respective orders defined by
# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
SORT_MEMBERS_CTORS_1ST = NO
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
# hierarchy of group names into alphabetical order. If set to NO (the default)
# the group names will appear in their defined order.
SORT_GROUP_NAMES = NO
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
# sorted by fully-qualified names, including namespaces. If set to
# NO (the default), the class list will be sorted only by class name,
# not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the
# alphabetical list.
SORT_BY_SCOPE_NAME = NO
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
# do proper type resolution of all parameters of a function it will reject a
# match between the prototype and the implementation of a member function even
# if there is only one candidate or it is obvious which candidate to choose
# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
# will still accept a match between prototype and implementation in such cases.
STRICT_PROTO_MATCHING = NO
# The GENERATE_TODOLIST tag can be used to enable (YES) or
# disable (NO) the todo list. This list is created by putting \todo
# commands in the documentation.
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable (YES) or
# disable (NO) the test list. This list is created by putting \test
# commands in the documentation.
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable (YES) or
# disable (NO) the bug list. This list is created by putting \bug
# commands in the documentation.
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
# disable (NO) the deprecated list. This list is created by putting
# \deprecated commands in the documentation.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional
# documentation sections, marked by \if sectionname ... \endif.
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
# the initial value of a variable or macro consists of for it to appear in
# the documentation. If the initializer consists of more lines than specified
# here it will be hidden. Use a value of 0 to hide initializers completely.
# The appearance of the initializer of individual variables and macros in the
# documentation can be controlled using \showinitializer or \hideinitializer
# command in the documentation regardless of this setting.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
# at the bottom of the documentation of classes and structs. If set to YES the
# list will mention the files that were used to generate the documentation.
SHOW_USED_FILES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
# Namespaces page.
# This will remove the Namespaces entry from the Quick Index
# and from the Folder Tree View (if specified). The default is YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command <command> <input-file>, where <command> is the value of
# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
# provided by doxygen. Whatever the program writes to standard output
# is used as the file version. See the manual for examples.
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option.
# You can optionally specify a file name after the option, if omitted
# DoxygenLayout.xml will be used as the name of the layout file.
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files
# containing the references data. This must be a list of .bib files. The
# .bib extension is automatically appended if omitted. Using this command
# requires the bibtex tool to be installed. See also
# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
# feature you need bibtex and perl available in the search path.
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated
# by doxygen. Possible values are YES and NO. If left blank NO is used.
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated by doxygen. Possible values are YES and NO. If left blank
# NO is used.
WARNINGS = YES
# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
# automatically be disabled.
WARN_IF_UNDOCUMENTED = YES
# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some
# parameters in a documented function, or documenting parameters that
# don't exist or using markup commands wrongly.
WARN_IF_DOC_ERROR = YES
# The WARN_NO_PARAMDOC option can be enabled to get warnings for
# functions that are documented, but have no documentation for their parameters
# or return value. If set to NO (the default) doxygen will only warn about
# wrong or incomplete parameter documentation, but not about the absence of
# documentation.
WARN_NO_PARAMDOC = NO
# The WARN_FORMAT tag determines the format of the warning messages that
# doxygen can produce. The string should contain the $file, $line, and $text
# tags, which will be replaced by the file and line number from which the
# warning originated and the warning text. Optionally the format may contain
# $version, which will be replaced by the version of the file (if it could
# be obtained via FILE_VERSION_FILTER)
WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning
# and error messages should be written. If left blank the output is written
# to stderr.
WARN_LOGFILE =
#---------------------------------------------------------------------------
# configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag can be used to specify the files and/or directories that contain
# documented source files. You may enter file names like "myfile.cpp" or
# directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
INPUT =
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
# also the default input encoding. Doxygen uses libiconv (or the iconv built
# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
# the list of possible encodings.
INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank the following patterns are tested:
# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
# *.f90 *.f *.for *.vhd *.vhdl
FILE_PATTERNS =
# The RECURSIVE tag can be used to specify whether or not subdirectories
# should be searched for input files as well. Possible values are YES and NO.
# If left blank NO is used.
RECURSIVE = NO
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories. Note that the wildcards are matched
# against the file with absolute path, so to exclude all test directories
# for example use the pattern */test/*
EXCLUDE_PATTERNS =
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
EXCLUDE_SYMBOLS =
# The EXAMPLE_PATH tag can be used to specify one or more files or
# directories that contain example code fragments that are included (see
# the \include command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank all files are included.
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude
# commands irrespective of the value of the RECURSIVE tag.
# Possible values are YES and NO. If left blank NO is used.
EXAMPLE_RECURSIVE = NO
# The IMAGE_PATH tag can be used to specify one or more files or
# directories that contain image that are included in the documentation (see
# the \image command).
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command <filter> <input-file>, where <filter>
# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
# input file. Doxygen will then use the output that the filter program writes
# to standard output.
# If FILTER_PATTERNS is specified, this tag will be
# ignored.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis.
# Doxygen will compare the file name with each pattern and apply the
# filter if there is a match.
# The filters are a list of the form:
# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
# info on how filters are used. If FILTER_PATTERNS is empty or if
# none of the patterns match the file name, INPUT_FILTER is applied.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will be used to filter the input files when producing source
# files to browse (i.e. when SOURCE_BROWSER is set to YES).
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
# and it is also possible to disable source filtering for a specific pattern
# using *.ext= (so without naming a filter). This option only has effect when
# FILTER_SOURCE_FILES is enabled.
FILTER_SOURCE_PATTERNS =
#---------------------------------------------------------------------------
# configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will
# be generated. Documented entities will be cross-referenced with these sources.
# Note: To get rid of all source code in the generated output, make sure also
# VERBATIM_HEADERS is set to NO.
SOURCE_BROWSER = NO
# Setting the INLINE_SOURCES tag to YES will include the body
# of functions and classes directly in the documentation.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
# doxygen to hide any special comment blocks from generated source code
# fragments. Normal C, C++ and Fortran comments will always remain visible.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES
# then for each documented function all documented
# functions referencing it will be listed.
REFERENCED_BY_RELATION = NO
# If the REFERENCES_RELATION tag is set to YES
# then for each documented function all documented entities
# called/used by that function will be listed.
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
# link to the source code.
# Otherwise they will link to the documentation.
REFERENCES_LINK_SOURCE = YES
# If the USE_HTAGS tag is set to YES then the references to source code
# will point to the HTML generated by the htags(1) tool instead of doxygen
# built-in source browser. The htags tool is part of GNU's global source
# tagging system (see http://www.gnu.org/software/global/global.html). You
# will need version 4.8.6 or higher.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
# of all compounds will be generated. Enable this if the project
# contains a lot of classes, structs, unions or interfaces.
ALPHABETICAL_INDEX = YES
# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
# in which this list will be split (can be a number in the range [1..20])
COLS_IN_ALPHA_INDEX = 5
# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
# should be ignored while generating the index headers.
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
# generate HTML output.
GENERATE_HTML = YES
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `html' will be used as the default path.
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
# doxygen will generate files with .html extension.
HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a personal HTML header for
# each generated HTML page. If it is left blank doxygen will generate a
# standard header. Note that when using a custom header you are responsible
# for the proper inclusion of any scripts and style sheets that doxygen
# needs, which is dependent on the configuration options used.
# It is advised to generate a default header using "doxygen -w html
# header.html footer.html stylesheet.css YourConfigFile" and then modify
# that header. Note that the header is subject to change so you typically
# have to redo this when upgrading to a newer version of doxygen or when
# changing the value of configuration settings such as GENERATE_TREEVIEW!
HTML_HEADER =
# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
# fine-tune the look of the HTML output. If the tag is left blank doxygen
# will generate a default style sheet. Note that doxygen will try to copy
# the style sheet file to the HTML output directory, so don't put your own
# style sheet in the HTML output directory as well, or it will be erased!
HTML_STYLESHEET =
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that
# the files will be copied as-is; there are no commands or markers available.
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the style sheet and background images
# according to this color. Hue is specified as an angle on a colorwheel,
# see http://en.wikipedia.org/wiki/Hue for more information.
# For instance the value 0 represents red, 60 is yellow, 120 is green,
# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
# The allowed range is 0 to 359.
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
# the colors in the HTML output. For a value of 0 the output will use
# grayscales only. A value of 255 will produce the most vivid colors.
HTML_COLORSTYLE_SAT = 100
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
# the luminance component of the colors in the HTML output. Values below
# 100 gradually make the output lighter, whereas values above 100 make
# the output darker. The value divided by 100 is the actual gamma applied,
# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
# and 100 does not change the gamma.
HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting
# this to NO can help when comparing the output of multiple runs.
HTML_TIMESTAMP = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
HTML_DYNAMIC_SECTIONS = NO
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
# entries shown in the various tree structured indices initially; the user
# can expand and collapse entries dynamically later on. Doxygen will expand
# the tree to such a level that at most the specified number of entries are
# visible (unless a fully collapsed tree already exceeds this amount).
# So setting the number of entries 1 will produce a full collapsed tree by
# default. 0 is a special value representing an infinite number of entries
# and will result in a full expanded tree by default.
HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files
# will be generated that can be used as input for Apple's Xcode 3
# integrated development environment, introduced with OSX 10.5 (Leopard).
# To create a documentation set, doxygen will generate a Makefile in the
# HTML output directory. Running make will produce the docset in that
# directory and running "make install" will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
# it at startup.
# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
# for more information.
GENERATE_DOCSET = NO
# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
# feed. A documentation feed provides an umbrella under which multiple
# documentation sets from a single provider (such as a company or product suite)
# can be grouped.
DOCSET_FEEDNAME = "Doxygen generated docs"
# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
# should uniquely identify the documentation set bundle. This should be a
# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
# will append .docset to the name.
DOCSET_BUNDLE_ID = org.doxygen.Project
# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
# of the generated HTML documentation.
GENERATE_HTMLHELP = NO
# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
# be used to specify the file name of the resulting .chm file. You
# can add a path in front of the file if the result should not be
# written to the html output directory.
CHM_FILE =
# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
# be used to specify the location (absolute path including file name) of
# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
# the HTML help compiler on the generated index.hhp.
HHC_LOCATION =
# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
# controls if a separate .chi index file is generated (YES) or that
# it should be included in the master .chm file (NO).
GENERATE_CHI = NO
# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
# is used to encode HtmlHelp index (hhk), content (hhc) and project file
# content.
CHM_INDEX_ENCODING =
# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
# controls whether a binary table of contents is generated (YES) or a
# normal table of contents (NO) in the .chm file.
BINARY_TOC = NO
# The TOC_EXPAND flag can be set to YES to add extra items for group members
# to the contents of the HTML help documentation and to the tree view.
TOC_EXPAND = NO
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
# that can be used as input for Qt's qhelpgenerator to generate a
# Qt Compressed Help (.qch) of the generated HTML documentation.
GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
# be used to specify the file name of the resulting .qch file.
# The path specified is relative to the HTML output folder.
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#namespace
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#virtual-folders
QHP_VIRTUAL_FOLDER = doc
# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
# add. For more information please see
# http://doc.trolltech.com/qthelpproject.html#custom-filters
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see
# <a href="http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-filters">
# Qt Help Project / Custom Filters</a>.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's
# filter section matches.
# <a href="http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes">
# Qt Help Project / Filter Attributes</a>.
QHP_SECT_FILTER_ATTRS =
# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
# be used to specify the location of Qt's qhelpgenerator.
# If non-empty doxygen will try to run qhelpgenerator on the generated
# .qhp file.
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
# will be generated, which together with the HTML files, form an Eclipse help
# plugin. To install this plugin and make it available under the help contents
# menu in Eclipse, the contents of the directory containing the HTML and XML
# files needs to be copied into the plugins directory of eclipse. The name of
# the directory within the plugins directory should be the same as
# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
# the help appears.
GENERATE_ECLIPSEHELP = NO
# A unique identifier for the eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have
# this name.
ECLIPSE_DOC_ID = org.doxygen.Project
# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
# at top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it. Since the tabs have the same information as the
# navigation tree you can set this option to YES if you already set
# GENERATE_TREEVIEW to YES.
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information.
# If the tag value is set to YES, a side panel will be generated
# containing a tree-like index structure (just like the one that
# is generated for HTML Help). For this to work a browser that supports
# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
# Windows users are probably better off using the HTML help feature.
# Since the tree basically has the same information as the tab index you
# could consider setting DISABLE_INDEX to YES when enabling this option.
GENERATE_TREEVIEW = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
# (range [0,1..20]) that doxygen will group on one line in the generated HTML
# documentation. Note that a value of 0 will completely suppress the enum
# values from appearing in the overview section.
ENUM_VALUES_PER_LINE = 4
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
# used to set the initial width (in pixels) of the frame in which the tree
# is shown.
TREEVIEW_WIDTH = 250
# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
# links to external symbols imported via tag files in a separate window.
EXT_LINKS_IN_WINDOW = NO
# Use this tag to change the font size of Latex formulas included
# as images in the HTML documentation. The default is 10. Note that
# when you change the font size after a successful doxygen run you need
# to manually remove any form_*.png images from the HTML output directory
# to force them to be regenerated.
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are
# not supported properly for IE 6.0, but are supported on all modern browsers.
# Note that when changing this option you need to delete any form_*.png files
# in the HTML output before the changes have effect.
FORMULA_TRANSPARENT = YES
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
# (see http://www.mathjax.org) which uses client side Javascript for the
# rendering instead of using prerendered bitmaps. Use this if you do not
# have LaTeX installed or if you want the formulas to look prettier in the HTML
# output. When enabled you may also need to install MathJax separately and
# configure the path to it using the MATHJAX_RELPATH option.
USE_MATHJAX = NO
# When MathJax is enabled you need to specify the location relative to the
# HTML output directory using the MATHJAX_RELPATH option. The destination
# directory should contain the MathJax.js script. For instance, if the mathjax
# directory is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to
# the MathJax Content Delivery Network so you can quickly see the result without
# installing MathJax.
# However, it is strongly recommended to install a local
# copy of MathJax from http://www.mathjax.org before deployment.
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
# names that should be enabled during MathJax rendering.
MATHJAX_EXTENSIONS =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box
# for the HTML output. The underlying search engine uses javascript
# and DHTML and should work on any modern browser. Note that when using
# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
# (GENERATE_DOCSET) there is already a search function so this one should
# typically be disabled. For large projects the javascript based search engine
# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a PHP enabled web server instead of at the web client
# using Javascript. Doxygen will generate the search PHP script and index
# file to put on the web server. The advantage of the server
# based approach is that it scales better to large projects and allows
# full text search. The disadvantages are that it is more difficult to setup
# and does not have live searching capabilities.
SERVER_BASED_SEARCH = NO
#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------
# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
# generate Latex output.
GENERATE_LATEX = YES
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `latex' will be used as the default path.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked. If left blank `latex' will be used as the default command name.
# Note that when enabling USE_PDFLATEX this option is only used for
# generating bitmaps for formulas in the HTML output, but not in the
# Makefile that is written to the output directory.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
# generate index for LaTeX. If left blank `makeindex' will be used as the
# default command name.
MAKEINDEX_CMD_NAME = makeindex
# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
# LaTeX documents. This may be useful for small projects and may help to
# save some trees in general.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used
# by the printer. Possible values are: a4, letter, legal and
# executive. If left blank a4wide will be used.
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
# packages that should be included in the LaTeX output.
EXTRA_PACKAGES =
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
# the generated latex document. The header should contain everything until
# the first chapter. If it is left blank doxygen will generate a
# standard header. Notice: only use this tag if you know what you are doing!
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
# the generated latex document. The footer should contain everything after
# the last chapter. If it is left blank doxygen will generate a
# standard footer. Notice: only use this tag if you know what you are doing!
LATEX_FOOTER =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
# is prepared for conversion to pdf (using ps2pdf). The pdf file will
# contain links (just like the HTML output) instead of page references
# This makes the output suitable for online browsing using a pdf viewer.
PDF_HYPERLINKS = YES
# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
# plain latex in the generated Makefile. Set this option to YES to get a
# higher quality PDF documentation.
USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
# command to the generated LaTeX files. This will instruct LaTeX to keep
# running if errors occur, instead of asking the user for help.
# This option is also used when generating formulas in HTML.
LATEX_BATCHMODE = NO
# If LATEX_HIDE_INDICES is set to YES then doxygen will not
# include the index chapters (such as File Index, Compound Index, etc.)
# in the output.
LATEX_HIDE_INDICES = NO
# If LATEX_SOURCE_CODE is set to YES then doxygen will include
# source code with syntax highlighting in the LaTeX output.
# Note that which sources are shown also depends on other settings
# such as SOURCE_BROWSER.
LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
# http://en.wikipedia.org/wiki/BibTeX for more info.
LATEX_BIB_STYLE = plain
#---------------------------------------------------------------------------
# configuration options related to the RTF output
#---------------------------------------------------------------------------
# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
# The RTF output is optimized for Word 97 and may not look very pretty with
# other RTF readers or editors.
GENERATE_RTF = NO
# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `rtf' will be used as the default path.
RTF_OUTPUT = rtf
# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
# RTF documents. This may be useful for small projects and may help to
# save some trees in general.
COMPACT_RTF = NO
# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
# will contain hyperlink fields. The RTF file will
# contain links (just like the HTML output) instead of page references.
# This makes the output suitable for online browsing using WORD or other
# programs which support those fields.
# Note: wordpad (write) and others do not support links.
RTF_HYPERLINKS = NO
# Load style sheet definitions from file. Syntax is similar to doxygen's
# config file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an rtf document.
# Syntax is similar to doxygen's config file.
RTF_EXTENSIONS_FILE =
#---------------------------------------------------------------------------
# configuration options related to the man page output
#---------------------------------------------------------------------------
# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
# generate man pages
GENERATE_MAN = NO
# The MAN_OUTPUT tag is used to specify where the man pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `man' will be used as the default path.
MAN_OUTPUT = man
# The MAN_EXTENSION tag determines the extension that is added to
# the generated man pages (default is the subroutine's section .3)
MAN_EXTENSION = .3
# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
# then it will generate one additional man file for each entity
# documented in the real man page(s). These additional files
# only source the real man page, but without them the man command
# would be unable to find the correct page. The default is NO.
MAN_LINKS = NO
#---------------------------------------------------------------------------
# configuration options related to the XML output
#---------------------------------------------------------------------------
# If the GENERATE_XML tag is set to YES Doxygen will
# generate an XML file that captures the structure of
# the code including all documentation.
GENERATE_XML = NO
# The XML_OUTPUT tag is used to specify where the XML pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `xml' will be used as the default path.
XML_OUTPUT = xml
# The XML_SCHEMA tag can be used to specify an XML schema,
# which can be used by a validating XML parser to check the
# syntax of the XML files.
XML_SCHEMA =
# The XML_DTD tag can be used to specify an XML DTD,
# which can be used by a validating XML parser to check the
# syntax of the XML files.
XML_DTD =
# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
# dump the program listings (including syntax highlighting
# and cross-referencing information) to the XML output. Note that
# enabling this will significantly increase the size of the XML output.
XML_PROGRAMLISTING = YES
#---------------------------------------------------------------------------
# configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
# generate an AutoGen Definitions (see autogen.sf.net) file
# that captures the structure of the code including all
# documentation. Note that this feature is still experimental
# and incomplete at the moment.
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# configuration options related to the Perl module output
#---------------------------------------------------------------------------
# If the GENERATE_PERLMOD tag is set to YES Doxygen will
# generate a Perl module file that captures the structure of
# the code including all documentation. Note that this
# feature is still experimental and incomplete at the
# moment.
GENERATE_PERLMOD = NO
# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
# the necessary Makefile rules, Perl scripts and LaTeX code to be able
# to generate PDF and DVI output from the Perl module output.
PERLMOD_LATEX = NO
# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
# nicely formatted so it can be parsed by a human reader.
# This is useful
# if you want to understand what is going on.
# On the other hand, if this
# tag is set to NO the size of the Perl module output will be much smaller
# and Perl will parse it just the same.
PERLMOD_PRETTY = YES
# The names of the make variables in the generated doxyrules.make file
# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
# This is useful so different doxyrules.make files included by the same
# Makefile don't overwrite each other's variables.
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
# evaluate all C-preprocessor directives found in the sources and include
# files.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
# names in the source code. If set to NO (the default) only conditional
# compilation will be performed. Macro expansion can be done in a controlled
# way by setting EXPAND_ONLY_PREDEF to YES.
MACRO_EXPANSION = NO
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
# then the macro expansion is limited to the macros specified with the
# PREDEFINED and EXPAND_AS_DEFINED tags.
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
# pointed to by INCLUDE_PATH will be searched when a #include is found.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by
# the preprocessor.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will
# be used.
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that
# are defined before the preprocessor is started (similar to the -D option of
# gcc). The argument of the tag is a list of macros of the form: name
# or name=definition (no spaces). If the definition and the = are
# omitted =1 is assumed. To prevent a macro definition from being
# undefined via #undef or recursively expanded use the := operator
# instead of the = operator.
PREDEFINED =
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.
# The macro definition that is found in the sources will be used.
# Use the PREDEFINED tag if you want to use a different macro definition that
# overrules the definition found in the source code.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
# doxygen's preprocessor will remove all references to function-like macros
# that are alone on a line, have an all uppercase name, and do not end with a
# semicolon, because these will confuse the parser if not removed.
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration::additions related to external references
#---------------------------------------------------------------------------
# The TAGFILES option can be used to specify one or more tagfiles. For each
# tag file the location of the external documentation should be added. The
# format of a tag file without this location is as follows:
#
# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
#
# TAGFILES = file1=loc1 "file2 = loc2" ...
# where "loc1" and "loc2" can be relative or absolute paths
# or URLs. Note that each tag file must have a unique name (where the name does
# NOT include the path). If a tag file is not located in the directory in which
# doxygen is run, you must also specify the path to the tagfile here.
TAGFILES =
# When a file name is specified after GENERATE_TAGFILE, doxygen will create
# a tag file that is based on the input files it reads.
GENERATE_TAGFILE =
# If the ALLEXTERNALS tag is set to YES all external classes will be listed
# in the class index. If set to NO only the inherited external classes
# will be listed.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
# in the modules index. If set to NO, only the current project's groups will
# be listed.
EXTERNAL_GROUPS = YES
# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of `which perl').
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
# or super classes. Setting the tag to NO turns the diagrams off. Note that
# this option also works with HAVE_DOT disabled, but it is recommended to
# install and use dot, since it yields more powerful graphs.
CLASS_DIAGRAMS = YES
# You can define message sequence charts within doxygen comments using the \msc
# command. Doxygen will then run the mscgen tool (see
# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
# documentation. The MSCGEN_PATH tag allows you to specify the directory where
# the mscgen tool resides. If left empty the tool is assumed to be found in the
# default search path.
MSCGEN_PATH =
# If set to YES, the inheritance and collaboration graphs will hide
# inheritance and usage relations if the target is undocumented
# or is not a class.
HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz, a graph visualization
# toolkit from AT&T and Lucent Bell Labs. The other options in this section
# have no effect if this option is set to NO (the default)
HAVE_DOT = NO
# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
# allowed to run in parallel. When set to 0 (the default) doxygen will
# base this on the number of processors available in the system. You can set it
# explicitly to a value larger than 0 to get control over the balance
# between CPU load and processing speed.
DOT_NUM_THREADS = 0
# By default doxygen will use the Helvetica font for all dot files that
# doxygen generates. When you want a differently looking font you can specify
# the font name using DOT_FONTNAME. You need to make sure dot is able to find
# the font, which can be done by putting it in a standard location or by setting
# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
# directory containing the font.
DOT_FONTNAME = Helvetica
# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
# The default size is 10pt.
DOT_FONTSIZE = 10
# By default doxygen will tell dot to use the Helvetica font.
# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
# set the path where dot can find it.
DOT_FONTPATH =
# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect inheritance relations. Setting this tag to YES will force the
# CLASS_DIAGRAMS tag to NO.
CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect implementation dependencies (inheritance, containment, and
# class references variables) of the class with other documented classes.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for groups, showing the direct groups dependencies
GROUP_GRAPHS = YES
# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
UML_LOOK = NO
# If the UML_LOOK tag is enabled, the fields and methods are shown inside
# the class node. If there are many fields or methods and many nodes the
# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
# threshold limits the number of items for each type to make the size more
# manageable. Set this to 0 for no limit. Note that the threshold may be
# exceeded by 50% before the limit is enforced.
UML_LIMIT_NUM_FIELDS = 10
# If set to YES, the inheritance and collaboration graphs will show the
# relations between templates and their instances.
TEMPLATE_RELATIONS = NO
# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
# tags are set to YES then doxygen will generate a graph for each documented
# file showing the direct and indirect include dependencies of the file with
# other documented files.
INCLUDE_GRAPH = YES
# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
# documented header file showing the documented files that directly or
# indirectly include this file.
INCLUDED_BY_GRAPH = YES
# If the CALL_GRAPH and HAVE_DOT options are set to YES then
# doxygen will generate a call dependency graph for every global function
# or class method. Note that enabling this option will significantly increase
# the time of a run. So in most cases it will be better to enable call graphs
# for selected functions only using the \callgraph command.
CALL_GRAPH = NO
# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
# doxygen will generate a caller dependency graph for every global function
# or class method. Note that enabling this option will significantly increase
# the time of a run. So in most cases it will be better to enable caller
# graphs for selected functions only using the \callergraph command.
CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
# will generate a graphical hierarchy of all classes instead of a textual one.
GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
# then doxygen will show the dependencies a directory has on other directories
# in a graphical way. The dependency relations are determined by the #include
# relations between the files in the directories.
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. Possible values are svg, png, jpg, or gif.
# If left blank png will be used. If you choose svg you need to set
# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
# visible in IE 9+ (other browsers do not have this requirement).
DOT_IMAGE_FORMAT = png
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
# Note that this requires a modern browser other than Internet Explorer.
# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
# visible. Older versions of IE do not have SVG support.
INTERACTIVE_SVG = NO
# The tag DOT_PATH can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the
# \dotfile command).
DOTFILE_DIRS =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the
# \mscfile command).
MSCFILE_DIRS =
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
# nodes that will be shown in the graph. If the number of nodes in a graph
# becomes larger than this value, doxygen will truncate the graph, which is
# visualized by representing a node as a red box. Note that if the
# number of direct children of the root node in a graph is already larger than
# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
DOT_GRAPH_MAX_NODES = 50
# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
# graphs generated by dot. A depth value of 3 means that only nodes reachable
# from the root by following a path via at most 3 edges will be shown. Nodes
# that lay further from the root node will be omitted. Note that setting this
# option to 1 or 2 may greatly reduce the computation time needed for large
# code bases. Also note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
MAX_DOT_GRAPH_DEPTH = 0
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not
# seem to support this out of the box. Warning: Depending on the platform used,
# enabling this option may lead to badly anti-aliased labels on the edges of
# a graph (i.e. they become hard to read).
DOT_TRANSPARENT = NO
# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10)
# support this, this feature is disabled by default.
DOT_MULTI_TARGETS = YES
# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
# generate a legend page explaining the meaning of the various boxes and
# arrows in the dot generated graphs.
GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
# remove the intermediate dot files that are used to generate
# the various graphs.
DOT_CLEANUP = YES
libvdpau-va-gl-0.3.6/README.md 0000664 0000000 0000000 00000006026 12665616006 0015625 0 ustar 00root root 0000000 0000000 About
=====
Briefly, this is the [VDPAU](http://en.wikipedia.org/wiki/VDPAU) driver with
[VA-API](http://en.wikipedia.org/wiki/Video_Acceleration_API)/OpenGL backend.
There are applications that can use VDPAU. Amongst them are Adobe Flash Player
and Mplayer. They both can use VDPAU, but since there is no VDPAU available on Intel
chips, they fall back to different drawing techniques. And while Mplayer can use
XVideo extension to offload scaling to GPU, Flash Player can not and does all
scaling in software. If there was VDPAU available, CPU usage could be significantly
lower.
VDPAU is not vendor-locked technology. Even official documentation mentions
possibility of other drivers. They should be named as `libvdpau_drivername.so.1` and
placed where linker could find them. `/usr/lib` usually works fine.
Which driver to use is determined by asking X server about current driver name or by
using `VDPAU_DRIVER` environment variable.
Here is one. Named libvdpau_va_gl.so.1, it uses OpenGL under the hood to
accelerate drawing and scaling and VA-API (if available) to accelerate video
decoding. For now VA-API is available on some Intel chips, and on some AMD video
adapters with help of [xvba-va-driver](http://cgit.freedesktop.org/vaapi/xvba-driver/).
OpenGL is available, you know, on systems with OpenGL available.
Install
=======
1. `sudo apt-get install cmake libvdpau-dev libva-dev libglib2.0-dev libswscale-dev libgl1-mesa-dev libglu1-mesa-dev`
2. `mkdir build; cd build`
3. `cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..`
4. `sudo make install`
5. Add `VDPAU_DRIVER=va_gl` to your environment
Commands above should work for any Debian-based distro. Fedora names packages in a
different way, so package installation step will look like:
`sudo yum install cmake libvdpau-devel libva-devel glib2-devel ffmpeg-devel mesa-libGL-devel mesa-libGLU-devel`.
Note you need to add rpmfusion repo to get `ffmpeg-devel`.
Run time configuration
======================
Besides `VDPAU_DRIVER` variable which selects which driver to use there are other
variables that control runtime behavior of va_gl driver.
`VDPAU_LOG` enables or disables tracing. `0` disables, `1` enables.
`VDPAU_QUIRKS` contains comma-separated list of enabled quirks. Here is the list:
* `XCloseDisplay` Disables calling of XCloseDisplay which may segfault on systems with some AMD cards
* `ShowWatermark` Enables displaying string "va_gl" in bottom-right corner of window
* `LogThreadId` Adds thread id to trace output
* `LogCallDuration` Adds call duration to trace output
* `LogPqDelay` Adds presentation queue introduced delay to trace output
* `LogTimestamp` Displays timestamps
* `AvoidVA` Makes libvdpau-va-gl NOT use VA-API
Parameters of VDPAU_QUIRKS are case-insensitive.
Copying
=======
libvdpau-va-gl is distributed under the terms of the LGPLv3. See files
COPYING, COPYING.GPLv3, and COPYING.LGPLv3 for details.
Contact
=======
Author can be reached at email
`ibragimovrinat-at-mail.ru` or at github: https://github.com/i-rinat/
libvdpau-va-gl-0.3.6/doc/ 0000775 0000000 0000000 00000000000 12665616006 0015107 5 ustar 00root root 0000000 0000000 libvdpau-va-gl-0.3.6/doc/flash-wmode.js 0000664 0000000 0000000 00000002570 12665616006 0017657 0 ustar 00root root 0000000 0000000 // ==UserScript==
// @name Set wmode to 'direct'
// @namespace None
// @description Sets embed's and object's wmode parameter to 'direct' to enable hw acceleration
// @include *
// @grant none
// ==/UserScript==
// Process the Flash elements that already exist when the script is injected.
(function ()
{
    nodeInserted();
})();
// Re-run on every later DOM insertion, so dynamically added Flash players
// (e.g. created by page scripts) are patched as well.
document.addEventListener("DOMNodeInserted", nodeInserted, false);
// Force every Flash <object>/<embed> on the page to wmode="direct", which
// makes the plugin run in windowed mode so it can use hardware (VDPAU)
// acceleration (see doc/known-issues.md in this project).
// NOTE(review): the "DOMNodeInserted" mutation event driving this function is
// deprecated; a MutationObserver would be the modern replacement.
function nodeInserted()
{
    // <object> elements: make sure a <param name="wmode" value="direct">
    // child exists, overriding any explicit wmode parameter in place.
    for (var objs = document.getElementsByTagName("object"), i = 0, obj; obj = objs[i]; i++)
    {
        if (obj.type == 'application/x-shockwave-flash')
        {
            var skip = false;
            for (var params = obj.getElementsByTagName("param"), j = 0, param; param = params[j]; j++)
            {
                if (param.getAttribute("name") == "wmode")
                {
                    param.setAttribute("value", "direct");
                    skip = true;
                    break;
                }
            }
            if (skip) continue;
            // No wmode <param> present: create and append one.
            var param = document.createElement("param");
            param.setAttribute("name", "wmode");
            param.setAttribute("value", "direct");
            obj.appendChild(param);
        }
    }
    // <embed> elements carry wmode as an attribute. The element is cloned via
    // its outerHTML (after setting the attribute) and re-inserted, then the
    // original is removed — presumably to force the plugin to re-instantiate
    // with the new wmode; confirm against target browsers.
    for (var embeds = document.getElementsByTagName("embed"), i = 0, embed; embed = embeds[i]; i++) {
        if (embed.type != 'application/x-shockwave-flash') continue;
        if (embed.getAttribute('wmode') && embed.getAttribute('wmode') == 'direct') continue;
        embed.setAttribute('wmode', 'direct');
        // (fixed: removed unused local `var html = embed.outerHTML;`)
        embed.insertAdjacentHTML('beforeBegin', embed.outerHTML);
        embed.parentNode.removeChild(embed);
    }
}
libvdpau-va-gl-0.3.6/doc/known-issues.md 0000664 0000000 0000000 00000006410 12665616006 0020077 0 ustar 00root root 0000000 0000000 Known issues
============
Flash Player is slow
--------------------
The issue consists of two: hw decoding (D in VDPAU) and hw
presentation (P in VDPAU, mostly scaling). See below.
No hardware accelerated decoding in Flash Player
------------------------------------------------
Flash Player has hardware accelerated decoding turned off by
default. To enable, add line `EnableLinuxHWVideoDecode=1` to file
`/etc/adobe/mms.cfg`. Create that file if necessary. You must reload plugin,
easiest way to reload plugin is to restart browser.
No hardware accelerated presentation in Flash
---------------------------------------------
First, you may check whether the application uses VDPAU via libvdpau-va-gl,
by adding `ShowWatermark` to `VDPAU_QUIRKS` environment variable. That
will display "va_gl" at bottom right corner of video. If you see it,
you are fine. Otherwise, you can try user script [doc/flash-wmode.js](flash-wmode.js)
which will force wmode parameter value to be 'direct'. Here is how and why
it works.
Flash Player is an NPAPI plugin. Such plugins are separate binaries which
output is embedded in a web page by one of two different ways. Plugin can ask browser
either windowed or windowless operation. First way browser creates a
window and passes it to a plugin. Then plugin can draw on that window
when and how it wants to. Second way plugin does content display only on
browser demand by filling data buffer. VDPAU requires an X drawable
to display on, so it can be used only in windowed plugin mode.
Usually nothing can be displayed over that drawable. VDPAU will
overwrite everything else. On the other hand, browser plugins have
`wmode` parameter which controls how their content is managed by browser.
You can search for exact `wmode` semantics on the Internet. But here is
the crucial part: if `wmode` set to anything but `direct`, plugin can not
use hardware acceleration, since it forces windowless operation which
in turn prevents VDPAU usage.
Script above forces all plugin instances to have `wmode=direct`. That
solves some problems, but has own drawbacks. If web page was
designed to have something to be displayed over the Flash movie, that will
become hidden. That may be subtitles, or video player controls. They
may become unusable.
If you know any better working solution for this problem, please let me know.
Flash is still slow
-------------------
Flash movies (.swf) must use StageVideo to make use of hardware acceleration.
If the author for some reason has not used it, there is nothing that can be done
on our side. For example, Vimeo player does use hardware decoding, but then it
downloads decoded frames back to CPU, where they scaled with their own
scaler implemented in ActionScript.
Mplayer has higher CPU usage with VDPAU than with Xv
-----------------------------------------------------
If you omit `-vc ffh264vdpau`, Mplayer will use a software decoder and
then output YCbCr images via VDPAU. At the moment YCbCr to RGB conversion
is done with help of libswscale, which can eat decent amount of CPU time.
Ensure you have hardware accelerated codecs enabled.
Mplayer shows weird errors for 10-bit H.264
---------------------------------------------
VDPAU at the moment has no support for Hi10P, so 10bit videos will fail. There is
nothing that can be done in libvdpau-va-gl to fix this.
libvdpau-va-gl-0.3.6/glsl/ 0000775 0000000 0000000 00000000000 12665616006 0015303 5 ustar 00root root 0000000 0000000 libvdpau-va-gl-0.3.6/glsl/CMakeLists.txt 0000664 0000000 0000000 00000001565 12665616006 0020052 0 ustar 00root root 0000000 0000000 set(shader_list_no_path
NV12_RGBA.glsl
YV12_RGBA.glsl
red_to_alpha_swizzle.glsl
)
# Export to the parent scope the directory that will contain the generated
# shaders.h, so the top-level CMakeLists can add it to include paths.
set(GENERATED_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR} PARENT_SCOPE)

# Build the list of full paths to the bundled GLSL shader sources.
set(shader_list)
foreach(item ${shader_list_no_path})
    list(APPEND shader_list "${CMAKE_CURRENT_SOURCE_DIR}/${item}")
endforeach()

# Generate shaders.c/shaders.h from the shader sources with the helper tool.
# Using the executable target name in COMMAND lets CMake substitute the real
# binary location (more robust than hardcoding ${CMAKE_CURRENT_BINARY_DIR}),
# and VERBATIM gives platform-independent argument escaping.
add_custom_command(
    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/shaders.c ${CMAKE_CURRENT_BINARY_DIR}/shaders.h
    COMMAND shader-bundle-tool ${CMAKE_CURRENT_BINARY_DIR}/shaders.h ${CMAKE_CURRENT_BINARY_DIR}/shaders.c ${shader_list}
    DEPENDS ${shader_list} shader-bundle-tool
    VERBATIM
)
add_custom_target(shader-bundle-src DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/shaders.c ${CMAKE_CURRENT_BINARY_DIR}/shaders.h)

# Host tool that packs the shader sources into C string arrays.
add_executable(shader-bundle-tool shader-bundle-tool.c)

# Static library holding the generated shader bundle; built after generation.
add_library(shader-bundle STATIC ${CMAKE_CURRENT_BINARY_DIR}/shaders.c)
add_dependencies(shader-bundle shader-bundle-src)
libvdpau-va-gl-0.3.6/glsl/NV12_RGBA.glsl 0000664 0000000 0000000 00000000562 12665616006 0017452 0 ustar 00root root 0000000 0000000 #version 110
uniform sampler2D tex[2];
// NV12 -> RGBA conversion fragment shader.
// tex[0]: luma plane (Y in .r); tex[1]: interleaved chroma plane
// (Cb in .r, Cr in .g). Both planes are sampled at the same coordinate;
// chroma subsampling is handled by the texture dimensions/filtering.
void main()
{
vec2 y_coord = gl_TexCoord[0].xy;
float y = texture2D(tex[0], y_coord).r;
// Center chroma around zero before applying the conversion matrix.
float cb = texture2D(tex[1], y_coord).r - 0.5;
float cr = texture2D(tex[1], y_coord).g - 0.5;
// Full-range YCbCr -> RGB (BT.601-style coefficients), opaque alpha.
gl_FragColor = vec4(
y + 1.4021 * cr,
y - 0.34482 * cb - 0.71405 * cr,
y + 1.7713 * cb,
1.0);
}
libvdpau-va-gl-0.3.6/glsl/YV12_RGBA.glsl 0000664 0000000 0000000 00000000742 12665616006 0017465 0 ustar 00root root 0000000 0000000 #version 110
uniform sampler2D tex[2];
// YV12 -> RGBA conversion fragment shader.
// tex[0]: luma plane (Y in .r); tex[1]: a single texture stacking the two
// quarter-size chroma planes vertically — top half holds one chroma plane,
// bottom half the other, hence the y/2 and y/2 + 0.5 coordinates.
void main()
{
vec2 y_coord = gl_TexCoord[0].xy;
vec2 cb_coord = vec2(y_coord.x, y_coord.y/2.0);
vec2 cr_coord = vec2(y_coord.x, y_coord.y/2.0 + 0.5);
float y = texture2D(tex[0], y_coord).r;
// Center chroma around zero before applying the conversion matrix.
float cb = texture2D(tex[1], cb_coord).r - 0.5;
float cr = texture2D(tex[1], cr_coord).r - 0.5;
// Full-range YCbCr -> RGB (BT.601-style coefficients), opaque alpha.
gl_FragColor = vec4(
y + 1.4021 * cr,
y - 0.34482 * cb - 0.71405 * cr,
y + 1.7713 * cb,
1.0);
}
libvdpau-va-gl-0.3.6/glsl/red_to_alpha_swizzle.glsl 0000664 0000000 0000000 00000000222 12665616006 0022372 0 ustar 00root root 0000000 0000000 #version 110
uniform sampler2D tex_0;
// Swizzle shader for A8 bitmap surfaces stored in a red-channel texture:
// takes the texture's red channel as alpha and modulates the current
// primary color, leaving RGB untouched.
void main()
{
gl_FragColor = gl_Color * vec4(1.0, 1.0, 1.0, texture2D(tex_0, gl_TexCoord[0].xy).r);
}
libvdpau-va-gl-0.3.6/glsl/shader-bundle-tool.c 0000664 0000000 0000000 00000006115 12665616006 0021142 0 ustar 00root root 0000000 0000000 #include
#include
#include
#include
#include
#include
#include
#include
// shader-bundle-tool: build-time helper that embeds GLSL source files into
// generated C code.
//
// Usage: shader-bundle-tool <out.h> <out.c> <shader1.glsl> [shader2.glsl ...]
// argv[1] — path of the header to generate,
// argv[2] — path of the C file to generate,
// argv[3..] — shader source files to embed.
//
// The header declares `struct shader_s glsl_shaders[]`, a SHADER_COUNT macro
// and an anonymous enum with one `glsl_<basename>` index constant per shader;
// the C file defines the array with each shader's body as a C string literal.
// Exit codes: 1 bad arguments, 2 file-open failure, 3 OOM, 4 fstat failure,
// 5 read failure.
int
main(int argc, char *argv[])
{
// Need at least the two output paths (shader list may legally be empty).
if (argc <= 2) {
printf("not enough arguments\n");
return 1;
}
FILE *fp_h = fopen(argv[1], "w");
if (!fp_h) {
printf("can't open %s\n", argv[1]);
return 2;
}
FILE *fp_c = fopen(argv[2], "w");
if (!fp_c) {
printf("can't open %s\n", argv[2]);
return 2;
}
// ---- header file ----
// NOTE(review): the "#include \n" literal below lost its header name in
// archive extraction; verify against the original source.
fprintf(fp_h,
"// generated file, all changes will be lost\n\n"
"#ifndef VA_GL_GLSL_SHADERS_H\n"
"#define VA_GL_GLSL_SHADERS_H\n"
"\n"
"#include \n"
"\n"
"struct shader_s {\n"
" const char *body;\n"
" int len;\n"
"};\n"
"\n"
"extern struct shader_s glsl_shaders[%d];\n\n", argc - 3);
fprintf(fp_h, "#define SHADER_COUNT %d\n\n", argc - 3);
// One enum constant per shader, named after the file's basename up to the
// first '.' (e.g. "NV12_RGBA.glsl" -> glsl_NV12_RGBA).
fprintf(fp_h, "enum {\n");
for (int k = 3; k < argc; k ++) {
char *fname = strdup(argv[k]);
char *bname = basename(fname);
char *last = strchr(bname, '.');
if (!last)
continue;
fprintf(fp_h, " glsl_%.*s = %d,\n", (int)(last - bname), bname, k - 3);
free(fname);
}
fprintf(fp_h,
"};\n\n");
fprintf(fp_h, "#endif /* VA_GL_GLSL_SHADERS_H */\n");
fclose(fp_h);
// ---- C file ----
char *tmps = strdup(argv[1]);
char *h_name = basename(tmps);
fprintf(fp_c, "// generated file, all changes will be lost\n\n");
// re-declared below with the include of the just-written header
fprintf(fp_c, "#include \"%s\"\n", h_name);
free(tmps);
fprintf(fp_c, "\n");
fprintf(fp_c, "struct shader_s glsl_shaders[%d] = {\n", argc - 3);
for (int k = 3; k < argc; k ++) {
FILE *fp_tmp = fopen(argv[k], "r");
if (!fp_tmp) {
printf("can't open %s\n", argv[k]);
return 2;
}
// Read the whole shader file into memory at once.
struct stat sb;
if (fstat(fileno(fp_tmp), &sb) != 0) {
printf("can't fstat, errno = %d\n", errno);
return 4;
}
char *buf = malloc(sb.st_size);
if (!buf) {
printf("not enough memory\n");
return 3;
}
// NOTE(review): a zero-length shader file makes fread() return 0 here
// and the tool fails with code 5 — confirm that is intended.
if (fread(buf, sb.st_size, 1, fp_tmp) != 1) {
printf("can't read data from file\n");
return 5;
}
fclose(fp_tmp);
fprintf(fp_c, " {\n");
fprintf(fp_c, " .body =\n");
fprintf(fp_c, " \"");
// Emit the body as a C string: '\n' becomes an escaped newline plus a
// literal line break, '\r' is dropped, everything else passes through.
// `len` counts the emitted characters (newlines count as one).
int len = 0;
for (unsigned int j = 0; j < sb.st_size; j ++) {
switch (buf[j]) {
case '\n':
fprintf(fp_c, "\\n\"\n \"");
len ++;
break;
case '\r':
break;
default:
fprintf(fp_c, "%c", buf[j]);
len ++;
break;
}
}
fprintf(fp_c, "\",\n");
fprintf(fp_c, " .len = %d,\n", len);
fprintf(fp_c, " },\n");
free(buf);
}
fprintf(fp_c, "};\n");
fclose(fp_c);
return 0;
}
libvdpau-va-gl-0.3.6/src/ 0000775 0000000 0000000 00000000000 12665616006 0015131 5 ustar 00root root 0000000 0000000 libvdpau-va-gl-0.3.6/src/CMakeLists.txt 0000664 0000000 0000000 00000002127 12665616006 0017673 0 ustar 00root root 0000000 0000000 include_directories(
${GENERATED_INCLUDE_DIRS}
)
# Library search paths for the pkg-config based dependencies.
# (Directory-scoped commands kept for compatibility with the CMake 2.8.8
# minimum declared at the top level.)
link_directories (
    ${X11_LIBRARY_DIRS}
    ${GLIB20_LIBRARY_DIRS}
    ${LIBVA_LIBRARY_DIRS}
    ${LIBSWSCALE_LIBRARY_DIRS}
    ${GL_LIBRARY_DIRS}
)
# Compile all driver sources once into an object library; the objects are
# reused for both the shared and the static flavor of the driver.
add_library (${DRIVER_NAME}_obj OBJECT
    api-bitmap-surface.c
    api-csc-matrix.c
    api-device.c
    api-output-surface.c
    api-presentation-queue.c
    api-video-decoder.c
    api-video-mixer.c
    api-video-surface.c
    entry.c
    trace.c
    reverse-constant.c
    handle-storage.c
    bitstream.c
    h264-parse.c
    globals.c
    watermark.c
    ctx-stack.c
)
# Generated shaders.h must exist before the objects are compiled.
add_dependencies(${DRIVER_NAME}_obj shader-bundle)
# Fix: the $<TARGET_OBJECTS:...> generator expressions were garbled to a bare
# "$" — both libraries are built from the objects of ${DRIVER_NAME}_obj.
add_library(${DRIVER_NAME} SHARED $<TARGET_OBJECTS:${DRIVER_NAME}_obj>)
add_library(${DRIVER_NAME}_static STATIC $<TARGET_OBJECTS:${DRIVER_NAME}_obj>)
set(LINK_LIBRARIES
    -lX11
    ${GLIB20_LIBRARIES}
    ${LIBVA_LIBRARIES}
    ${LIBSWSCALE_LIBRARIES}
    ${GL_LIBRARIES}
    -lpthread
    -lrt
    shader-bundle
)
target_link_libraries(${DRIVER_NAME} ${LINK_LIBRARIES})
target_link_libraries(${DRIVER_NAME}_static ${LINK_LIBRARIES})
# VDPAU drivers are expected to carry a ".so.1" suffix.
set_target_properties(${DRIVER_NAME}
    PROPERTIES
    VERSION 1
)
install(TARGETS ${DRIVER_NAME} DESTINATION ${LIB_INSTALL_DIR})
libvdpau-va-gl-0.3.6/src/api-bitmap-surface.c 0000664 0000000 0000000 00000023327 12665616006 0020755 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#include "ctx-stack.h"
#include
#include
#include
#include
#include
#include "api.h"
#include "trace.h"
/*
 * VdpBitmapSurfaceCreate: allocate a bitmap surface descriptor and its backing
 * GL texture on the given device.
 *
 * Frequently-accessed surfaces additionally keep a system-memory copy of the
 * pixels (data->bitmap_data) which is uploaded lazily (data->dirty flag);
 * infrequently-accessed ones live only in the texture.
 *
 * Fix vs. original: on the GL error paths the system-memory bitmap and the
 * just-created texture were leaked — only the descriptor was freed.
 */
VdpStatus
vdpBitmapSurfaceCreate(VdpDevice device, VdpRGBAFormat rgba_format, uint32_t width,
                       uint32_t height, VdpBool frequently_accessed, VdpBitmapSurface *surface)
{
    VdpStatus err_code;

    if (!surface)
        return VDP_STATUS_INVALID_HANDLE;

    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    VdpBitmapSurfaceData *data = calloc(1, sizeof(VdpBitmapSurfaceData));
    if (NULL == data) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    // Map the VDPAU RGBA format onto the (internal format, format, type)
    // triple used by glTexImage2D/glTexSubImage2D.
    switch (rgba_format) {
    case VDP_RGBA_FORMAT_B8G8R8A8:
        data->gl_internal_format = GL_RGBA;
        data->gl_format = GL_BGRA;
        data->gl_type = GL_UNSIGNED_BYTE;
        data->bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_R8G8B8A8:
        data->gl_internal_format = GL_RGBA;
        data->gl_format = GL_RGBA;
        data->gl_type = GL_UNSIGNED_BYTE;
        data->bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_R10G10B10A2:
        data->gl_internal_format = GL_RGB10_A2;
        data->gl_format = GL_RGBA;
        data->gl_type = GL_UNSIGNED_INT_10_10_10_2;
        data->bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_B10G10R10A2:
        data->gl_internal_format = GL_RGB10_A2;
        data->gl_format = GL_BGRA;
        data->gl_type = GL_UNSIGNED_INT_10_10_10_2;
        data->bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_A8:
        // A8 is stored in the red channel; a swizzle shader converts it to
        // alpha at render time.
        data->gl_internal_format = GL_RGBA;
        data->gl_format = GL_RED;
        data->gl_type = GL_UNSIGNED_BYTE;
        data->bytes_per_pixel = 1;
        break;
    default:
        traceError("error (%s): %s not implemented\n", __func__, reverse_rgba_format(rgba_format));
        free(data);
        err_code = VDP_STATUS_INVALID_RGBA_FORMAT;
        goto quit;
    }

    data->type = HANDLETYPE_BITMAP_SURFACE;
    data->device = device;
    data->deviceData = deviceData;
    data->rgba_format = rgba_format;
    data->width = width;
    data->height = height;
    data->frequently_accessed = frequently_accessed;

    // Frequently accessed bitmaps reside in system memory rather than in a
    // GPU texture; the dirty flag tracks pending uploads.
    data->dirty = 0;
    if (frequently_accessed) {
        data->bitmap_data = calloc(width * height, data->bytes_per_pixel);
        if (NULL == data->bitmap_data) {
            traceError("error (%s): calloc returned NULL\n", __func__);
            free(data);
            err_code = VDP_STATUS_RESOURCES;
            goto quit;
        }
    } else {
        data->bitmap_data = NULL;
    }

    glx_ctx_push_thread_local(deviceData);
    glGenTextures(1, &data->tex_id);
    glBindTexture(GL_TEXTURE_2D, data->tex_id);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, data->gl_internal_format, width, height, 0,
                 data->gl_format, data->gl_type, NULL);
    glFinish();
    GLuint gl_error = glGetError();
    if (GL_NO_ERROR != gl_error) {
        // Requested RGBA format was wrong. Release the texture and both
        // allocations (fixes leak of bitmap_data and tex_id).
        traceError("error (%s): texture failure, gl error (%d, %s)\n", __func__, gl_error,
                   gluErrorString(gl_error));
        glDeleteTextures(1, &data->tex_id);
        glx_ctx_pop();
        free(data->bitmap_data);    // NULL when not frequently accessed
        free(data);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        free(data->bitmap_data);    // fixes leak (see above)
        free(data);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    ref_device(deviceData);
    *surface = handle_insert(data);
    err_code = VDP_STATUS_OK;

quit:
    handle_release(device);
    return err_code;
}
// VdpBitmapSurfaceDestroy: release a bitmap surface — its system-memory copy
// (if any), its GL texture, its handle, and its reference on the device.
// Returns VDP_STATUS_INVALID_HANDLE for unknown handles, VDP_STATUS_ERROR on
// GL failure (in which case the handle and descriptor are intentionally kept
// alive), VDP_STATUS_OK otherwise.
VdpStatus
vdpBitmapSurfaceDestroy(VdpBitmapSurface surface)
{
VdpBitmapSurfaceData *data = handle_acquire(surface, HANDLETYPE_BITMAP_SURFACE);
if (NULL == data)
return VDP_STATUS_INVALID_HANDLE;
VdpDeviceData *deviceData = data->deviceData;
// Frequently accessed surfaces keep pixels in system memory; free that copy.
if (data->frequently_accessed) {
free(data->bitmap_data);
data->bitmap_data = NULL;
}
// Delete the backing texture with the device's GL context current.
glx_ctx_push_thread_local(deviceData);
glDeleteTextures(1, &data->tex_id);
GLenum gl_error = glGetError();
glx_ctx_pop();
if (GL_NO_ERROR != gl_error) {
traceError("error (%s): gl error %d\n", __func__, gl_error);
handle_release(surface);
return VDP_STATUS_ERROR;
}
// Remove the handle first, then drop the device reference and free memory.
handle_expunge(surface);
unref_device(deviceData);
free(data);
return VDP_STATUS_OK;
}
// VdpBitmapSurfaceGetParameters: report the creation-time parameters of a
// bitmap surface through the supplied output pointers.
// Returns VDP_STATUS_INVALID_HANDLE for unknown handles and
// VDP_STATUS_INVALID_POINTER if any output pointer is NULL.
VdpStatus
vdpBitmapSurfaceGetParameters(VdpBitmapSurface surface, VdpRGBAFormat *rgba_format,
                              uint32_t *width, uint32_t *height, VdpBool *frequently_accessed)
{
    VdpBitmapSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_BITMAP_SURFACE);
    if (!surfData)
        return VDP_STATUS_INVALID_HANDLE;

    VdpStatus ret = VDP_STATUS_INVALID_POINTER;
    if (rgba_format && width && height && frequently_accessed) {
        *rgba_format         = surfData->rgba_format;
        *width               = surfData->width;
        *height              = surfData->height;
        *frequently_accessed = surfData->frequently_accessed;
        ret = VDP_STATUS_OK;
    }

    handle_release(surface);
    return ret;
}
// VdpBitmapSurfacePutBitsNative: copy client pixels into a bitmap surface.
// source_data[0]/source_pitches[0] describe the single source plane;
// destination_rect selects the target region (whole surface when NULL).
// Frequently-accessed surfaces are updated in their system-memory copy and
// marked dirty for a later upload; others are written straight into the GL
// texture with glTexSubImage2D.
VdpStatus
vdpBitmapSurfacePutBitsNative(VdpBitmapSurface surface, void const *const *source_data,
uint32_t const *source_pitches, VdpRect const *destination_rect)
{
VdpStatus err_code;
if (!source_data || !source_pitches)
return VDP_STATUS_INVALID_POINTER;
VdpBitmapSurfaceData *dstSurfData = handle_acquire(surface, HANDLETYPE_BITMAP_SURFACE);
if (NULL == dstSurfData)
return VDP_STATUS_INVALID_HANDLE;
VdpDeviceData *deviceData = dstSurfData->deviceData;
// Default to the whole surface when no destination rectangle was given.
VdpRect d_rect = {0, 0, dstSurfData->width, dstSurfData->height};
if (destination_rect)
d_rect = *destination_rect;
if (dstSurfData->frequently_accessed) {
// NOTE(review): the full-width fast path compares the pitch in bytes
// against d_rect.x1 in pixels; for bytes_per_pixel != 1 it therefore
// rarely triggers and the per-row loop below is used instead — confirm
// this condition is intended.
if (0 == d_rect.x0 && dstSurfData->width == d_rect.x1 && source_pitches[0] == d_rect.x1) {
// full width
const int bytes_to_copy =
(d_rect.x1 - d_rect.x0) * (d_rect.y1 - d_rect.y0) * dstSurfData->bytes_per_pixel;
memcpy(dstSurfData->bitmap_data +
d_rect.y0 * dstSurfData->width * dstSurfData->bytes_per_pixel,
source_data[0], bytes_to_copy);
} else {
// Row-by-row copy into the destination rectangle.
const unsigned int bytes_in_line = (d_rect.x1-d_rect.x0)*dstSurfData->bytes_per_pixel;
for (unsigned int y = d_rect.y0; y < d_rect.y1; y ++) {
memcpy(dstSurfData->bitmap_data +
(y * dstSurfData->width + d_rect.x0) * dstSurfData->bytes_per_pixel,
source_data[0] + (y - d_rect.y0) * source_pitches[0],
bytes_in_line);
}
}
// Mark the system-memory copy newer than the texture.
dstSurfData->dirty = 1;
} else {
// Direct texture upload path.
glx_ctx_push_thread_local(deviceData);
glBindTexture(GL_TEXTURE_2D, dstSurfData->tex_id);
// Pitch is in bytes; GL_UNPACK_ROW_LENGTH expects pixels.
glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0]/dstSurfData->bytes_per_pixel);
if (4 != dstSurfData->bytes_per_pixel)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexSubImage2D(GL_TEXTURE_2D, 0, d_rect.x0, d_rect.y0,
d_rect.x1 - d_rect.x0, d_rect.y1 - d_rect.y0,
dstSurfData->gl_format, dstSurfData->gl_type, source_data[0]);
// Restore default unpack state.
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
if (4 != dstSurfData->bytes_per_pixel)
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glFinish();
GLenum gl_error = glGetError();
glx_ctx_pop();
if (GL_NO_ERROR != gl_error) {
traceError("error (%s): gl error %d\n", __func__, gl_error);
err_code = VDP_STATUS_ERROR;
goto quit;
}
}
err_code = VDP_STATUS_OK;
quit:
handle_release(surface);
return err_code;
}
// VdpBitmapSurfaceQueryCapabilities: report whether an RGBA format is
// supported for bitmap surfaces and the maximum surface dimensions
// (both taken from GL_MAX_TEXTURE_SIZE).
VdpStatus
vdpBitmapSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height)
{
VdpStatus err_code;
VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
if (NULL == deviceData)
return VDP_STATUS_INVALID_HANDLE;
if (NULL == is_supported || NULL == max_width || NULL == max_height) {
err_code = VDP_STATUS_INVALID_POINTER;
goto quit;
}
// Exactly the formats handled by vdpBitmapSurfaceCreate.
switch (surface_rgba_format) {
case VDP_RGBA_FORMAT_B8G8R8A8:
case VDP_RGBA_FORMAT_R8G8B8A8:
case VDP_RGBA_FORMAT_R10G10B10A2:
case VDP_RGBA_FORMAT_B10G10R10A2:
case VDP_RGBA_FORMAT_A8:
*is_supported = 1; // All these formats should be supported by OpenGL
break; // implementation.
default:
*is_supported = 0;
break;
}
// Query the texture size limit with the device's GL context current.
glx_ctx_push_thread_local(deviceData);
GLint max_texture_size;
glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size);
GLenum gl_error = glGetError();
glx_ctx_pop();
if (GL_NO_ERROR != gl_error) {
traceError("error (%s): gl error %d\n", __func__, gl_error);
err_code = VDP_STATUS_ERROR;
goto quit;
}
*max_width = max_texture_size;
*max_height = max_texture_size;
err_code = VDP_STATUS_OK;
quit:
handle_release(device);
return err_code;
}
libvdpau-va-gl-0.3.6/src/api-csc-matrix.c 0000664 0000000 0000000 00000003247 12665616006 0020124 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#include
/*
 * vdpGenerateCSCMatrix: fill *csc_matrix with a YCbCr -> RGB conversion
 * matrix for the requested color standard.
 *
 * Matrices assume studio-swing ("video range") input on the 0..255 scale:
 * Y in [16, 235], Cb/Cr in [16, 240].
 * NOTE(review): procamp (brightness/contrast/saturation/hue) is validated
 * but not applied yet.
 *
 * Fix vs. original: the BT.709 case previously contained full-range BT.601
 * coefficients (1.402 / -0.344 / -0.714 / 1.772); it now uses the actual
 * BT.709 studio-swing matrix (Kr = 0.2126, Kb = 0.0722).
 */
VdpStatus
vdpGenerateCSCMatrix(VdpProcamp *procamp, VdpColorStandard standard, VdpCSCMatrix *csc_matrix)
{
    if (!csc_matrix)
        return VDP_STATUS_INVALID_POINTER;

    if (procamp && VDP_PROCAMP_VERSION != procamp->struct_version)
        return VDP_STATUS_INVALID_VALUE;

    VdpCSCMatrix *m = csc_matrix;
    switch (standard) {
    case VDP_COLOR_STANDARD_ITUR_BT_601:
        (*m)[0][0] = 1.164f; (*m)[0][1] =  0.0f;   (*m)[0][2] =  1.596f; (*m)[0][3] = -222.9f;
        (*m)[1][0] = 1.164f; (*m)[1][1] = -0.392f; (*m)[1][2] = -0.813f; (*m)[1][3] =  135.6f;
        (*m)[2][0] = 1.164f; (*m)[2][1] =  2.017f; (*m)[2][2] =  0.0f;   (*m)[2][3] = -276.8f;
        break;
    case VDP_COLOR_STANDARD_ITUR_BT_709:
        // R = 1.164*(Y-16) + 1.793*(Cr-128)
        // G = 1.164*(Y-16) - 0.213*(Cb-128) - 0.533*(Cr-128)
        // B = 1.164*(Y-16) + 2.112*(Cb-128)
        (*m)[0][0] = 1.164f; (*m)[0][1] =  0.0f;   (*m)[0][2] =  1.793f; (*m)[0][3] = -248.1f;
        (*m)[1][0] = 1.164f; (*m)[1][1] = -0.213f; (*m)[1][2] = -0.533f; (*m)[1][3] =   76.9f;
        (*m)[2][0] = 1.164f; (*m)[2][1] =  2.112f; (*m)[2][2] =  0.0f;   (*m)[2][3] = -289.0f;
        break;
    case VDP_COLOR_STANDARD_SMPTE_240M:
        // NOTE(review): these SMPTE 240M values look dubious (identical
        // 0.581 luma column, zero offsets) — verify against the standard.
        (*m)[0][0] = 0.581f; (*m)[0][1] = -0.764f; (*m)[0][2] =  1.576f; (*m)[0][3] = 0.0f;
        (*m)[1][0] = 0.581f; (*m)[1][1] = -0.991f; (*m)[1][2] = -0.477f; (*m)[1][3] = 0.0f;
        (*m)[2][0] = 0.581f; (*m)[2][1] =  1.062f; (*m)[2][2] =  0.000f; (*m)[2][3] = 0.0f;
        break;
    default:
        return VDP_STATUS_INVALID_COLOR_STANDARD;
    }

    return VDP_STATUS_OK;
}
libvdpau-va-gl-0.3.6/src/api-device.c 0000664 0000000 0000000 00000046574 12665616006 0017323 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#define GL_GLEXT_PROTOTYPES
#include "ctx-stack.h"
#include
#include
#include "globals.h"
#include "api.h"
#include
#include "trace.h"
#include
#include
#include "watermark.h"
// Human-readable backend description returned by vdpGetInformationString().
// NOTE(review): identifier spelling "implemetation" (sic) kept as-is.
static char const *
implemetation_description_string = "OpenGL/VAAPI/libswscale backend for VDPAU";
// Handle-storage visitor used by vdpDeviceDestroy's leak report: logs every
// handle belonging to the device passed in *p and counts it.
// p points to an anonymous struct {int cnt; int total_cnt;
// VdpDeviceData *deviceData;} — the layout must match the caller's.
void
print_handle_type(int handle, void *item, void *p)
{
VdpGenericData *gh = item;
struct {
int cnt;        // handles owned by the device of interest
int total_cnt;  // all handles visited
VdpDeviceData *deviceData;
} *pp = p;
pp->total_cnt ++;
if (gh) {
if (pp->deviceData == gh->deviceData) {
traceError("handle %d type = %d\n", handle, gh->type);
pp->cnt ++;
}
}
}
// Handle-storage visitor that destroys every object belonging to the device
// passed in p, dispatching on the object's handle type. Used by
// vdpDeviceDestroy to mitigate clients that leak child objects.
static
void
destroy_child_objects(int handle, void *item, void *p)
{
const void *parent = p;
VdpGenericData *gh = item;
if (gh) {
if (parent == gh->deviceData) {
switch (gh->type) {
case HANDLETYPE_DEVICE:
// do nothing
break;
case HANDLETYPE_PRESENTATION_QUEUE_TARGET:
// NOTE(review): targets are destroyed via
// vdpPresentationQueueDestroy here — confirm this is the
// intended call (vs. a dedicated target-destroy function).
vdpPresentationQueueDestroy(handle);
break;
case HANDLETYPE_PRESENTATION_QUEUE:
vdpPresentationQueueDestroy(handle);
break;
case HANDLETYPE_VIDEO_MIXER:
vdpVideoMixerDestroy(handle);
break;
case HANDLETYPE_OUTPUT_SURFACE:
vdpOutputSurfaceDestroy(handle);
break;
case HANDLETYPE_VIDEO_SURFACE:
vdpVideoSurfaceDestroy(handle);
break;
case HANDLETYPE_BITMAP_SURFACE:
vdpBitmapSurfaceDestroy(handle);
break;
case HANDLETYPE_DECODER:
vdpDecoderDestroy(handle);
break;
default:
traceError("warning (%s): unknown handle type %d\n", __func__, gh->type);
break;
}
}
}
}
// Compile and link every embedded GLSL fragment shader (from the generated
// glsl_shaders[] bundle) and store the resulting GL objects plus uniform
// locations in deviceData->shaders[]. Must run with the device's GL context
// current. Returns VDP_STATUS_OK on success, VDP_STATUS_ERROR on the first
// compile/link failure (already-built shaders are not rolled back).
static
VdpStatus
compile_shaders(VdpDeviceData *deviceData)
{
VdpStatus retval = VDP_STATUS_ERROR;
for (int k = 0; k < SHADER_COUNT; k ++) {
struct shader_s *s = &glsl_shaders[k];
GLint errmsg_len;
GLuint f_shader, program;
int ok;
// Compile the fragment shader source.
f_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(f_shader, 1, &s->body, &s->len);
glCompileShader(f_shader);
glGetShaderiv(f_shader, GL_COMPILE_STATUS, &ok);
if (!ok) {
// Fetch and log the GL info log before bailing out.
glGetShaderiv(f_shader, GL_INFO_LOG_LENGTH, &errmsg_len);
char *errmsg = malloc(errmsg_len);
glGetShaderInfoLog(f_shader, errmsg_len, NULL, errmsg);
traceError("error (%s): compilation of shader #%d failed with '%s'\n", __func__, k,
errmsg);
free(errmsg);
glDeleteShader(f_shader);
goto err;
}
// Link the shader into its own program object.
program = glCreateProgram();
glAttachShader(program, f_shader);
glLinkProgram(program);
glGetProgramiv(program, GL_LINK_STATUS, &ok);
if (!ok) {
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &errmsg_len);
char *errmsg = malloc(errmsg_len);
glGetProgramInfoLog(program, errmsg_len, NULL, errmsg);
traceError("error (%s): linking of shader #%d failed with '%s'\n", __func__, k,
errmsg);
free(errmsg);
glDeleteProgram(program);
glDeleteShader(f_shader);
goto err;
}
deviceData->shaders[k].f_shader = f_shader;
deviceData->shaders[k].program = program;
// Cache uniform locations per shader kind (indices from the generated
// shaders.h enum).
switch (k) {
case glsl_YV12_RGBA:
case glsl_NV12_RGBA:
deviceData->shaders[k].uniform.tex_0 = glGetUniformLocation(program, "tex[0]");
deviceData->shaders[k].uniform.tex_1 = glGetUniformLocation(program, "tex[1]");
break;
case glsl_red_to_alpha_swizzle:
deviceData->shaders[k].uniform.tex_0 = glGetUniformLocation(program, "tex_0");
break;
}
}
retval = VDP_STATUS_OK;
err:
return retval;
}
// Release the GL program and fragment-shader objects created by
// compile_shaders(). Must be called with the device's GL context current.
static
void
destroy_shaders(VdpDeviceData *deviceData)
{
    int idx = 0;
    while (idx < SHADER_COUNT) {
        glDeleteProgram(deviceData->shaders[idx].program);
        glDeleteShader(deviceData->shaders[idx].f_shader);
        idx += 1;
    }
}
/*
 * VdpDeviceCreateX11: create a VdpDevice bound to an X11 display/screen.
 * Opens a private X connection, sets up the master GLX context, initializes
 * VA-API (unless disabled by quirks), compiles the shader bundle and uploads
 * the watermark texture.
 *
 * Fix vs. original: the traceError() call in the glXGetProcAddress failure
 * path had a "%s" conversion with no matching argument (undefined behavior);
 * __func__ is now passed.
 */
VdpStatus
vdpDeviceCreateX11(Display *display_orig, int screen, VdpDevice *device,
                   VdpGetProcAddress **get_proc_address)
{
    if (!display_orig || !device)
        return VDP_STATUS_INVALID_POINTER;

    // Let's get our own connection to the X server.
    Display *display = handle_xdpy_ref(display_orig);
    if (NULL == display)
        return VDP_STATUS_ERROR;

    if (global.quirks.buggy_XCloseDisplay) {
        // XCloseDisplay could segfault on fglrx. To avoid calling XCloseDisplay,
        // make one more reference to the xdpy copy.
        handle_xdpy_ref(display_orig);
    }

    VdpDeviceData *data = calloc(1, sizeof(VdpDeviceData));
    if (NULL == data)
        return VDP_STATUS_RESOURCES;

    glx_ctx_lock();     // use glx lock to serialize X calls
    data->type = HANDLETYPE_DEVICE;
    data->display = display;
    data->display_orig = display_orig;  // save supplied pointer too
    data->screen = screen;
    data->refcount = 0;
    pthread_mutex_init(&data->refcount_mutex, NULL);
    data->root = DefaultRootWindow(display);

    XWindowAttributes wnd_attrs;
    XGetWindowAttributes(display, data->root, &wnd_attrs);
    data->color_depth = wnd_attrs.depth;

    data->fn.glXBindTexImageEXT =
        (PFNGLXBINDTEXIMAGEEXTPROC)glXGetProcAddress((GLubyte *)"glXBindTexImageEXT");
    data->fn.glXReleaseTexImageEXT =
        (PFNGLXRELEASETEXIMAGEEXTPROC)glXGetProcAddress((GLubyte *)"glXReleaseTexImageEXT");
    glx_ctx_unlock();

    if (!data->fn.glXBindTexImageEXT || !data->fn.glXReleaseTexImageEXT) {
        // Fixed: supply the __func__ argument the "%s" conversion expects.
        traceError("error (%s): can't get glXBindTexImageEXT address\n", __func__);
        // NOTE(review): the display reference(s) taken above are not released
        // on this path — confirm whether handle_xdpy_unref should be called.
        free(data);
        return VDP_STATUS_RESOURCES;
    }

    // create master GLX context to share data between further created ones
    glx_ctx_ref_glc_hash_table(display, screen);
    data->root_glc = glx_ctx_get_root_context();

    glx_ctx_push_thread_local(data);
    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    // initialize VA-API
    if (global.quirks.avoid_va) {
        // pretend there is no VA-API available
        data->va_available = 0;
    } else {
        data->va_dpy = vaGetDisplay(display);
        data->va_available = 0;

        VAStatus status = vaInitialize(data->va_dpy, &data->va_version_major,
                                       &data->va_version_minor);
        if (VA_STATUS_SUCCESS == status) {
            data->va_available = 1;
            traceInfo("libva (version %d.%d) library initialized\n",
                      data->va_version_major, data->va_version_minor);
        } else {
            data->va_available = 0;
            traceInfo("warning: failed to initialize libva. "
                      "No video decode acceleration available.\n");
        }
    }

    // Shader failures are reported inside compile_shaders() via trace output;
    // device creation proceeds regardless (as before).
    compile_shaders(data);

    // Upload the watermark image into its texture.
    glGenTextures(1, &data->watermark_tex_id);
    glBindTexture(GL_TEXTURE_2D, data->watermark_tex_id);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, watermark_width, watermark_height, 0, GL_BGRA,
                 GL_UNSIGNED_BYTE, watermark_data);
    glFinish();

    *device = handle_insert(data);
    if (get_proc_address)
        *get_proc_address = &vdpGetProcAddress;

    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        return VDP_STATUS_ERROR;
    }

    return VDP_STATUS_OK;
}
// VdpDeviceDestroy: tear down a device — attempt to destroy leaked child
// objects first, then shut down libva, delete GL resources, drop GLX context
// references and free the descriptor. Fails with VDP_STATUS_ERROR if child
// objects remain referenced even after the cleanup sweep.
VdpStatus
vdpDeviceDestroy(VdpDevice device)
{
VdpStatus err_code;
VdpDeviceData *data = handle_acquire(device, HANDLETYPE_DEVICE);
if (NULL == data)
return VDP_STATUS_INVALID_HANDLE;
if (0 != data->refcount) {
// Buggy client forgot to destroy dependent objects, or decided that
// destroying the VdpDevice destroys all child objects. Try to mitigate
// and prevent leakage by destroying the children ourselves.
traceError("warning (%s): non-zero reference count (%d). Trying to free child objects.\n",
__func__, data->refcount);
void *parent_object = data;
handle_execute_for_all(destroy_child_objects, parent_object);
}
if (0 != data->refcount) {
// Sweep did not release everything; report what is left and refuse.
traceError("error (%s): still non-zero reference count (%d)\n", __func__, data->refcount);
traceError("Here is the list of objects:\n");
struct {
int cnt;
int total_cnt;
VdpDeviceData *deviceData;
} state = { .cnt = 0, .total_cnt = 0, .deviceData = data };
handle_execute_for_all(print_handle_type, &state);
traceError("Objects leaked: %d\n", state.cnt);
traceError("Objects visited during scan: %d\n", state.total_cnt);
err_code = VDP_STATUS_ERROR;
goto quit;
}
// clean up libva
if (data->va_available)
vaTerminate(data->va_dpy);
glx_ctx_push_thread_local(data);
glDeleteTextures(1, &data->watermark_tex_id);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
destroy_shaders(data);
glx_ctx_pop();
glx_ctx_lock();
glXMakeCurrent(data->display, None, NULL);
glx_ctx_unlock();
glx_ctx_unref_glc_hash_table(data->display);
handle_xdpy_unref(data->display_orig);
handle_expunge(device);
pthread_mutex_destroy(&data->refcount_mutex);
free(data);
// NOTE(review): glGetError() is called here after the GL context was
// released/made non-current — confirm this check is meaningful.
GLenum gl_error = glGetError();
if (GL_NO_ERROR != gl_error) {
traceError("error (%s): gl error %d\n", __func__, gl_error);
err_code = VDP_STATUS_ERROR;
goto quit_skip_release;
}
return VDP_STATUS_OK;
quit:
handle_release(device);
quit_skip_release:
return err_code;
}
VdpStatus
vdpGetApiVersion(uint32_t *api_version)
{
if (!api_version)
return VDP_STATUS_INVALID_POINTER;
*api_version = VDPAU_VERSION;
return VDP_STATUS_OK;
}
// VdpGetErrorString: map a VdpStatus code onto a static human-readable
// string (delegates to the generated reverse_status table).
static
const char *
vdpGetErrorString(VdpStatus status)
{
return reverse_status(status);
}
VdpStatus
vdpGetInformationString(char const **information_string)
{
if (!information_string)
return VDP_STATUS_INVALID_POINTER;
*information_string = implemetation_description_string;
return VDP_STATUS_OK;
}
// VdpGetProcAddress implementation: map a VdpFuncId onto the corresponding
// entry point. Almost all functions are routed through their trace* wrappers
// (which log the call and forward to the vdp* implementation); the error-string
// and proc-address functions are returned directly.
// Returns VDP_STATUS_INVALID_FUNC_ID for unknown or unimplemented IDs.
VdpStatus
vdpGetProcAddress(VdpDevice device, VdpFuncId function_id, void **function_pointer)
{
(void)device; // there is no difference between various devices. All have same procedures
if (!function_pointer)
return VDP_STATUS_INVALID_POINTER;
switch (function_id) {
case VDP_FUNC_ID_GET_ERROR_STRING:
*function_pointer = &vdpGetErrorString;
break;
case VDP_FUNC_ID_GET_PROC_ADDRESS:
*function_pointer = &vdpGetProcAddress;
break;
case VDP_FUNC_ID_GET_API_VERSION:
*function_pointer = &traceVdpGetApiVersion;
break;
case VDP_FUNC_ID_GET_INFORMATION_STRING:
*function_pointer = &traceVdpGetInformationString;
break;
case VDP_FUNC_ID_DEVICE_DESTROY:
*function_pointer = &traceVdpDeviceDestroy;
break;
case VDP_FUNC_ID_GENERATE_CSC_MATRIX:
*function_pointer = &traceVdpGenerateCSCMatrix;
break;
case VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES:
*function_pointer = &traceVdpVideoSurfaceQueryCapabilities;
break;
case VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES:
*function_pointer = &traceVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities;
break;
case VDP_FUNC_ID_VIDEO_SURFACE_CREATE:
*function_pointer = &traceVdpVideoSurfaceCreate;
break;
case VDP_FUNC_ID_VIDEO_SURFACE_DESTROY:
*function_pointer = &traceVdpVideoSurfaceDestroy;
break;
case VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS:
*function_pointer = &traceVdpVideoSurfaceGetParameters;
break;
case VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR:
*function_pointer = &traceVdpVideoSurfaceGetBitsYCbCr;
break;
case VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR:
*function_pointer = &traceVdpVideoSurfacePutBitsYCbCr;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES:
*function_pointer = &traceVdpOutputSurfaceQueryCapabilities;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES:
*function_pointer = &traceVdpOutputSurfaceQueryGetPutBitsNativeCapabilities;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES:
*function_pointer = &traceVdpOutputSurfaceQueryPutBitsIndexedCapabilities;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES:
*function_pointer = &traceVdpOutputSurfaceQueryPutBitsYCbCrCapabilities;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_CREATE:
*function_pointer = &traceVdpOutputSurfaceCreate;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY:
*function_pointer = &traceVdpOutputSurfaceDestroy;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS:
*function_pointer = &traceVdpOutputSurfaceGetParameters;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE:
*function_pointer = &traceVdpOutputSurfaceGetBitsNative;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE:
*function_pointer = &traceVdpOutputSurfacePutBitsNative;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED:
*function_pointer = &traceVdpOutputSurfacePutBitsIndexed;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR:
*function_pointer = &traceVdpOutputSurfacePutBitsYCbCr;
break;
case VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES:
*function_pointer = &traceVdpBitmapSurfaceQueryCapabilities;
break;
case VDP_FUNC_ID_BITMAP_SURFACE_CREATE:
*function_pointer = &traceVdpBitmapSurfaceCreate;
break;
case VDP_FUNC_ID_BITMAP_SURFACE_DESTROY:
*function_pointer = &traceVdpBitmapSurfaceDestroy;
break;
case VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS:
*function_pointer = &traceVdpBitmapSurfaceGetParameters;
break;
case VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE:
*function_pointer = &traceVdpBitmapSurfacePutBitsNative;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE:
*function_pointer = &traceVdpOutputSurfaceRenderOutputSurface;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE:
*function_pointer = &traceVdpOutputSurfaceRenderBitmapSurface;
break;
case VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_VIDEO_SURFACE_LUMA:
// Deliberately unimplemented; NULL here makes the function fall
// through to VDP_STATUS_INVALID_FUNC_ID below.
// *function_pointer = &traceVdpOutputSurfaceRenderVideoSurfaceLuma;
*function_pointer = NULL;
break;
case VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES:
*function_pointer = &traceVdpDecoderQueryCapabilities;
break;
case VDP_FUNC_ID_DECODER_CREATE:
*function_pointer = &traceVdpDecoderCreate;
break;
case VDP_FUNC_ID_DECODER_DESTROY:
*function_pointer = &traceVdpDecoderDestroy;
break;
case VDP_FUNC_ID_DECODER_GET_PARAMETERS:
*function_pointer = &traceVdpDecoderGetParameters;
break;
case VDP_FUNC_ID_DECODER_RENDER:
*function_pointer = &traceVdpDecoderRender;
break;
case VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT:
*function_pointer = &traceVdpVideoMixerQueryFeatureSupport;
break;
case VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT:
*function_pointer = &traceVdpVideoMixerQueryParameterSupport;
break;
case VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT:
*function_pointer = &traceVdpVideoMixerQueryAttributeSupport;
break;
case VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE:
*function_pointer = &traceVdpVideoMixerQueryParameterValueRange;
break;
case VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE:
*function_pointer = &traceVdpVideoMixerQueryAttributeValueRange;
break;
case VDP_FUNC_ID_VIDEO_MIXER_CREATE:
*function_pointer = &traceVdpVideoMixerCreate;
break;
case VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES:
*function_pointer = &traceVdpVideoMixerSetFeatureEnables;
break;
case VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES:
*function_pointer = &traceVdpVideoMixerSetAttributeValues;
break;
case VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT:
*function_pointer = &traceVdpVideoMixerGetFeatureSupport;
break;
case VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES:
*function_pointer = &traceVdpVideoMixerGetFeatureEnables;
break;
case VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES:
*function_pointer = &traceVdpVideoMixerGetParameterValues;
break;
case VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES:
*function_pointer = &traceVdpVideoMixerGetAttributeValues;
break;
case VDP_FUNC_ID_VIDEO_MIXER_DESTROY:
*function_pointer = &traceVdpVideoMixerDestroy;
break;
case VDP_FUNC_ID_VIDEO_MIXER_RENDER:
*function_pointer = &traceVdpVideoMixerRender;
break;
case VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY:
*function_pointer = &traceVdpPresentationQueueTargetDestroy;
break;
case VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE:
*function_pointer = &traceVdpPresentationQueueCreate;
break;
case VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY:
*function_pointer = &traceVdpPresentationQueueDestroy;
break;
case VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR:
*function_pointer = &traceVdpPresentationQueueSetBackgroundColor;
break;
case VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR:
*function_pointer = &traceVdpPresentationQueueGetBackgroundColor;
break;
case VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME:
*function_pointer = &traceVdpPresentationQueueGetTime;
break;
case VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY:
*function_pointer = &traceVdpPresentationQueueDisplay;
break;
case VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE:
*function_pointer = &traceVdpPresentationQueueBlockUntilSurfaceIdle;
break;
case VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS:
*function_pointer = &traceVdpPresentationQueueQuerySurfaceStatus;
break;
case VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER:
*function_pointer = &traceVdpPreemptionCallbackRegister;
break;
case VDP_FUNC_ID_BASE_WINSYS:
// X11 window-system entry point.
*function_pointer = &traceVdpPresentationQueueTargetCreateX11;
break;
default:
*function_pointer = NULL;
break;
} // switch
if (NULL == *function_pointer)
return VDP_STATUS_INVALID_FUNC_ID;
return VDP_STATUS_OK;
}
// Preemption never happens in this implementation, so registering a
// preemption callback is a no-op that always succeeds.
VdpStatus
vdpPreemptionCallbackRegister(VdpDevice device, VdpPreemptionCallback callback, void *context)
{
    // Parameters intentionally unused: no preemption events are generated.
    (void)device;
    (void)callback;
    (void)context;
    return VDP_STATUS_OK;
}
libvdpau-va-gl-0.3.6/src/api-output-surface.c 0000664 0000000 0000000 00000070522 12665616006 0021040 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#define GL_GLEXT_PROTOTYPES
#include "ctx-stack.h"
#include
#include
#include
#include "trace.h"
#include
// Collected GL blend parameters translated from a
// VdpOutputSurfaceRenderBlendState, plus validity flags.
struct blend_state_struct {
GLuint srcFuncRGB;      // GL source blend factor, RGB channels
GLuint srcFuncAlpha;    // GL source blend factor, alpha channel
GLuint dstFuncRGB;      // GL destination blend factor, RGB channels
GLuint dstFuncAlpha;    // GL destination blend factor, alpha channel
GLuint modeRGB;         // GL blend equation, RGB channels
GLuint modeAlpha;       // GL blend equation, alpha channel
int invalid_func;       // non-zero if any blend factor failed to translate
int invalid_eq;         // non-zero if any blend equation failed to translate
};
static
GLuint
vdpBlendFuncToGLBlendFunc(VdpOutputSurfaceRenderBlendFactor blend_factor)
{
    // Maps a VDPAU blend factor onto the equivalent OpenGL blend factor.
    // Returns GL_INVALID_VALUE for values outside the known set.
    static const struct {
        VdpOutputSurfaceRenderBlendFactor vdp;
        GLuint gl;
    } mapping[] = {
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,                     GL_ZERO },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,                      GL_ONE },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_COLOR,                GL_SRC_COLOR },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_COLOR,      GL_ONE_MINUS_SRC_COLOR },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA,                GL_SRC_ALPHA },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,      GL_ONE_MINUS_SRC_ALPHA },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_ALPHA,                GL_DST_ALPHA },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_ALPHA,      GL_ONE_MINUS_DST_ALPHA },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_COLOR,                GL_DST_COLOR },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_COLOR,      GL_ONE_MINUS_DST_COLOR },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA_SATURATE,       GL_SRC_ALPHA_SATURATE },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_COLOR,           GL_CONSTANT_COLOR },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR, GL_ONE_MINUS_CONSTANT_COLOR },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_ALPHA,           GL_CONSTANT_ALPHA },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA, GL_ONE_MINUS_CONSTANT_ALPHA },
    };

    for (unsigned int k = 0; k < sizeof(mapping) / sizeof(mapping[0]); k ++) {
        if (mapping[k].vdp == blend_factor)
            return mapping[k].gl;
    }
    return GL_INVALID_VALUE;
}
static
GLenum
vdpBlendEquationToGLEquation(VdpOutputSurfaceRenderBlendEquation blend_equation)
{
    // Maps a VDPAU blend equation onto the equivalent OpenGL blend equation.
    // Returns GL_INVALID_VALUE for values outside the known set.
    static const struct {
        VdpOutputSurfaceRenderBlendEquation vdp;
        GLenum gl;
    } mapping[] = {
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_SUBTRACT,         GL_FUNC_SUBTRACT },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_REVERSE_SUBTRACT, GL_FUNC_REVERSE_SUBTRACT },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,              GL_FUNC_ADD },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MIN,              GL_MIN },
        { VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MAX,              GL_MAX },
    };

    for (unsigned int k = 0; k < sizeof(mapping) / sizeof(mapping[0]); k ++) {
        if (mapping[k].vdp == blend_equation)
            return mapping[k].gl;
    }
    return GL_INVALID_VALUE;
}
static
void
compose_surfaces(struct blend_state_struct bs, VdpRect srcRect, VdpRect dstRect,
                 VdpColor const *colors, int flags, int has_src_surf)
{
    // Draws dstRect as a textured/colored quad using the supplied blend
    // state.  The two low bits of `flags` select rotation
    // (VDP_OUTPUT_SURFACE_RENDER_ROTATE_*, which occupy values 0..3 — the
    // original switch over `flags & 3` relies on the same fact).
    //
    // Destination corner k, in emission order:
    //   0:(x0,y0)  1:(x1,y0)  2:(x1,y1)  3:(x0,y1)
    // A rotation by 90*r degrees assigns destination corner k the source
    // corner (k - r) mod 4 in the same cyclic order; this reproduces the
    // four per-rotation switches of the previous implementation exactly.
    const GLint src_x[4] = { srcRect.x0, srcRect.x1, srcRect.x1, srcRect.x0 };
    const GLint src_y[4] = { srcRect.y0, srcRect.y0, srcRect.y1, srcRect.y1 };
    const GLfloat dst_x[4] = { dstRect.x0, dstRect.x1, dstRect.x1, dstRect.x0 };
    const GLfloat dst_y[4] = { dstRect.y0, dstRect.y0, dstRect.y1, dstRect.y1 };
    const int rotation = flags & 3;

    glBlendFuncSeparate(bs.srcFuncRGB, bs.dstFuncRGB, bs.srcFuncAlpha, bs.dstFuncAlpha);
    glBlendEquationSeparate(bs.modeRGB, bs.modeAlpha);
    glColor4f(1, 1, 1, 1);
    glBegin(GL_QUADS);
    for (int k = 0; k < 4; k ++) {
        if (has_src_surf) {
            const int s = (k - rotation + 4) & 3;
            glTexCoord2i(src_x[s], src_y[s]);
        }
        if (colors) {
            // first vertex always takes colors[0]; the remaining vertices
            // change color only when per-vertex coloring was requested
            if (0 == k)
                glColor4f(colors[0].red, colors[0].green, colors[0].blue, colors[0].alpha);
            else if (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX)
                glColor4f(colors[k].red, colors[k].green, colors[k].blue, colors[k].alpha);
        }
        glVertex2f(dst_x[k], dst_y[k]);
    }
    glEnd();
    glColor4f(1, 1, 1, 1);
}
static
struct blend_state_struct
vdpBlendStateToGLBlendState(VdpOutputSurfaceRenderBlendState const *blend_state)
{
    // Translates a VDPAU blend descriptor into GL blend factors/equations.
    // A NULL descriptor is valid and selects a plain copy: ONE/ZERO factors
    // with the ADD equation.
    struct blend_state_struct bs;
    bs.invalid_func = 0;
    bs.invalid_eq = 0;

    if (NULL == blend_state) {
        // defaults are always valid, so no further checks are needed
        bs.srcFuncRGB = GL_ONE;
        bs.srcFuncAlpha = GL_ONE;
        bs.dstFuncRGB = GL_ZERO;
        bs.dstFuncAlpha = GL_ZERO;
        bs.modeRGB = GL_FUNC_ADD;
        bs.modeAlpha = GL_FUNC_ADD;
        return bs;
    }

    bs.srcFuncRGB   = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_source_color);
    bs.srcFuncAlpha = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_source_alpha);
    bs.dstFuncRGB   = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_destination_color);
    bs.dstFuncAlpha = vdpBlendFuncToGLBlendFunc(blend_state->blend_factor_destination_alpha);
    if (GL_INVALID_VALUE == bs.srcFuncRGB || GL_INVALID_VALUE == bs.srcFuncAlpha ||
        GL_INVALID_VALUE == bs.dstFuncRGB || GL_INVALID_VALUE == bs.dstFuncAlpha)
    {
        bs.invalid_func = 1;
    }

    bs.modeRGB   = vdpBlendEquationToGLEquation(blend_state->blend_equation_color);
    bs.modeAlpha = vdpBlendEquationToGLEquation(blend_state->blend_equation_alpha);
    if (GL_INVALID_VALUE == bs.modeRGB || GL_INVALID_VALUE == bs.modeAlpha)
        bs.invalid_eq = 1;

    return bs;
}
VdpStatus
vdpOutputSurfaceCreate(VdpDevice device, VdpRGBAFormat rgba_format, uint32_t width,
                       uint32_t height, VdpOutputSurface *surface)
{
    // Creates an output surface backed by a GL texture + framebuffer pair.
    // On success stores the new handle in *surface.
    //
    // Fix over the previous version: both failure paths after GL object
    // creation (framebuffer incomplete, pending GL error) now delete the
    // texture and FBO before freeing `data`; previously the GL objects
    // leaked.
    VdpStatus err_code;
    if (!surface)
        return VDP_STATUS_INVALID_POINTER;
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    //TODO: figure out reasonable limits
    if (width > 4096 || height > 4096) {
        err_code = VDP_STATUS_INVALID_SIZE;
        goto quit;
    }

    VdpOutputSurfaceData *data = calloc(1, sizeof(VdpOutputSurfaceData));
    if (NULL == data) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    // translate the VDPAU format into GL texture parameters
    switch (rgba_format) {
    case VDP_RGBA_FORMAT_B8G8R8A8:
        data->gl_internal_format = GL_RGBA;
        data->gl_format = GL_BGRA;
        data->gl_type = GL_UNSIGNED_BYTE;
        data->bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_R8G8B8A8:
        data->gl_internal_format = GL_RGBA;
        data->gl_format = GL_RGBA;
        data->gl_type = GL_UNSIGNED_BYTE;
        data->bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_R10G10B10A2:
        data->gl_internal_format = GL_RGB10_A2;
        data->gl_format = GL_RGBA;
        data->gl_type = GL_UNSIGNED_INT_10_10_10_2;
        data->bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_B10G10R10A2:
        data->gl_internal_format = GL_RGB10_A2;
        data->gl_format = GL_BGRA;
        data->gl_type = GL_UNSIGNED_INT_10_10_10_2;
        data->bytes_per_pixel = 4;
        break;
    case VDP_RGBA_FORMAT_A8:
        // single-channel surface stored in the red channel
        data->gl_internal_format = GL_RGBA;
        data->gl_format = GL_RED;
        data->gl_type = GL_UNSIGNED_BYTE;
        data->bytes_per_pixel = 1;
        break;
    default:
        traceError("error (%s): %s is not implemented\n", __func__,
                   reverse_rgba_format(rgba_format));
        free(data);
        err_code = VDP_STATUS_INVALID_RGBA_FORMAT;
        goto quit;
    }

    data->type = HANDLETYPE_OUTPUT_SURFACE;
    data->width = width;
    data->height = height;
    data->device = device;
    data->deviceData = deviceData;
    data->rgba_format = rgba_format;

    glx_ctx_push_thread_local(deviceData);
    glGenTextures(1, &data->tex_id);
    glBindTexture(GL_TEXTURE_2D, data->tex_id);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    // reserve texture storage
    glTexImage2D(GL_TEXTURE_2D, 0, data->gl_internal_format, width, height, 0, data->gl_format,
                 data->gl_type, NULL);
    glGenFramebuffers(1, &data->fbo_id);
    glBindFramebuffer(GL_FRAMEBUFFER, data->fbo_id);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, data->tex_id, 0);
    GLenum gl_status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (GL_FRAMEBUFFER_COMPLETE != gl_status) {
        traceError("error (%s): framebuffer not ready, %d, %s\n", __func__, gl_status,
                   gluErrorString(gl_status));
        // release GL objects created above (previously leaked here)
        glDeleteFramebuffers(1, &data->fbo_id);
        glDeleteTextures(1, &data->tex_id);
        glx_ctx_pop();
        free(data);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    // start with a fully transparent surface
    glClearColor(0.0, 0.0, 0.0, 0.0);
    glClear(GL_COLOR_BUFFER_BIT);
    glFinish();
    GLenum gl_error = glGetError();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        // release GL objects created above (previously leaked here)
        glDeleteFramebuffers(1, &data->fbo_id);
        glDeleteTextures(1, &data->tex_id);
        glx_ctx_pop();
        free(data);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    glx_ctx_pop();

    ref_device(deviceData);
    *surface = handle_insert(data);
    err_code = VDP_STATUS_OK;
quit:
    handle_release(device);
    return err_code;
}
VdpStatus
vdpOutputSurfaceDestroy(VdpOutputSurface surface)
{
    // Releases the GL texture/framebuffer pair backing an output surface,
    // then expunges the handle and drops the device reference.
    VdpOutputSurfaceData *data = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == data)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = data->deviceData;

    glx_ctx_push_thread_local(deviceData);
    glDeleteTextures(1, &data->tex_id);
    glDeleteFramebuffers(1, &data->fbo_id);
    GLenum gl_error = glGetError();
    glx_ctx_pop();

    if (GL_NO_ERROR != gl_error) {
        // on GL failure the handle stays registered, as before
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        handle_release(surface);
        return VDP_STATUS_ERROR;
    }

    handle_expunge(surface);
    unref_device(deviceData);
    free(data);
    return VDP_STATUS_OK;
}
VdpStatus
vdpOutputSurfaceGetBitsNative(VdpOutputSurface surface, VdpRect const *source_rect,
                              void *const *destination_data, uint32_t const *destination_pitches)
{
    // Reads back a rectangle of the surface into client memory, honoring
    // the caller-supplied destination pitch (in bytes).
    VdpStatus err_code;
    if (!destination_data || !destination_pitches)
        return VDP_STATUS_INVALID_POINTER;
    VdpOutputSurfaceData *srcSurfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == srcSurfData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = srcSurfData->deviceData;

    // default to the whole surface when no source rect was supplied
    VdpRect srcRect = {0, 0, srcSurfData->width, srcSurfData->height};
    if (source_rect)
        srcRect = *source_rect;

    glx_ctx_push_thread_local(deviceData);
    glBindFramebuffer(GL_FRAMEBUFFER, srcSurfData->fbo_id);
    glReadBuffer(GL_COLOR_ATTACHMENT0);
    // fix: glReadPixels is controlled by the PACK pixel-storage state, not
    // UNPACK.  The previous code set GL_UNPACK_ROW_LENGTH here (while
    // correctly using GL_PACK_ALIGNMENT below), so the destination pitch
    // was silently ignored during readback.
    glPixelStorei(GL_PACK_ROW_LENGTH, destination_pitches[0] / srcSurfData->bytes_per_pixel);
    if (4 != srcSurfData->bytes_per_pixel)
        glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glReadPixels(srcRect.x0, srcRect.y0, srcRect.x1 - srcRect.x0, srcRect.y1 - srcRect.y0,
                 srcSurfData->gl_format, srcSurfData->gl_type, destination_data[0]);
    // restore default pack state
    glPixelStorei(GL_PACK_ROW_LENGTH, 0);
    if (4 != srcSurfData->bytes_per_pixel)
        glPixelStorei(GL_PACK_ALIGNMENT, 4);
    glFinish();

    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    err_code = VDP_STATUS_OK;
quit:
    handle_release(surface);
    return err_code;
}
VdpStatus
vdpOutputSurfaceGetParameters(VdpOutputSurface surface, VdpRGBAFormat *rgba_format,
                              uint32_t *width, uint32_t *height)
{
    // Reports the creation-time properties of an output surface.
    if (NULL == rgba_format || NULL == width || NULL == height)
        return VDP_STATUS_INVALID_POINTER;

    VdpOutputSurfaceData *surf_data = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == surf_data)
        return VDP_STATUS_INVALID_HANDLE;

    *rgba_format = surf_data->rgba_format;
    *width = surf_data->width;
    *height = surf_data->height;

    handle_release(surface);
    return VDP_STATUS_OK;
}
VdpStatus
vdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface, VdpIndexedFormat source_indexed_format,
                               void const *const *source_data, uint32_t const *source_pitch,
                               VdpRect const *destination_rect,
                               VdpColorTableFormat color_table_format, void const *color_table)
{
    // Expands palette-indexed pixels through color_table and uploads the
    // result into a region of the output surface texture.
    //
    // Fix over the previous version: the malloc-failure path and the
    // unsupported-format path both jumped to `quit` without calling
    // glx_ctx_pop(), leaving the GL context stack unbalanced.
    VdpStatus err_code;
    if (!source_data || !source_pitch || !color_table)
        return VDP_STATUS_INVALID_POINTER;
    VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == surfData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = surfData->deviceData;

    VdpRect dstRect = {0, 0, surfData->width, surfData->height};
    if (destination_rect)
        dstRect = *destination_rect;

    // there is no other formats anyway
    if (VDP_COLOR_TABLE_FORMAT_B8G8R8X8 != color_table_format) {
        err_code = VDP_STATUS_INVALID_COLOR_TABLE_FORMAT;
        goto quit;
    }
    const uint32_t *color_table32 = color_table;

    glx_ctx_push_thread_local(deviceData);
    switch (source_indexed_format) {
    case VDP_INDEXED_FORMAT_I8A8:
        // TODO: use shader?
        do {
            const uint32_t dstRectWidth = dstRect.x1 - dstRect.x0;
            const uint32_t dstRectHeight = dstRect.y1 - dstRect.y0;
            uint32_t *unpacked_buf = malloc(4 * dstRectWidth * dstRectHeight);
            if (NULL == unpacked_buf) {
                glx_ctx_pop();  // fix: balance the push above
                err_code = VDP_STATUS_RESOURCES;
                goto quit;
            }

            // expand I8A8 to BGRA: palette supplies BGR, source supplies A
            for (unsigned int y = 0; y < dstRectHeight; y ++) {
                const uint8_t *src_ptr = source_data[0];
                src_ptr += y * source_pitch[0];
                uint32_t *dst_ptr = unpacked_buf + y * dstRectWidth;
                for (unsigned int x = 0; x < dstRectWidth; x ++) {
                    const uint8_t i = *src_ptr++;
                    const uint32_t a = (*src_ptr++) << 24;
                    dst_ptr[x] = (color_table32[i] & 0x00ffffff) + a;
                }
            }

            glBindTexture(GL_TEXTURE_2D, surfData->tex_id);
            glTexSubImage2D(GL_TEXTURE_2D, 0, dstRect.x0, dstRect.y0,
                            dstRect.x1 - dstRect.x0, dstRect.y1 - dstRect.y0,
                            GL_BGRA, GL_UNSIGNED_BYTE, unpacked_buf);
            glFinish();
            free(unpacked_buf);
            GLenum gl_error = glGetError();
            glx_ctx_pop();
            if (GL_NO_ERROR != gl_error) {
                traceError("error (%s): gl error %d\n", __func__, gl_error);
                err_code = VDP_STATUS_ERROR;
                goto quit;
            }
            err_code = VDP_STATUS_OK;
            goto quit;
        } while (0);
        break;
    default:
        traceError("error (%s): unsupported indexed format %s\n", __func__,
                   reverse_indexed_format(source_indexed_format));
        glx_ctx_pop();  // fix: balance the push above
        err_code = VDP_STATUS_INVALID_INDEXED_FORMAT;
        goto quit;
    }

quit:
    handle_release(surface);
    return err_code;
}
VdpStatus
vdpOutputSurfacePutBitsNative(VdpOutputSurface surface, void const *const *source_data,
                              uint32_t const *source_pitches, VdpRect const *destination_rect)
{
    // Uploads client pixels, already in the surface's native RGBA layout,
    // into a rectangular region of the backing texture.
    if (NULL == source_data || NULL == source_pitches)
        return VDP_STATUS_INVALID_POINTER;

    VdpOutputSurfaceData *surf_data = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == surf_data)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *device_data = surf_data->deviceData;

    // default to the whole surface when no destination rect was supplied
    VdpRect rect = {0, 0, surf_data->width, surf_data->height};
    if (destination_rect)
        rect = *destination_rect;

    glx_ctx_push_thread_local(device_data);
    glBindTexture(GL_TEXTURE_2D, surf_data->tex_id);

    // unpack state: source pitch expressed in pixels
    glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0] / surf_data->bytes_per_pixel);
    if (4 != surf_data->bytes_per_pixel)
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexSubImage2D(GL_TEXTURE_2D, 0, rect.x0, rect.y0,
                    rect.x1 - rect.x0, rect.y1 - rect.y0,
                    surf_data->gl_format, surf_data->gl_type, source_data[0]);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    if (4 != surf_data->bytes_per_pixel)
        glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
    glFinish();

    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        handle_release(surface);
        return VDP_STATUS_ERROR;
    }

    handle_release(surface);
    return VDP_STATUS_OK;
}
VdpStatus
vdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface, VdpYCbCrFormat source_ycbcr_format,
                             void const *const *source_data, uint32_t const *source_pitches,
                             VdpRect const *destination_rect, VdpCSCMatrix const *csc_matrix)
{
    // Not implemented: YCbCr upload into output surfaces is unsupported.
    (void)surface;
    (void)source_ycbcr_format;
    (void)source_data;
    (void)source_pitches;
    (void)destination_rect;
    (void)csc_matrix;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpOutputSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
                                  VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height)
{
    // Reports whether an RGBA format can back an output surface, and the
    // maximum surface dimensions (the GL maximum texture size).
    if (NULL == is_supported || NULL == max_width || NULL == max_height)
        return VDP_STATUS_INVALID_POINTER;

    VdpDeviceData *device_data = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == device_data)
        return VDP_STATUS_INVALID_HANDLE;

    // exactly the formats handled by vdpOutputSurfaceCreate are supported
    *is_supported = (VDP_RGBA_FORMAT_B8G8R8A8 == surface_rgba_format ||
                     VDP_RGBA_FORMAT_R8G8B8A8 == surface_rgba_format ||
                     VDP_RGBA_FORMAT_R10G10B10A2 == surface_rgba_format ||
                     VDP_RGBA_FORMAT_B10G10R10A2 == surface_rgba_format ||
                     VDP_RGBA_FORMAT_A8 == surface_rgba_format);

    glx_ctx_push_thread_local(device_data);
    GLint max_texture_size;
    glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size);
    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        handle_release(device);
        return VDP_STATUS_ERROR;
    }

    *max_width = max_texture_size;
    *max_height = max_texture_size;
    handle_release(device);
    return VDP_STATUS_OK;
}
VdpStatus
vdpOutputSurfaceQueryGetPutBitsNativeCapabilities(VdpDevice device,
                                                  VdpRGBAFormat surface_rgba_format,
                                                  VdpBool *is_supported)
{
    // Not implemented; parameters referenced only to silence warnings.
    (void)device;
    (void)surface_rgba_format;
    (void)is_supported;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpOutputSurfaceQueryPutBitsIndexedCapabilities(VdpDevice device,
                                                VdpRGBAFormat surface_rgba_format,
                                                VdpIndexedFormat bits_indexed_format,
                                                VdpColorTableFormat color_table_format,
                                                VdpBool *is_supported)
{
    // Not implemented; parameters referenced only to silence warnings.
    (void)device;
    (void)surface_rgba_format;
    (void)bits_indexed_format;
    (void)color_table_format;
    (void)is_supported;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpOutputSurfaceQueryPutBitsYCbCrCapabilities(VdpDevice device,
                                              VdpRGBAFormat surface_rgba_format,
                                              VdpYCbCrFormat bits_ycbcr_format,
                                              VdpBool *is_supported)
{
    // Not implemented; parameters referenced only to silence warnings.
    (void)device;
    (void)surface_rgba_format;
    (void)bits_ycbcr_format;
    (void)is_supported;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
// Blends a bitmap surface (or a constant color, when the source handle does
// not resolve) into a region of an output surface.
VdpStatus
vdpOutputSurfaceRenderBitmapSurface(VdpOutputSurface destination_surface,
                                    VdpRect const *destination_rect,
                                    VdpBitmapSurface source_surface, VdpRect const *source_rect,
                                    VdpColor const *colors,
                                    VdpOutputSurfaceRenderBlendState const *blend_state,
                                    uint32_t flags)
{
    VdpStatus err_code;
    if (blend_state) {
        // only the known struct revision is accepted
        if (VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION != blend_state->struct_version) {
            err_code = VDP_STATUS_INVALID_VALUE;
            goto quit_skip_release;
        }
    }
    VdpOutputSurfaceData *dstSurfData =
        handle_acquire(destination_surface, HANDLETYPE_OUTPUT_SURFACE);
    VdpBitmapSurfaceData *srcSurfData = handle_acquire(source_surface, HANDLETYPE_BITMAP_SURFACE);
    if (NULL == dstSurfData) {
        err_code = VDP_STATUS_INVALID_HANDLE;
        goto quit;
    }
    // a missing source surface is tolerated (rendered without a texture),
    // but a source belonging to another device is an error
    if (srcSurfData && srcSurfData->deviceData != dstSurfData->deviceData) {
        err_code = VDP_STATUS_HANDLE_DEVICE_MISMATCH;
        goto quit;
    }
    VdpDeviceData *deviceData = dstSurfData->deviceData;
    // default rects: whole source, whole destination
    VdpRect s_rect = {0, 0, 0, 0};
    VdpRect d_rect = {0, 0, dstSurfData->width, dstSurfData->height};
    s_rect.x1 = srcSurfData ? srcSurfData->width : 1;
    s_rect.y1 = srcSurfData ? srcSurfData->height : 1;
    if (source_rect)
        s_rect = *source_rect;
    if (destination_rect)
        d_rect = *destination_rect;

    // select blend functions
    struct blend_state_struct bs = vdpBlendStateToGLBlendState(blend_state);
    if (bs.invalid_func) {
        err_code = VDP_STATUS_INVALID_BLEND_FACTOR;
        goto quit;
    }
    if (bs.invalid_eq) {
        err_code = VDP_STATUS_INVALID_BLEND_EQUATION;
        goto quit;
    }

    glx_ctx_push_thread_local(deviceData);
    glBindFramebuffer(GL_FRAMEBUFFER, dstSurfData->fbo_id);

    // orthographic projection matching destination surface pixels
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, dstSurfData->width, 0, dstSurfData->height, -1.0f, 1.0f);
    glViewport(0, 0, dstSurfData->width, dstSurfData->height);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    if (srcSurfData) {
        glBindTexture(GL_TEXTURE_2D, srcSurfData->tex_id);
        // lazily upload pending CPU-side bitmap changes to the texture
        if (srcSurfData->dirty) {
            if (4 != srcSurfData->bytes_per_pixel)
                glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, srcSurfData->width, srcSurfData->height,
                            srcSurfData->gl_format, srcSurfData->gl_type, srcSurfData->bitmap_data);
            if (4 != srcSurfData->bytes_per_pixel)
                glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
            srcSurfData->dirty = 0;
        }
        // texture matrix maps pixel coordinates to [0;1] texture space
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glScalef(1.0f/srcSurfData->width, 1.0f/srcSurfData->height, 1.0f);
        if (srcSurfData->rgba_format == VDP_RGBA_FORMAT_A8) {
            // A8 data lives in the red channel; swizzle it to alpha in a shader
            glUseProgram(deviceData->shaders[glsl_red_to_alpha_swizzle].program);
            glUniform1i(deviceData->shaders[glsl_red_to_alpha_swizzle].uniform.tex_0, 0);
        }
    }
    compose_surfaces(bs, s_rect, d_rect, colors, flags, !!srcSurfData);
    glUseProgram(0);
    glFinish();
    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    err_code = VDP_STATUS_OK;
quit:
    handle_release(source_surface);
    handle_release(destination_surface);
quit_skip_release:
    return err_code;
}
// Blends one output surface (or a constant color, when the source handle
// does not resolve) into a region of another output surface.
VdpStatus
vdpOutputSurfaceRenderOutputSurface(VdpOutputSurface destination_surface,
                                    VdpRect const *destination_rect,
                                    VdpOutputSurface source_surface, VdpRect const *source_rect,
                                    VdpColor const *colors,
                                    VdpOutputSurfaceRenderBlendState const *blend_state,
                                    uint32_t flags)
{
    VdpStatus err_code;
    if (blend_state) {
        // only the known struct revision is accepted
        if (VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION != blend_state->struct_version) {
            err_code = VDP_STATUS_INVALID_VALUE;
            goto quit_skip_release;
        }
    }
    VdpOutputSurfaceData *dstSurfData =
        handle_acquire(destination_surface, HANDLETYPE_OUTPUT_SURFACE);
    VdpOutputSurfaceData *srcSurfData = handle_acquire(source_surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == dstSurfData) {
        err_code = VDP_STATUS_INVALID_HANDLE;
        goto quit;
    }
    // a missing source surface is tolerated (rendered without a texture),
    // but a source belonging to another device is an error
    if (srcSurfData && srcSurfData->deviceData != dstSurfData->deviceData) {
        err_code = VDP_STATUS_HANDLE_DEVICE_MISMATCH;
        goto quit;
    }
    VdpDeviceData *deviceData = dstSurfData->deviceData;
    // default rects: whole source, whole destination
    VdpRect s_rect = {0, 0, 0, 0};
    VdpRect d_rect = {0, 0, dstSurfData->width, dstSurfData->height};
    s_rect.x1 = srcSurfData ? srcSurfData->width : 1;
    s_rect.y1 = srcSurfData ? srcSurfData->height : 1;
    if (source_rect)
        s_rect = *source_rect;
    if (destination_rect)
        d_rect = *destination_rect;

    // select blend functions
    struct blend_state_struct bs = vdpBlendStateToGLBlendState(blend_state);
    if (bs.invalid_func) {
        err_code = VDP_STATUS_INVALID_BLEND_FACTOR;
        goto quit;
    }
    if (bs.invalid_eq) {
        err_code = VDP_STATUS_INVALID_BLEND_EQUATION;
        goto quit;
    }

    glx_ctx_push_thread_local(deviceData);
    glBindFramebuffer(GL_FRAMEBUFFER, dstSurfData->fbo_id);

    // orthographic projection matching destination surface pixels
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, dstSurfData->width, 0, dstSurfData->height, -1.0f, 1.0f);
    glViewport(0, 0, dstSurfData->width, dstSurfData->height);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    if (srcSurfData) {
        glBindTexture(GL_TEXTURE_2D, srcSurfData->tex_id);
        // texture matrix maps pixel coordinates to [0;1] texture space
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glScalef(1.0f/srcSurfData->width, 1.0f/srcSurfData->height, 1.0f);
    }
    compose_surfaces(bs, s_rect, d_rect, colors, flags, !!srcSurfData);
    glFinish();
    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    err_code = VDP_STATUS_OK;
quit:
    handle_release(source_surface);
    handle_release(destination_surface);
quit_skip_release:
    return err_code;
}
libvdpau-va-gl-0.3.6/src/api-presentation-queue.c 0000664 0000000 0000000 00000046002 12665616006 0021703 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#define GL_GLEXT_PROTOTYPES
#define _GNU_SOURCE
#define _XOPEN_SOURCE 500
#include
#include
#include
#include
#include
#include
#include
#include "ctx-stack.h"
#include "globals.h"
#include "handle-storage.h"
#include "api.h"
#include "trace.h"
#include "watermark.h"
// One queued presentation request, processed by the worker thread.
struct task_s {
    struct timespec when;           // earliest wall-clock time to display
    uint32_t clip_width;            // clip output to this width; 0 = full surface width
    uint32_t clip_height;           // clip output to this height; 0 = full surface height
    VdpOutputSurface surface;       // output surface to show
    unsigned int wipe_tasks;        // non-zero: drop all pending tasks of queue_id instead of displaying
    VdpPresentationQueue queue_id;  // presentation queue this task belongs to
};

// hand-off queue from API threads to the worker thread; created lazily
static GAsyncQueue *async_q = NULL;
// single worker thread running presentation_thread()
static pthread_t presentation_thread_id;
static
VdpTime
timespec2vdptime(struct timespec t)
{
    // Converts a timespec to VdpTime: nanoseconds as a 64-bit integer.
    const uint64_t ns_per_sec = 1000000000ull;
    return (uint64_t)t.tv_sec * ns_per_sec + (uint64_t)t.tv_nsec;
}
static
struct timespec
vdptime2timespec(VdpTime t)
{
    // Converts a VdpTime (nanoseconds) back into a timespec.
    const uint64_t ns_per_sec = 1000000000ull;
    struct timespec res = {
        .tv_sec  = t / ns_per_sec,
        .tv_nsec = t % ns_per_sec,
    };
    return res;
}
// Blocks until the surface has been displayed (status becomes IDLE), then
// reports when it was first presented.
VdpStatus
vdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_queue,
                                          VdpOutputSurface surface,
                                          VdpTime *first_presentation_time)
{
    if (!first_presentation_time)
        return VDP_STATUS_INVALID_POINTER;
    // the queue handle is acquired only for validation and released at
    // once, so holding it does not block the presentation worker
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (NULL == pqData)
        return VDP_STATUS_INVALID_HANDLE;
    handle_release(presentation_queue);

    VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == surfData)
        return VDP_STATUS_INVALID_HANDLE;

    // TODO: use locking instead of busy loop
    // poll roughly once per millisecond, releasing the surface handle each
    // iteration so the display thread can update the surface status
    while (surfData->status != VDP_PRESENTATION_QUEUE_STATUS_IDLE) {
        handle_release(surface);
        usleep(1000);
        surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
        if (!surfData)
            return VDP_STATUS_ERROR;    // surface vanished while waiting
    }

    *first_presentation_time = surfData->first_presentation_time;
    handle_release(surface);
    return VDP_STATUS_OK;
}
VdpStatus
vdpPresentationQueueQuerySurfaceStatus(VdpPresentationQueue presentation_queue,
                                       VdpOutputSurface surface, VdpPresentationQueueStatus *status,
                                       VdpTime *first_presentation_time)
{
    // Reports the current status of a surface and the time it was first
    // presented, without blocking.
    if (NULL == status || NULL == first_presentation_time)
        return VDP_STATUS_INVALID_POINTER;

    VdpPresentationQueueData *queue_data =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (NULL == queue_data)
        return VDP_STATUS_INVALID_HANDLE;

    VdpOutputSurfaceData *surf_data = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == surf_data) {
        handle_release(presentation_queue);
        return VDP_STATUS_INVALID_HANDLE;
    }

    *status = surf_data->status;
    *first_presentation_time = surf_data->first_presentation_time;

    handle_release(presentation_queue);
    handle_release(surface);
    return VDP_STATUS_OK;
}
static
void
free_glx_pixmaps(VdpPresentationQueueTargetData *pqTargetData)
{
    // Frees the target's GLX pixmap, copy GC, and backing X pixmap.
    Display *display = pqTargetData->deviceData->display;

    // a None pixmap means nothing was ever allocated
    if (pqTargetData->pixmap == None)
        return;

    glXDestroyGLXPixmap(display, pqTargetData->glx_pixmap);
    XFreeGC(display, pqTargetData->plain_copy_gc);
    XFreePixmap(display, pqTargetData->pixmap);
    pqTargetData->pixmap = None;    // mark as freed
}
// create new pixmap, glx pixmap, GC if size has changed.
// This function relies on external serializing Xlib access
static
void
recreate_pixmaps_if_geometry_changed(VdpPresentationQueueTargetData *pqTargetData)
{
    Window root_wnd;
    int xpos, ypos;
    unsigned int width, height, border_width, depth;
    Display *dpy = pqTargetData->deviceData->display;
    // query the current size of the target drawable
    XGetGeometry(dpy, pqTargetData->drawable, &root_wnd, &xpos, &ypos, &width, &height,
                 &border_width, &depth);
    if (width != pqTargetData->drawable_width || height != pqTargetData->drawable_height) {
        // size changed: drop the old pixmap/GC/GLX pixmap (a no-op when
        // none were allocated yet) and create replacements of the new size
        free_glx_pixmaps(pqTargetData);
        pqTargetData->drawable_width = width;
        pqTargetData->drawable_height = height;
        pqTargetData->pixmap = XCreatePixmap(dpy, pqTargetData->deviceData->root,
                                             pqTargetData->drawable_width,
                                             pqTargetData->drawable_height, depth);
        // GC used for the plain pixmap -> drawable copy at display time
        XGCValues gc_values = {.function = GXcopy, .graphics_exposures = True };
        pqTargetData->plain_copy_gc = XCreateGC(dpy, pqTargetData->pixmap,
                                                GCFunction | GCGraphicsExposures, &gc_values);
        pqTargetData->glx_pixmap = glXCreateGLXPixmap(dpy, pqTargetData->xvi, pqTargetData->pixmap);
        // ensure the new X resources exist before GL starts using them
        XSync(dpy, False);
    }
}
static
void
do_presentation_queue_display(struct task_s *task)
{
    // Runs on the presentation worker thread: renders the task's output
    // surface into the target's GLX pixmap, overlays the optional
    // watermark, copies the pixmap onto the real drawable, then marks the
    // surface idle and records its presentation timestamp.
    //
    // Fix over the previous version: the pqdelay traceInfo call passed
    // delta_ts.tv_sec (time_t) and delta_ts.tv_nsec (long) to "%d"
    // conversions without casts — a varargs type mismatch on platforms
    // where those types are wider than int; they are now cast like the
    // adjacent `now` values.
    VdpPresentationQueueData *pqData =
        handle_acquire(task->queue_id, HANDLETYPE_PRESENTATION_QUEUE);
    if (!pqData)
        return;     // the queue was destroyed meanwhile; nothing to do
    VdpDeviceData *deviceData = pqData->deviceData;
    const VdpOutputSurface surface = task->surface;
    const uint32_t clip_width = task->clip_width;
    const uint32_t clip_height = task->clip_height;
    VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (surfData == NULL) {
        handle_release(task->queue_id);
        return;
    }

    glx_ctx_lock();
    recreate_pixmaps_if_geometry_changed(pqData->targetData);
    glXMakeCurrent(deviceData->display, pqData->targetData->glx_pixmap, pqData->targetData->glc);

    // zero clip dimensions mean "whole surface"
    const uint32_t target_width = (clip_width > 0) ? clip_width : surfData->width;
    const uint32_t target_height = (clip_height > 0) ? clip_height : surfData->height;

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, target_width, target_height, 0, -1.0, 1.0);
    glViewport(0, 0, target_width, target_height);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    // texture matrix maps pixel coordinates to [0;1] texture space
    glMatrixMode(GL_TEXTURE);
    glLoadIdentity();
    glScalef(1.0f/surfData->width, 1.0f/surfData->height, 1.0f);

    // draw the surface texture as one full-target quad
    glEnable(GL_TEXTURE_2D);
    glDisable(GL_BLEND);
    glBindTexture(GL_TEXTURE_2D, surfData->tex_id);
    glColor4f(1, 1, 1, 1);
    glBegin(GL_QUADS);
    glTexCoord2i(0, 0);                         glVertex2i(0, 0);
    glTexCoord2i(target_width, 0);              glVertex2i(target_width, 0);
    glTexCoord2i(target_width, target_height);  glVertex2i(target_width, target_height);
    glTexCoord2i(0, target_height);             glVertex2i(0, target_height);
    glEnd();

    if (global.quirks.show_watermark) {
        // semi-transparent watermark in the bottom-right corner
        glEnable(GL_BLEND);
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        glBlendEquation(GL_FUNC_ADD);
        glBindTexture(GL_TEXTURE_2D, deviceData->watermark_tex_id);
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glColor4f(1.0, 1.0, 1.0, 0.2);
        glBegin(GL_QUADS);
        glTexCoord2i(0, 0);
        glVertex2i(target_width - watermark_width, target_height - watermark_height);
        glTexCoord2i(1, 0);
        glVertex2i(target_width, target_height - watermark_height);
        glTexCoord2i(1, 1);
        glVertex2i(target_width, target_height);
        glTexCoord2i(0, 1);
        glVertex2i(target_width - watermark_width, target_height);
        glEnd();
    }
    glFinish();
    GLenum gl_error = glGetError();

    // copy the rendered pixmap to the drawable under an X error trap,
    // since the drawable may disappear at any moment
    x11_push_eh();
    XCopyArea(deviceData->display, pqData->targetData->pixmap, pqData->targetData->drawable,
              pqData->targetData->plain_copy_gc, 0, 0, target_width, target_height, 0, 0);
    XSync(deviceData->display, False);
    int x11_err = x11_pop_eh();
    if (x11_err != Success) {
        char buf[200] = { 0 };
        XGetErrorText(deviceData->display, x11_err, buf, sizeof(buf));
        traceError("warning (%s): caught X11 error %s\n", __func__, buf);
    }
    glx_ctx_unlock();

    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    surfData->first_presentation_time = timespec2vdptime(now);
    surfData->status = VDP_PRESENTATION_QUEUE_STATUS_IDLE;

    if (global.quirks.log_pq_delay) {
        const int64_t delta = timespec2vdptime(now) - surfData->queued_at;
        const struct timespec delta_ts = vdptime2timespec(delta);
        traceInfo("pqdelay %d.%09d %d.%09d\n", (int)now.tv_sec, (int)now.tv_nsec,
                  (int)delta_ts.tv_sec, (int)delta_ts.tv_nsec);
    }

    handle_release(surface);
    handle_release(task->queue_id);
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
    }
}
gint
compare_func(gconstpointer a, gconstpointer b, gpointer user_data)
{
    // Orders presentation tasks chronologically by deadline (earliest
    // first), so the worker's internal queue stays sorted.
    (void)user_data;
    const struct task_s *lhs = a;
    const struct task_s *rhs = b;

    if (lhs->when.tv_sec != rhs->when.tv_sec)
        return (lhs->when.tv_sec < rhs->when.tv_sec) ? -1 : 1;
    if (lhs->when.tv_nsec != rhs->when.tv_nsec)
        return (lhs->when.tv_nsec < rhs->when.tv_nsec) ? -1 : 1;
    return 0;
}
static
void *
presentation_thread(void *param)
{
    // Worker loop: maintains a deadline-sorted internal queue of display
    // tasks, executes tasks whose time has come, and waits on the global
    // async queue (at most until the next deadline) for new tasks.
    //
    // Fix over the previous version: when wiping a destroyed queue's tasks,
    // tasks that matched the wiped queue_id were popped from the internal
    // queue but never freed, leaking their g_slice allocations.
    (void)param;
    GQueue *int_q = g_queue_new(); // internal queue of tasks, always sorted

    while (1) {
        gint64 timeout;     // wait budget for the async pop, in microseconds
        struct task_s *task = g_queue_peek_head(int_q);
        if (task) {
            // the internal queue has a task; compute time until its deadline
            struct timespec now;
            clock_gettime(CLOCK_REALTIME, &now);
            timeout = (task->when.tv_sec - now.tv_sec) * 1000 * 1000 +
                      (task->when.tv_nsec - now.tv_nsec) / 1000;
            if (timeout <= 0) {
                // task is ready to go
                g_queue_pop_head(int_q); // remove it from queue
                // run the task
                do_presentation_queue_display(task);
                g_slice_free(struct task_s, task);
                continue;
            }
        } else {
            // no tasks in queue, sleep for a while
            timeout = 1000 * 1000; // one second (value is in microseconds)
        }

        task = g_async_queue_timeout_pop(async_q, timeout);
        if (task) {
            if (task->wipe_tasks) {
                // rebuild the internal queue without the wiped queue's tasks
                GQueue *new_q = g_queue_new();
                while (!g_queue_is_empty(int_q)) {
                    struct task_s *t = g_queue_pop_head(int_q);
                    if (t->queue_id != task->queue_id)
                        g_queue_push_tail(new_q, t);
                    else
                        g_slice_free(struct task_s, t);  // fix: was leaked
                }
                g_queue_free(int_q);
                int_q = new_q;
                g_slice_free(struct task_s, task);
                continue;
            }
            // ordinary task: keep the internal queue sorted by deadline
            g_queue_insert_sorted(int_q, task, compare_func, NULL);
        }
    }

    // not reached: the loop above never terminates
    g_queue_free(int_q);
    return NULL;
}
VdpStatus
vdpPresentationQueueCreate(VdpDevice device, VdpPresentationQueueTarget presentation_queue_target,
                           VdpPresentationQueue *presentation_queue)
{
    // Creates a presentation queue bound to a device/target pair. The first
    // queue created also spawns the shared worker thread and its async queue.
    if (!presentation_queue)
        return VDP_STATUS_INVALID_POINTER;

    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (!deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    VdpPresentationQueueTargetData *targetData =
        handle_acquire(presentation_queue_target, HANDLETYPE_PRESENTATION_QUEUE_TARGET);
    if (!targetData) {
        handle_release(device);
        return VDP_STATUS_INVALID_HANDLE;
    }

    VdpPresentationQueueData *data = calloc(1, sizeof(*data));
    if (!data) {
        handle_release(device);
        handle_release(presentation_queue_target);
        return VDP_STATUS_RESOURCES;
    }

    data->type       = HANDLETYPE_PRESENTATION_QUEUE;
    data->device     = device;
    data->deviceData = deviceData;
    data->target     = presentation_queue_target;
    data->targetData = targetData;
    // default background: fully transparent black
    data->bg_color.red   = 0.0;
    data->bg_color.green = 0.0;
    data->bg_color.blue  = 0.0;
    data->bg_color.alpha = 0.0;

    ref_device(deviceData);
    ref_pq_target(targetData);
    *presentation_queue = handle_insert(data);

    // lazily initialize the task queue and launch the worker thread
    if (!async_q) {
        async_q = g_async_queue_new();
        pthread_create(&presentation_thread_id, NULL, presentation_thread, data);
    }

    handle_release(device);
    handle_release(presentation_queue_target);
    return VDP_STATUS_OK;
}
VdpStatus
vdpPresentationQueueDestroy(VdpPresentationQueue presentation_queue)
{
    // Destroys a presentation queue. A special "wipe" task is pushed to the
    // worker thread so that any still-scheduled tasks of this queue are
    // discarded rather than executed on a dead handle.
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (!pqData)
        return VDP_STATUS_INVALID_HANDLE;

    struct task_s *wipe = g_slice_new0(struct task_s);
    wipe->when = vdptime2timespec(0); // time 0 sorts first: handled ASAP
    wipe->queue_id = presentation_queue;
    wipe->wipe_tasks = 1;
    g_async_queue_push(async_q, wipe);

    handle_expunge(presentation_queue);
    unref_device(pqData->deviceData);
    unref_pq_target(pqData->targetData);
    free(pqData);
    return VDP_STATUS_OK;
}
VdpStatus
vdpPresentationQueueSetBackgroundColor(VdpPresentationQueue presentation_queue,
                                       VdpColor *const background_color)
{
    // Sets the queue's background color. A NULL pointer resets it to the
    // default, fully transparent black.
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (!pqData)
        return VDP_STATUS_INVALID_HANDLE;

    if (background_color) {
        pqData->bg_color = *background_color;
    } else {
        pqData->bg_color.red   = 0.0;
        pqData->bg_color.green = 0.0;
        pqData->bg_color.blue  = 0.0;
        pqData->bg_color.alpha = 0.0;
    }

    handle_release(presentation_queue);
    return VDP_STATUS_OK;
}
VdpStatus
vdpPresentationQueueGetBackgroundColor(VdpPresentationQueue presentation_queue,
                                       VdpColor *background_color)
{
    // Copies the queue's current background color into *background_color.
    if (!background_color)
        return VDP_STATUS_INVALID_POINTER;

    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (!pqData)
        return VDP_STATUS_INVALID_HANDLE;

    *background_color = pqData->bg_color;
    handle_release(presentation_queue);
    return VDP_STATUS_OK;
}
VdpStatus
vdpPresentationQueueGetTime(VdpPresentationQueue presentation_queue, VdpTime *current_time)
{
    // Returns the current wall-clock time in VdpTime units. The queue handle
    // is intentionally unused: time is global, not per-queue.
    (void)presentation_queue;
    if (!current_time)
        return VDP_STATUS_INVALID_POINTER;

    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    *current_time = timespec2vdptime(now);
    return VDP_STATUS_OK;
}
VdpStatus
vdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue, VdpOutputSurface surface,
                            uint32_t clip_width, uint32_t clip_height,
                            VdpTime earliest_presentation_time)
{
    // Schedules an output surface for display. Actual rendering happens
    // asynchronously in the presentation worker thread once the requested
    // presentation time is reached.
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (!pqData)
        return VDP_STATUS_INVALID_HANDLE;

    VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (!surfData) {
        handle_release(presentation_queue);
        return VDP_STATUS_INVALID_HANDLE;
    }

    // queue and surface must belong to the same device
    if (surfData->deviceData != pqData->deviceData) {
        handle_release(surface);
        handle_release(presentation_queue);
        return VDP_STATUS_HANDLE_DEVICE_MISMATCH;
    }

    struct task_s *task = g_slice_new0(struct task_s);
    task->when        = vdptime2timespec(earliest_presentation_time);
    task->clip_width  = clip_width;
    task->clip_height = clip_height;
    task->surface     = surface;
    task->queue_id    = presentation_queue;

    surfData->first_presentation_time = 0;
    surfData->status = VDP_PRESENTATION_QUEUE_STATUS_QUEUED;

    if (global.quirks.log_pq_delay) {
        // remember submission time so the worker can log queueing latency
        struct timespec now;
        clock_gettime(CLOCK_REALTIME, &now);
        surfData->queued_at = timespec2vdptime(now);
    }

    g_async_queue_push(async_q, task);
    handle_release(presentation_queue);
    handle_release(surface);
    return VDP_STATUS_OK;
}
VdpStatus
vdpPresentationQueueTargetCreateX11(VdpDevice device, Drawable drawable,
                                    VdpPresentationQueueTarget *target)
{
    // Wraps an X11 drawable into a presentation queue target, creating a GLX
    // visual and a GLX context for displaying rendered frames on it.
    if (!target)
        return VDP_STATUS_INVALID_POINTER;
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    VdpPresentationQueueTargetData *data = calloc(1, sizeof(VdpPresentationQueueTargetData));
    if (NULL == data) {
        handle_release(device);
        return VDP_STATUS_RESOURCES;
    }

    glx_ctx_lock();
    data->type = HANDLETYPE_PRESENTATION_QUEUE_TARGET;
    data->device = device;
    data->deviceData = deviceData;
    data->drawable = drawable;
    data->refcount = 0;
    pthread_mutex_init(&data->refcount_mutex, NULL);

    // emulate geometry change. Hope there will be no drawables of such size
    data->drawable_width = (unsigned int)(-1);
    data->drawable_height = (unsigned int)(-1);
    data->pixmap = None;

    // No double buffering since we are going to render to glx pixmap
    GLint att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, None };
    data->xvi = glXChooseVisual(deviceData->display, deviceData->screen, att);
    if (NULL == data->xvi) {
        traceError("error (%s): glXChooseVisual failed\n", __func__);
        pthread_mutex_destroy(&data->refcount_mutex);  // fix: mutex was leaked here
        free(data);
        glx_ctx_unlock();
        handle_release(device);
        return VDP_STATUS_ERROR;
    }
    recreate_pixmaps_if_geometry_changed(data);

    // create context for displaying result (can share display lists with deviceData->glc)
    data->glc = glXCreateContext(deviceData->display, data->xvi, deviceData->root_glc, GL_TRUE);
    if (NULL == data->glc) {
        // fix: result was previously unchecked, leading to a NULL context deref later
        traceError("error (%s): glXCreateContext failed\n", __func__);
        free_glx_pixmaps(data);
        XFree(data->xvi);
        pthread_mutex_destroy(&data->refcount_mutex);
        free(data);
        glx_ctx_unlock();
        handle_release(device);
        return VDP_STATUS_ERROR;
    }

    ref_device(deviceData);
    *target = handle_insert(data);
    glx_ctx_unlock();
    handle_release(device);
    return VDP_STATUS_OK;
}
VdpStatus
vdpPresentationQueueTargetDestroy(VdpPresentationQueueTarget presentation_queue_target)
{
    // Destroys a presentation queue target. Fails if references are still
    // held, or if GL reports an error during resource teardown (in which
    // case the handle is left alive, matching previous behavior).
    VdpPresentationQueueTargetData *targetData =
        handle_acquire(presentation_queue_target, HANDLETYPE_PRESENTATION_QUEUE_TARGET);
    if (!targetData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = targetData->deviceData;

    if (targetData->refcount != 0) {
        traceError("warning (%s): non-zero reference count (%d)\n", __func__,
                   targetData->refcount);
        handle_release(presentation_queue_target);
        return VDP_STATUS_ERROR;
    }

    // drawable may be destroyed already, so one should activate global context
    glx_ctx_push_thread_local(deviceData);
    glXDestroyContext(deviceData->display, targetData->glc);
    free_glx_pixmaps(targetData);
    const GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (gl_error != GL_NO_ERROR) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        handle_release(presentation_queue_target);
        return VDP_STATUS_ERROR;
    }

    unref_device(deviceData);
    XFree(targetData->xvi);
    pthread_mutex_destroy(&targetData->refcount_mutex);
    handle_expunge(presentation_queue_target);
    free(targetData);
    return VDP_STATUS_OK;
}
libvdpau-va-gl-0.3.6/src/api-video-decoder.c 0000664 0000000 0000000 00000063733 12665616006 0020571 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#include
#include
#include "ctx-stack.h"
#include "h264-parse.h"
#include "trace.h"
#include "api.h"
VdpStatus
vdpDecoderCreate(VdpDevice device, VdpDecoderProfile profile, uint32_t width, uint32_t height,
                 uint32_t max_references, VdpDecoder *decoder)
{
    // Creates a VA-API backed decoder. Only H.264 profiles are handled; if
    // the requested profile fails, progressively more advanced H.264
    // profiles are tried until one succeeds or options run out.
    VdpStatus err_code;
    if (!decoder)
        return VDP_STATUS_INVALID_POINTER;
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;
    if (!deviceData->va_available) {
        err_code = VDP_STATUS_INVALID_DECODER_PROFILE;
        goto quit;
    }

    VADisplay va_dpy = deviceData->va_dpy;
    VdpDecoderData *data = calloc(1, sizeof(VdpDecoderData));
    if (NULL == data) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    data->type = HANDLETYPE_DECODER;
    data->device = device;
    data->deviceData = deviceData;
    data->profile = profile;
    data->width = width;
    data->height = height;
    data->max_references = max_references;

    // initialize free_list. Initially they all free
    data->free_list_head = -1;
    for (int k = 0; k < MAX_RENDER_TARGETS; k ++) {
        free_list_push(data->free_list, &data->free_list_head, k);
    }

    VAProfile va_profile;
    VAStatus status;
    int final_try = 0;
    VdpDecoderProfile next_profile = profile;

    // Try to create decoder for asked profile. On failure try to create more advanced one
    while (! final_try) {
        profile = next_profile;
        switch (profile) {
        case VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE:
            va_profile = VAProfileH264ConstrainedBaseline;
            data->num_render_targets = NUM_RENDER_TARGETS_H264;
            next_profile = VDP_DECODER_PROFILE_H264_BASELINE;
            break;
        case VDP_DECODER_PROFILE_H264_BASELINE:
            va_profile = VAProfileH264Baseline;
            data->num_render_targets = NUM_RENDER_TARGETS_H264;
            next_profile = VDP_DECODER_PROFILE_H264_MAIN;
            break;
        case VDP_DECODER_PROFILE_H264_MAIN:
            va_profile = VAProfileH264Main;
            data->num_render_targets = NUM_RENDER_TARGETS_H264;
            next_profile = VDP_DECODER_PROFILE_H264_HIGH;
            break;
        case VDP_DECODER_PROFILE_H264_HIGH:
            va_profile = VAProfileH264High;
            data->num_render_targets = NUM_RENDER_TARGETS_H264;
            // there is no more advanced profile, so it's final try
            final_try = 1;
            break;
        default:
            traceError("error (%s): decoder %s not implemented\n", __func__,
                       reverse_decoder_profile(profile));
            err_code = VDP_STATUS_INVALID_DECODER_PROFILE;
            goto quit_free_data;
        }

        status = vaCreateConfig(va_dpy, va_profile, VAEntrypointVLD, NULL, 0, &data->config_id);
        if (VA_STATUS_SUCCESS == status) // break loop if decoder created
            break;
    }

    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit_free_data;
    }

    // Create surfaces. All video surfaces created here, rather than in VdpVideoSurfaceCreate.
    // VAAPI requires surfaces to be bound with context on its creation time, while VDPAU allows
    // to do it later. So here is a trick: VDP video surfaces get their va_surf dynamically in
    // DecoderRender.

    // TODO: check format of surfaces created
#if VA_CHECK_VERSION(0, 34, 0)
    status = vaCreateSurfaces(va_dpy, VA_RT_FORMAT_YUV420, width, height,
        data->render_targets, data->num_render_targets, NULL, 0);
#else
    status = vaCreateSurfaces(va_dpy, width, height, VA_RT_FORMAT_YUV420,
        data->num_render_targets, data->render_targets);
#endif
    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit_destroy_config;     // fix: config_id used to leak on this path
    }

    status = vaCreateContext(va_dpy, data->config_id, width, height, VA_PROGRESSIVE,
        data->render_targets, data->num_render_targets, &data->context_id);
    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit_destroy_surfaces;   // fix: surfaces and config used to leak on this path
    }

    ref_device(deviceData);
    *decoder = handle_insert(data);
    err_code = VDP_STATUS_OK;
    goto quit;

quit_destroy_surfaces:
    vaDestroySurfaces(va_dpy, data->render_targets, data->num_render_targets);
quit_destroy_config:
    vaDestroyConfig(va_dpy, data->config_id);
quit_free_data:
    free(data);
quit:
    handle_release(device);
    return err_code;
}
VdpStatus
vdpDecoderDestroy(VdpDecoder decoder)
{
    // Releases the decoder's VA-API resources (surfaces, context, config)
    // and frees the decoder object itself.
    VdpDecoderData *decoderData = handle_acquire(decoder, HANDLETYPE_DECODER);
    if (!decoderData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = decoderData->deviceData;

    if (deviceData->va_available) {
        VADisplay va_dpy = deviceData->va_dpy;
        vaDestroySurfaces(va_dpy, decoderData->render_targets, decoderData->num_render_targets);
        vaDestroyContext(va_dpy, decoderData->context_id);
        vaDestroyConfig(va_dpy, decoderData->config_id);
    }

    handle_expunge(decoder);
    unref_device(deviceData);
    free(decoderData);
    return VDP_STATUS_OK;
}
VdpStatus
vdpDecoderGetParameters(VdpDecoder decoder, VdpDecoderProfile *profile,
                        uint32_t *width, uint32_t *height)
{
    // Reports the profile and dimensions the decoder was created with.
    if (!profile || !width || !height)
        return VDP_STATUS_INVALID_POINTER;  // fix: was INVALID_HANDLE; NULL out-params
                                            // are a pointer error, as in sibling functions
    VdpDecoderData *decoderData = handle_acquire(decoder, HANDLETYPE_DECODER);
    if (!decoderData)
        return VDP_STATUS_INVALID_HANDLE;

    *profile = decoderData->profile;
    *width   = decoderData->width;
    *height  = decoderData->height;

    handle_release(decoder);
    return VDP_STATUS_OK;
}
static
VdpStatus
h264_translate_reference_frames(VdpVideoSurfaceData *dstSurfData, VdpDecoder decoder,
                                VdpDecoderData *decoderData,
                                VAPictureParameterBufferH264 *pic_param,
                                const VdpPictureInfoH264 *vdppi)
{
    // Fills CurrPic and ReferenceFrames of the VA picture parameter buffer
    // from VDPAU picture info. VDPAU surfaces that have no VA surface yet
    // are lazily assigned one from the decoder's free list.

    // take new VA surface from buffer if needed
    if (VA_INVALID_SURFACE == dstSurfData->va_surf) {
        int idx = free_list_pop(decoderData->free_list, &decoderData->free_list_head);
        if (-1 == idx)
            return VDP_STATUS_RESOURCES;
        dstSurfData->decoder = decoder;
        dstSurfData->va_surf = decoderData->render_targets[idx];
        dstSurfData->rt_idx  = idx;
    }

    // current frame
    pic_param->CurrPic.picture_id = dstSurfData->va_surf;
    pic_param->CurrPic.frame_idx  = vdppi->frame_num;
    pic_param->CurrPic.flags = vdppi->is_reference ? VA_PICTURE_H264_SHORT_TERM_REFERENCE : 0;
    if (vdppi->field_pic_flag) {
        pic_param->CurrPic.flags |=
            vdppi->bottom_field_flag ? VA_PICTURE_H264_BOTTOM_FIELD : VA_PICTURE_H264_TOP_FIELD;
    }
    pic_param->CurrPic.TopFieldOrderCnt    = vdppi->field_order_cnt[0];
    pic_param->CurrPic.BottomFieldOrderCnt = vdppi->field_order_cnt[1];

    // mark all pictures invalid preliminary
    for (int k = 0; k < 16; k ++)
        reset_va_picture_h264(&pic_param->ReferenceFrames[k]);

    // reference frames
    for (int k = 0; k < vdppi->num_ref_frames; k ++) {
        if (VDP_INVALID_HANDLE == vdppi->referenceFrames[k].surface) {
            reset_va_picture_h264(&pic_param->ReferenceFrames[k]);
            continue;
        }
        VdpReferenceFrameH264 const *vdp_ref = &(vdppi->referenceFrames[k]);
        VdpVideoSurfaceData *vdpSurfData =
            handle_acquire(vdp_ref->surface, HANDLETYPE_VIDEO_SURFACE);
        VAPictureH264 *va_ref = &(pic_param->ReferenceFrames[k]);
        if (NULL == vdpSurfData) {
            traceError("error (%s): NULL == vdpSurfData\n", __func__);
            return VDP_STATUS_ERROR;
        }

        // take new VA surface from buffer if needed
        if (VA_INVALID_SURFACE == vdpSurfData->va_surf) {
            int idx = free_list_pop(decoderData->free_list, &decoderData->free_list_head);
            if (-1 == idx) {
                handle_release(vdp_ref->surface);  // fix: handle was left acquired here
                return VDP_STATUS_RESOURCES;
            }
            // fix: these assignments erroneously targeted dstSurfData before,
            // leaving the reference surface without a VA surface
            vdpSurfData->decoder = decoder;
            vdpSurfData->va_surf = decoderData->render_targets[idx];
            vdpSurfData->rt_idx  = idx;
        }

        va_ref->picture_id = vdpSurfData->va_surf;
        va_ref->frame_idx  = vdp_ref->frame_idx;
        va_ref->flags = vdp_ref->is_long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE
                                              : VA_PICTURE_H264_SHORT_TERM_REFERENCE;

        if (vdp_ref->top_is_reference && vdp_ref->bottom_is_reference) {
            // Full frame. This block intentionally left blank. No flags set.
        } else {
            if (vdp_ref->top_is_reference)
                va_ref->flags |= VA_PICTURE_H264_TOP_FIELD;
            else
                va_ref->flags |= VA_PICTURE_H264_BOTTOM_FIELD;
        }

        va_ref->TopFieldOrderCnt    = vdp_ref->field_order_cnt[0];
        va_ref->BottomFieldOrderCnt = vdp_ref->field_order_cnt[1];
        handle_release(vdp_ref->surface);
    }

    return VDP_STATUS_OK;
}
// Reports decoder capabilities for `profile`: the intersection of what the
// VA driver advertises (vaQueryConfigProfiles) and what this library
// actually implements.
VdpStatus
vdpDecoderQueryCapabilities(VdpDevice device, VdpDecoderProfile profile, VdpBool *is_supported,
                            uint32_t *max_level, uint32_t *max_macroblocks,
                            uint32_t *max_width, uint32_t *max_height)
{
    VdpStatus err_code;
    if (!is_supported || !max_level || !max_macroblocks || !max_width || !max_height)
        return VDP_STATUS_INVALID_POINTER;
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    // out-parameters default to zero; overwritten below for supported profiles
    *max_level = 0;
    *max_macroblocks = 0;
    *max_width = 0;
    *max_height = 0;

    // without a VA display nothing can be decoded at all
    if (!deviceData->va_available) {
        *is_supported = 0;
        err_code = VDP_STATUS_OK;
        goto quit;
    }

    VAProfile *va_profile_list = malloc(sizeof(VAProfile) * vaMaxNumProfiles(deviceData->va_dpy));
    if (NULL == va_profile_list) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    int num_profiles;
    VAStatus status = vaQueryConfigProfiles(deviceData->va_dpy, va_profile_list, &num_profiles);
    if (VA_STATUS_SUCCESS != status) {
        free(va_profile_list);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    // non-zero flag means the corresponding VDPAU profile is reported supported
    struct {
        int mpeg2_simple;
        int mpeg2_main;
        int h264_baseline;
        int h264_main;
        int h264_high;
        int vc1_simple;
        int vc1_main;
        int vc1_advanced;
    } available_profiles = {0, 0, 0, 0, 0, 0, 0, 0};

    for (int k = 0; k < num_profiles; k ++) {
        switch (va_profile_list[k]) {
        // NOTE(review): the MPEG2 and VC1 branches assign 0 even when the VA
        // driver advertises those profiles. This appears deliberate — the
        // render path (vdpDecoderRender) implements H.264 only — but confirm
        // before "fixing" these to 1.
        case VAProfileMPEG2Main:
            available_profiles.mpeg2_main = 0;
            /* fall through */
        case VAProfileMPEG2Simple:
            available_profiles.mpeg2_simple = 0;
            break;
        // H.264: support for a higher profile implies the lower ones
        case VAProfileH264High:
            available_profiles.h264_high = 1;
            /* fall through */
        case VAProfileH264Main:
            available_profiles.h264_main = 1;
            /* fall through */
        case VAProfileH264Baseline:
            available_profiles.h264_baseline = 1;
            /* fall through */
        case VAProfileH264ConstrainedBaseline:
            break;
        case VAProfileVC1Advanced:
            available_profiles.vc1_advanced = 0;
            /* fall through */
        case VAProfileVC1Main:
            available_profiles.vc1_main = 0;
            /* fall through */
        case VAProfileVC1Simple:
            available_profiles.vc1_simple = 0;
            break;
        // unhandled profiles
        case VAProfileH263Baseline:
        case VAProfileJPEGBaseline:
        default:
            // do nothing
            break;
        }
    }
    free(va_profile_list);

    *is_supported = 0;
    // TODO: How to determine max width and height with libva?
    *max_width = 2048;
    *max_height = 2048;
    *max_macroblocks = 16384;

    switch (profile) {
    case VDP_DECODER_PROFILE_MPEG2_SIMPLE:
        *is_supported = available_profiles.mpeg2_simple;
        *max_level = VDP_DECODER_LEVEL_MPEG2_HL;
        break;
    case VDP_DECODER_PROFILE_MPEG2_MAIN:
        *is_supported = available_profiles.mpeg2_main;
        *max_level = VDP_DECODER_LEVEL_MPEG2_HL;
        break;
    case VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE:
        // constrained baseline decodes on baseline or main capable hardware
        *is_supported = available_profiles.h264_baseline || available_profiles.h264_main;
        *max_level = VDP_DECODER_LEVEL_H264_5_1;
        break;
    case VDP_DECODER_PROFILE_H264_BASELINE:
        *is_supported = available_profiles.h264_baseline;
        // TODO: Do underlying libva really support 5.1?
        *max_level = VDP_DECODER_LEVEL_H264_5_1;
        break;
    case VDP_DECODER_PROFILE_H264_MAIN:
        *is_supported = available_profiles.h264_main;
        *max_level = VDP_DECODER_LEVEL_H264_5_1;
        break;
    case VDP_DECODER_PROFILE_H264_HIGH:
        *is_supported = available_profiles.h264_high;
        *max_level = VDP_DECODER_LEVEL_H264_5_1;
        break;
    case VDP_DECODER_PROFILE_VC1_SIMPLE:
        *is_supported = available_profiles.vc1_simple;
        *max_level = VDP_DECODER_LEVEL_VC1_SIMPLE_MEDIUM;
        break;
    case VDP_DECODER_PROFILE_VC1_MAIN:
        *is_supported = available_profiles.vc1_main;
        *max_level = VDP_DECODER_LEVEL_VC1_MAIN_HIGH;
        break;
    case VDP_DECODER_PROFILE_VC1_ADVANCED:
        *is_supported = available_profiles.vc1_advanced;
        *max_level = VDP_DECODER_LEVEL_VC1_ADVANCED_L4;
        break;
    // unsupported
    case VDP_DECODER_PROFILE_MPEG1:
    case VDP_DECODER_PROFILE_MPEG4_PART2_SP:
    case VDP_DECODER_PROFILE_MPEG4_PART2_ASP:
    case VDP_DECODER_PROFILE_DIVX4_QMOBILE:
    case VDP_DECODER_PROFILE_DIVX4_MOBILE:
    case VDP_DECODER_PROFILE_DIVX4_HOME_THEATER:
    case VDP_DECODER_PROFILE_DIVX4_HD_1080P:
    case VDP_DECODER_PROFILE_DIVX5_QMOBILE:
    case VDP_DECODER_PROFILE_DIVX5_MOBILE:
    case VDP_DECODER_PROFILE_DIVX5_HOME_THEATER:
    case VDP_DECODER_PROFILE_DIVX5_HD_1080P:
    default:
        break;
    }

    err_code = VDP_STATUS_OK;
quit:
    handle_release(device);
    return err_code;
}
// Fills the VA-API H.264 picture parameter buffer from VDPAU picture info.
// Fields VDPAU does not carry are set to constants valid for 8-bit YUV420
// content (see the TODO markers for known simplifications).
static
void
h264_translate_pic_param(VAPictureParameterBufferH264 *pic_param, uint32_t width, uint32_t height,
                         const VdpPictureInfoH264 *vdppi, uint32_t level)
{
    pic_param->picture_width_in_mbs_minus1 = (width - 1) / 16;
    pic_param->picture_height_in_mbs_minus1 = (height - 1) / 16;
    pic_param->bit_depth_luma_minus8 = 0; // TODO: deal with more than 8 bits
    pic_param->bit_depth_chroma_minus8 = 0; // same for luma
    pic_param->num_ref_frames = vdppi->num_ref_frames;

    // shorthands for the nested bitfield structures
#define SEQ_FIELDS(fieldname) pic_param->seq_fields.bits.fieldname
#define PIC_FIELDS(fieldname) pic_param->pic_fields.bits.fieldname

    SEQ_FIELDS(chroma_format_idc) = 1; // TODO: not only YUV420
    SEQ_FIELDS(residual_colour_transform_flag) = 0;
    SEQ_FIELDS(gaps_in_frame_num_value_allowed_flag)= 0;
    SEQ_FIELDS(frame_mbs_only_flag) = vdppi->frame_mbs_only_flag;
    SEQ_FIELDS(mb_adaptive_frame_field_flag) = vdppi->mb_adaptive_frame_field_flag;
    SEQ_FIELDS(direct_8x8_inference_flag) = vdppi->direct_8x8_inference_flag;
    // `level` is given as ten times the H.264 level number, i.e. 31 == 3.1
    SEQ_FIELDS(MinLumaBiPredSize8x8) = (level >= 31);
    SEQ_FIELDS(log2_max_frame_num_minus4) = vdppi->log2_max_frame_num_minus4;
    SEQ_FIELDS(pic_order_cnt_type) = vdppi->pic_order_cnt_type;
    SEQ_FIELDS(log2_max_pic_order_cnt_lsb_minus4) = vdppi->log2_max_pic_order_cnt_lsb_minus4;
    SEQ_FIELDS(delta_pic_order_always_zero_flag) = vdppi->delta_pic_order_always_zero_flag;
    pic_param->num_slice_groups_minus1 = 0; // TODO: vdppi->slice_count - 1; ???
    pic_param->slice_group_map_type = 0; // ???
    pic_param->slice_group_change_rate_minus1 = 0; // ???
    pic_param->pic_init_qp_minus26 = vdppi->pic_init_qp_minus26;
    pic_param->pic_init_qs_minus26 = 0; // ???
    pic_param->chroma_qp_index_offset = vdppi->chroma_qp_index_offset;
    pic_param->second_chroma_qp_index_offset = vdppi->second_chroma_qp_index_offset;
    PIC_FIELDS(entropy_coding_mode_flag) = vdppi->entropy_coding_mode_flag;
    PIC_FIELDS(weighted_pred_flag) = vdppi->weighted_pred_flag;
    PIC_FIELDS(weighted_bipred_idc) = vdppi->weighted_bipred_idc;
    PIC_FIELDS(transform_8x8_mode_flag) = vdppi->transform_8x8_mode_flag;
    PIC_FIELDS(field_pic_flag) = vdppi->field_pic_flag;
    PIC_FIELDS(constrained_intra_pred_flag) = vdppi->constrained_intra_pred_flag;
    PIC_FIELDS(pic_order_present_flag) = vdppi->pic_order_present_flag;
    PIC_FIELDS(deblocking_filter_control_present_flag) =
        vdppi->deblocking_filter_control_present_flag;
    PIC_FIELDS(redundant_pic_cnt_present_flag) = vdppi->redundant_pic_cnt_present_flag;
    PIC_FIELDS(reference_pic_flag) = vdppi->is_reference;
    pic_param->frame_num = vdppi->frame_num;
#undef SEQ_FIELDS
#undef PIC_FIELDS
}
static
void
h264_translate_iq_matrix(VAIQMatrixBufferH264 *iq_matrix, const VdpPictureInfoH264 *vdppi)
{
    // Copies the VDPAU scaling lists into the VA-API inverse-quantization
    // matrix: six 4x4 lists (16 entries each) and two 8x8 lists (64 entries).
    for (int list = 0; list < 6; list ++) {
        for (int idx = 0; idx < 16; idx ++)
            iq_matrix->ScalingList4x4[list][idx] = vdppi->scaling_lists_4x4[list][idx];
    }
    for (int list = 0; list < 2; list ++) {
        for (int idx = 0; idx < 64; idx ++)
            iq_matrix->ScalingList8x8[list][idx] = vdppi->scaling_lists_8x8[list][idx];
    }
}
static
VdpStatus
vdpDecoderRender_h264(VdpDecoder decoder, VdpDecoderData *decoderData,
                      VdpVideoSurfaceData *dstSurfData, VdpPictureInfo const *picture_info,
                      uint32_t bitstream_buffer_count,
                      VdpBitstreamBuffer const *bitstream_buffers)
{
    // Decodes one H.264 picture: translates VDPAU picture info into VA-API
    // buffers, merges and re-splits the bitstream into slices, and submits
    // everything to the hardware decoder.
    VdpDeviceData *deviceData = decoderData->deviceData;
    VADisplay va_dpy = deviceData->va_dpy;
    VAStatus status;
    VdpStatus vs, err_code;
    VdpPictureInfoH264 const *vdppi = (void *)picture_info;
    // merged copy of all bitstream buffers; NULL-initialized so the single
    // free() at `quit` is safe on every path (fix: error paths used to leak it,
    // and gotos jumped over its declaration)
    uint8_t *merged_bitstream = NULL;

    // TODO: figure out where to get level
    uint32_t level = 41;

    // preparing picture parameters and IQ matrix
    VABufferID pic_param_buf, iq_matrix_buf;
    VAPictureParameterBufferH264 pic_param;
    VAIQMatrixBufferH264 iq_matrix;

    memset(&pic_param, 0, sizeof(pic_param));
    vs = h264_translate_reference_frames(dstSurfData, decoder, decoderData, &pic_param, vdppi);
    if (VDP_STATUS_OK != vs) {
        if (VDP_STATUS_RESOURCES == vs) {
            traceError("error (%s): no surfaces left in buffer\n", __func__);
            err_code = VDP_STATUS_RESOURCES;
        } else {
            err_code = VDP_STATUS_ERROR;
        }
        goto quit;
    }

    h264_translate_pic_param(&pic_param, decoderData->width, decoderData->height, vdppi, level);
    h264_translate_iq_matrix(&iq_matrix, vdppi);

    glx_ctx_lock();
    status = vaCreateBuffer(va_dpy, decoderData->context_id, VAPictureParameterBufferType,
        sizeof(VAPictureParameterBufferH264), 1, &pic_param, &pic_param_buf);
    if (VA_STATUS_SUCCESS != status) {
        glx_ctx_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    status = vaCreateBuffer(va_dpy, decoderData->context_id, VAIQMatrixBufferType,
        sizeof(VAIQMatrixBufferH264), 1, &iq_matrix, &iq_matrix_buf);
    if (VA_STATUS_SUCCESS != status) {
        glx_ctx_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    // send data to decoding hardware
    status = vaBeginPicture(va_dpy, decoderData->context_id, dstSurfData->va_surf);
    if (VA_STATUS_SUCCESS != status) {
        glx_ctx_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    status = vaRenderPicture(va_dpy, decoderData->context_id, &pic_param_buf, 1);
    if (VA_STATUS_SUCCESS != status) {
        glx_ctx_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    status = vaRenderPicture(va_dpy, decoderData->context_id, &iq_matrix_buf, 1);
    if (VA_STATUS_SUCCESS != status) {
        glx_ctx_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    vaDestroyBuffer(va_dpy, pic_param_buf);
    vaDestroyBuffer(va_dpy, iq_matrix_buf);
    glx_ctx_unlock();

    // merge bitstream buffers
    int total_bitstream_bytes = 0;
    for (unsigned int k = 0; k < bitstream_buffer_count; k ++)
        total_bitstream_bytes += bitstream_buffers[k].bitstream_bytes;

    merged_bitstream = malloc(total_bitstream_bytes);
    if (NULL == merged_bitstream) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    do {
        unsigned char *ptr = merged_bitstream;
        for (unsigned int k = 0; k < bitstream_buffer_count; k ++) {
            memcpy(ptr, bitstream_buffers[k].bitstream, bitstream_buffers[k].bitstream_bytes);
            ptr += bitstream_buffers[k].bitstream_bytes;
        }
    } while(0);

    // Slice parameters

    // All slice data have been merged into one continuous buffer. But we must supply
    // slices one by one to the hardware decoder, so we need to delimit them. VDPAU
    // requires bitstream buffers to include slice start code (0x00 0x00 0x01). Those
    // will be used to calculate offsets and sizes of slice data in code below.

    rbsp_state_t st_g;      // reference, global state
    rbsp_attach_buffer(&st_g, merged_bitstream, total_bitstream_bytes);
    int nal_offset = rbsp_navigate_to_nal_unit(&st_g);
    if (nal_offset < 0) {
        traceError("error (%s): no NAL header\n", __func__);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    do {
        VASliceParameterBufferH264 sp_h264;
        memset(&sp_h264, 0, sizeof(VASliceParameterBufferH264));

        // make a copy of global rbsp state for using in slice header parser
        rbsp_state_t st = rbsp_copy_state(&st_g);
        rbsp_reset_bit_counter(&st);
        int nal_offset_next = rbsp_navigate_to_nal_unit(&st_g);

        // calculate end of current slice. Note (-3). It's slice start code length.
        const unsigned int end_pos = (nal_offset_next > 0) ? (nal_offset_next - 3)
                                                           : total_bitstream_bytes;
        sp_h264.slice_data_size     = end_pos - nal_offset;
        sp_h264.slice_data_offset   = 0;
        sp_h264.slice_data_flag     = VA_SLICE_DATA_FLAG_ALL;

        // TODO: this may be not entirely true for YUV444
        // but if we limiting to YUV420, that's ok
        int ChromaArrayType = pic_param.seq_fields.bits.chroma_format_idc;

        // parse slice header and use its data to fill slice parameter buffer
        parse_slice_header(&st, &pic_param, ChromaArrayType, vdppi->num_ref_idx_l0_active_minus1,
                           vdppi->num_ref_idx_l1_active_minus1, &sp_h264);

        VABufferID slice_parameters_buf;
        glx_ctx_lock();
        status = vaCreateBuffer(va_dpy, decoderData->context_id, VASliceParameterBufferType,
            sizeof(VASliceParameterBufferH264), 1, &sp_h264, &slice_parameters_buf);
        if (VA_STATUS_SUCCESS != status) {
            glx_ctx_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }
        status = vaRenderPicture(va_dpy, decoderData->context_id, &slice_parameters_buf, 1);
        if (VA_STATUS_SUCCESS != status) {
            glx_ctx_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }

        VABufferID slice_buf;
        status = vaCreateBuffer(va_dpy, decoderData->context_id, VASliceDataBufferType,
            sp_h264.slice_data_size, 1, merged_bitstream + nal_offset, &slice_buf);
        if (VA_STATUS_SUCCESS != status) {
            glx_ctx_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }
        status = vaRenderPicture(va_dpy, decoderData->context_id, &slice_buf, 1);
        if (VA_STATUS_SUCCESS != status) {
            glx_ctx_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }

        vaDestroyBuffer(va_dpy, slice_parameters_buf);
        vaDestroyBuffer(va_dpy, slice_buf);
        glx_ctx_unlock();

        if (nal_offset_next < 0)        // nal_offset_next equals -1 when there is no slice
            break;                      // start code found. Thus that was the final slice.
        nal_offset = nal_offset_next;
    } while (1);

    glx_ctx_lock();
    status = vaEndPicture(va_dpy, decoderData->context_id);
    glx_ctx_unlock();
    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    dstSurfData->sync_va_to_glx = 1;
    err_code = VDP_STATUS_OK;
quit:
    free(merged_bitstream);     // fix: was only freed on the success path
    return err_code;
}
VdpStatus
vdpDecoderRender(VdpDecoder decoder, VdpVideoSurface target,
                 VdpPictureInfo const *picture_info, uint32_t bitstream_buffer_count,
                 VdpBitstreamBuffer const *bitstream_buffers)
{
    // Dispatches a decode request to the codec-specific implementation.
    // Only H.264 profiles are implemented.
    VdpStatus err_code;
    if (!picture_info || !bitstream_buffers)
        return VDP_STATUS_INVALID_POINTER;

    VdpDecoderData *decoderData = handle_acquire(decoder, HANDLETYPE_DECODER);
    VdpVideoSurfaceData *dstSurfData = handle_acquire(target, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == decoderData || NULL == dstSurfData) {
        err_code = VDP_STATUS_INVALID_HANDLE;
        goto quit;
    }

    if (decoderData->profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE ||
        decoderData->profile == VDP_DECODER_PROFILE_H264_BASELINE ||
        decoderData->profile == VDP_DECODER_PROFILE_H264_MAIN ||
        decoderData->profile == VDP_DECODER_PROFILE_H264_HIGH)
    {
        // fix: the return code used to be silently discarded, so decode
        // failures were reported as VDP_STATUS_OK to callers
        err_code = vdpDecoderRender_h264(decoder, decoderData, dstSurfData, picture_info,
                                         bitstream_buffer_count, bitstream_buffers);
    } else {
        traceError("error (%s): no implementation for profile %s\n", __func__,
                   reverse_decoder_profile(decoderData->profile));
        err_code = VDP_STATUS_NO_IMPLEMENTATION;
    }

quit:
    handle_release(decoder);
    handle_release(target);
    return err_code;
}
libvdpau-va-gl-0.3.6/src/api-video-mixer.c 0000664 0000000 0000000 00000035655 12665616006 0020312 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#define GL_GLEXT_PROTOTYPES
#include "ctx-stack.h"
#include
#include
#include
#include
#include
#include
#include "api.h"
#include "trace.h"
static
void
_free_video_mixer_pixmaps(VdpVideoMixerData *mixerData)
{
    // Releases the mixer's GLX pixmap and its backing X pixmap, resetting
    // both fields to None so the caller can safely recreate them.
    Display *dpy = mixerData->deviceData->display;

    if (None != mixerData->glx_pixmap) {
        glXDestroyGLXPixmap(dpy, mixerData->glx_pixmap);
        mixerData->glx_pixmap = None;
    }
    if (None != mixerData->pixmap) {
        XFreePixmap(dpy, mixerData->pixmap);
        mixerData->pixmap = None;
    }
}
static
void
_render_va_surf_to_texture(VdpVideoMixerData *videoMixerData, VdpVideoSurfaceData *srcSurfData)
{
    // Renders a VA-API video surface into the source surface's GL FBO via an
    // X pixmap bound as a GLX texture (texture-from-pixmap).
    VdpDeviceData *deviceData = videoMixerData->deviceData;
    Display *dpy = deviceData->display;

    if (srcSurfData->width != videoMixerData->pixmap_width ||
        srcSurfData->height != videoMixerData->pixmap_height)
    {
        // geometry changed: recreate the X pixmap and its GLX wrapper
        _free_video_mixer_pixmaps(videoMixerData);
        videoMixerData->pixmap = XCreatePixmap(dpy, deviceData->root, srcSurfData->width,
                                               srcSurfData->height, deviceData->color_depth);
        int fbconfig_attrs[] = {
            GLX_DRAWABLE_TYPE, GLX_PIXMAP_BIT,
            GLX_RENDER_TYPE, GLX_RGBA_BIT,
            GLX_X_RENDERABLE, GL_TRUE,
            GLX_Y_INVERTED_EXT, GL_TRUE,
            GLX_RED_SIZE, 8,
            GLX_GREEN_SIZE, 8,
            GLX_BLUE_SIZE, 8,
            GLX_ALPHA_SIZE, 8,
            GLX_DEPTH_SIZE, 16,
            GLX_BIND_TO_TEXTURE_RGBA_EXT, GL_TRUE,
            GL_NONE
        };
        int nconfigs;
        GLXFBConfig *fbconfig = glXChooseFBConfig(deviceData->display, deviceData->screen,
                                                  fbconfig_attrs, &nconfigs);
        if (NULL == fbconfig || nconfigs < 1) {
            // fix: result was previously unchecked; fbconfig[0] would crash here
            traceError("error (%s): glXChooseFBConfig returned no matching configs\n", __func__);
            if (fbconfig)
                XFree(fbconfig);
            return;   // pixmap_width/height stay stale, so next call retries
        }
        int pixmap_attrs[] = {
            GLX_TEXTURE_TARGET_EXT, GLX_TEXTURE_2D_EXT,
            GLX_MIPMAP_TEXTURE_EXT, GL_FALSE,
            GLX_TEXTURE_FORMAT_EXT, GLX_TEXTURE_FORMAT_RGB_EXT,
            GL_NONE
        };
        videoMixerData->glx_pixmap = glXCreatePixmap(dpy, fbconfig[0], videoMixerData->pixmap,
                                                     pixmap_attrs);
        XFree(fbconfig);  // fix: glX-allocated arrays must be freed with XFree, not free
        videoMixerData->pixmap_width = srcSurfData->width;
        videoMixerData->pixmap_height = srcSurfData->height;
    }

    glBindTexture(GL_TEXTURE_2D, videoMixerData->tex_id);
    deviceData->fn.glXBindTexImageEXT(deviceData->display, videoMixerData->glx_pixmap,
                                      GLX_FRONT_EXT, NULL);
    XSync(deviceData->display, False);
    vaPutSurface(deviceData->va_dpy, srcSurfData->va_surf, videoMixerData->pixmap,
                 0, 0, srcSurfData->width, srcSurfData->height,
                 0, 0, srcSurfData->width, srcSurfData->height,
                 NULL, 0, VA_FRAME_PICTURE);

    glBindFramebuffer(GL_FRAMEBUFFER, srcSurfData->fbo_id);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, srcSurfData->width, 0, srcSurfData->height, -1.0, 1.0);
    glViewport(0, 0, srcSurfData->width, srcSurfData->height);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glMatrixMode(GL_TEXTURE);
    glLoadIdentity();

    glDisable(GL_BLEND);
    glBegin(GL_QUADS);
    glTexCoord2f(0, 0); glVertex2f(0, 0);
    glTexCoord2f(1, 0); glVertex2f(srcSurfData->width, 0);
    glTexCoord2f(1, 1); glVertex2f(srcSurfData->width, srcSurfData->height);
    glTexCoord2f(0, 1); glVertex2f(0, srcSurfData->height);
    glEnd();
    glFinish();

    deviceData->fn.glXReleaseTexImageEXT(deviceData->display, videoMixerData->glx_pixmap,
                                         GLX_FRONT_EXT);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
VdpStatus
vdpVideoMixerCreate(VdpDevice device, uint32_t feature_count,
                    VdpVideoMixerFeature const *features, uint32_t parameter_count,
                    VdpVideoMixerParameter const *parameters,
                    void const *const *parameter_values, VdpVideoMixer *mixer)
{
    // Creates a video mixer object and its GL texture. Features and
    // parameters are currently accepted but ignored.
    if (!mixer)
        return VDP_STATUS_INVALID_POINTER;
    (void)feature_count; (void)features;                              // TODO: mixer features
    (void)parameter_count; (void)parameters; (void)parameter_values;  // TODO: mixer parameters

    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (!deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    VdpStatus err_code;
    VdpVideoMixerData *data = calloc(1, sizeof(*data));
    if (!data) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    data->type       = HANDLETYPE_VIDEO_MIXER;
    data->device     = device;
    data->deviceData = deviceData;
    data->pixmap     = None;
    data->glx_pixmap = None;
    // knowingly invalid geometry forces pixmap (re)creation on first render
    data->pixmap_width  = (uint32_t)(-1);
    data->pixmap_height = (uint32_t)(-1);

    glx_ctx_push_thread_local(deviceData);
    glGenTextures(1, &data->tex_id);
    glBindTexture(GL_TEXTURE_2D, data->tex_id);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    const GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (gl_error != GL_NO_ERROR) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        free(data);
        goto quit;
    }

    ref_device(deviceData);
    *mixer = handle_insert(data);
    err_code = VDP_STATUS_OK;
quit:
    handle_release(device);
    return err_code;
}
VdpStatus
vdpVideoMixerDestroy(VdpVideoMixer mixer)
{
    // Destroys a video mixer: frees its X/GLX pixmaps, deletes its GL texture,
    // drops the device reference and expunges the handle. Note that even when
    // a GL error is detected, the handle is still expunged and the data freed;
    // the error only affects the returned status code.
    VdpStatus err_code;
    VdpVideoMixerData *videoMixerData = handle_acquire(mixer, HANDLETYPE_VIDEO_MIXER);
    if (NULL == videoMixerData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = videoMixerData->deviceData;
    // pixmap teardown happens under the global GLX lock rather than with a
    // pushed thread-local context
    glx_ctx_lock();
    _free_video_mixer_pixmaps(videoMixerData);
    glx_ctx_unlock();
    glx_ctx_push_thread_local(deviceData);
    glDeleteTextures(1, &videoMixerData->tex_id);
    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    err_code = VDP_STATUS_OK;
quit:
    unref_device(deviceData);
    handle_expunge(mixer);
    free(videoMixerData);
    return err_code;
}
VdpStatus
vdpVideoMixerGetAttributeValues(VdpVideoMixer mixer, uint32_t attribute_count,
                                VdpVideoMixerAttribute const *attributes,
                                void *const *attribute_values)
{
    // Attribute readback is not implemented; parameters are intentionally unused.
    (void)mixer;
    (void)attribute_count;
    (void)attributes;
    (void)attribute_values;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpVideoMixerGetFeatureEnables(VdpVideoMixer mixer, uint32_t feature_count,
                               VdpVideoMixerFeature const *features, VdpBool *feature_enables)
{
    // Feature-enable readback is not implemented; parameters are intentionally unused.
    (void)mixer;
    (void)feature_count;
    (void)features;
    (void)feature_enables;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpVideoMixerGetFeatureSupport(VdpVideoMixer mixer, uint32_t feature_count,
                               VdpVideoMixerFeature const *features, VdpBool *feature_supports)
{
    // Feature-support queries are not implemented; parameters are intentionally unused.
    (void)mixer;
    (void)feature_count;
    (void)features;
    (void)feature_supports;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpVideoMixerGetParameterValues(VdpVideoMixer mixer, uint32_t parameter_count,
                                VdpVideoMixerParameter const *parameters,
                                void *const *parameter_values)
{
    // Parameter readback is not implemented; parameters are intentionally unused.
    (void)mixer;
    (void)parameter_count;
    (void)parameters;
    (void)parameter_values;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpVideoMixerQueryAttributeSupport(VdpDevice device, VdpVideoMixerAttribute attribute,
                                   VdpBool *is_supported)
{
    // Attribute-support queries are not implemented; parameters are intentionally unused.
    (void)device;
    (void)attribute;
    (void)is_supported;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpVideoMixerQueryAttributeValueRange(VdpDevice device, VdpVideoMixerAttribute attribute,
                                      void *min_value, void *max_value)
{
    // Attribute value-range queries are not implemented; parameters are intentionally unused.
    (void)device;
    (void)attribute;
    (void)min_value;
    (void)max_value;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpVideoMixerQueryFeatureSupport(VdpDevice device, VdpVideoMixerFeature feature,
                                 VdpBool *is_supported)
{
    // Feature-support queries are not implemented; parameters are intentionally unused.
    (void)device;
    (void)feature;
    (void)is_supported;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpVideoMixerQueryParameterSupport(VdpDevice device, VdpVideoMixerParameter parameter,
                                   VdpBool *is_supported)
{
    // Parameter-support queries are not implemented; parameters are intentionally unused.
    (void)device;
    (void)parameter;
    (void)is_supported;
    return VDP_STATUS_NO_IMPLEMENTATION;
}
VdpStatus
vdpVideoMixerQueryParameterValueRange(VdpDevice device, VdpVideoMixerParameter parameter,
                                      void *min_value, void *max_value)
{
    // Reports the supported range for a mixer parameter. Width and height are
    // answered with hardcoded limits (16..4096); everything else is unimplemented.
    (void)device;
    // fix: validate output pointers before memcpy'ing into them, consistent
    // with the VDP_STATUS_INVALID_POINTER convention used by other entry points
    if (!min_value || !max_value)
        return VDP_STATUS_INVALID_POINTER;
    uint32_t uint32_value;
    switch (parameter) {
    case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH:     // TODO: get actual limits
    case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT:    // TODO: get actual limits
        // width and height share the same hardcoded range, so the two former
        // duplicate branches are merged
        uint32_value = 16;
        memcpy(min_value, &uint32_value, sizeof(uint32_value));
        uint32_value = 4096;
        memcpy(max_value, &uint32_value, sizeof(uint32_value));
        return VDP_STATUS_OK;
    case VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE:             // TODO
    case VDP_VIDEO_MIXER_PARAMETER_LAYERS:                  // TODO
    default:
        return VDP_STATUS_NO_IMPLEMENTATION;
    }
}
VdpStatus
vdpVideoMixerRender(VdpVideoMixer mixer, VdpOutputSurface background_surface,
                    VdpRect const *background_source_rect,
                    VdpVideoMixerPictureStructure current_picture_structure,
                    uint32_t video_surface_past_count, VdpVideoSurface const *video_surface_past,
                    VdpVideoSurface video_surface_current, uint32_t video_surface_future_count,
                    VdpVideoSurface const *video_surface_future, VdpRect const *video_source_rect,
                    VdpOutputSurface destination_surface, VdpRect const *destination_rect,
                    VdpRect const *destination_video_rect, uint32_t layer_count,
                    VdpLayer const *layers)
{
    // Renders the current video surface into the destination output surface:
    // clears destination_rect to opaque black, then draws (and scales) the
    // source rectangle of the video texture into destination_video_rect.
    // Background, past/future surfaces, picture structure, and layers are all
    // ignored (see TODOs below).
    VdpStatus err_code;
    (void)mixer; // TODO: mixer should be used to get mixing parameters
    // TODO: current implementation ignores previous and future surfaces, using only current.
    // Is that acceptable for interlaced video? Will VAAPI handle deinterlacing?
    (void)background_surface; // TODO: background_surface. Is it safe to just ignore it?
    (void)background_source_rect;
    (void)current_picture_structure;
    (void)video_surface_past_count; (void)video_surface_past;
    (void)video_surface_future_count; (void)video_surface_future;
    (void)layer_count; (void)layers;
    VdpVideoMixerData *mixerData = handle_acquire(mixer, HANDLETYPE_VIDEO_MIXER);
    VdpVideoSurfaceData *srcSurfData =
        handle_acquire(video_surface_current, HANDLETYPE_VIDEO_SURFACE);
    VdpOutputSurfaceData *dstSurfData =
        handle_acquire(destination_surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == mixerData || NULL == srcSurfData || NULL == dstSurfData) {
        err_code = VDP_STATUS_INVALID_HANDLE;
        goto quit;
    }
    // all three objects must belong to the same device
    if (srcSurfData->deviceData != dstSurfData->deviceData ||
        srcSurfData->deviceData != mixerData->deviceData)
    {
        err_code = VDP_STATUS_HANDLE_DEVICE_MISMATCH;
        goto quit;
    }
    VdpDeviceData *deviceData = srcSurfData->deviceData;
    // default rectangles cover the full source/destination surfaces
    VdpRect srcVideoRect = {0, 0, srcSurfData->width, srcSurfData->height};
    if (video_source_rect)
        srcVideoRect = *video_source_rect;
    VdpRect dstRect = {0, 0, dstSurfData->width, dstSurfData->height};
    if (destination_rect)
        dstRect = *destination_rect;
    VdpRect dstVideoRect = srcVideoRect;
    if (destination_video_rect)
        dstVideoRect = *destination_video_rect;
    // TODO: dstRect should clip dstVideoRect
    glx_ctx_push_thread_local(deviceData);
    // lazily convert the VA surface into the video surface's GL texture
    if (srcSurfData->sync_va_to_glx) {
        _render_va_surf_to_texture(mixerData, srcSurfData);
        srcSurfData->sync_va_to_glx = 0;
    }
    glBindFramebuffer(GL_FRAMEBUFFER, dstSurfData->fbo_id);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, dstSurfData->width, 0, dstSurfData->height, -1.0f, 1.0f);
    glViewport(0, 0, dstSurfData->width, dstSurfData->height);
    glDisable(GL_BLEND);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glMatrixMode(GL_TEXTURE);
    glLoadIdentity();
    // texture matrix maps pixel-space glTexCoord2i values to [0,1] texel space
    glScalef(1.0f/srcSurfData->width, 1.0f/srcSurfData->height, 1.0f);
    // Clear dstRect area
    glDisable(GL_TEXTURE_2D);
    glColor4f(0, 0, 0, 1);
    glBegin(GL_QUADS);
    glVertex2f(dstRect.x0, dstRect.y0);
    glVertex2f(dstRect.x1, dstRect.y0);
    glVertex2f(dstRect.x1, dstRect.y1);
    glVertex2f(dstRect.x0, dstRect.y1);
    glEnd();
    // Render (maybe scaled) data from video surface
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, srcSurfData->tex_id);
    glColor4f(1, 1, 1, 1);
    glBegin(GL_QUADS);
    glTexCoord2i(srcVideoRect.x0, srcVideoRect.y0);
    glVertex2f(dstVideoRect.x0, dstVideoRect.y0);
    glTexCoord2i(srcVideoRect.x1, srcVideoRect.y0);
    glVertex2f(dstVideoRect.x1, dstVideoRect.y0);
    glTexCoord2i(srcVideoRect.x1, srcVideoRect.y1);
    glVertex2f(dstVideoRect.x1, dstVideoRect.y1);
    glTexCoord2i(srcVideoRect.x0, srcVideoRect.y1);
    glVertex2f(dstVideoRect.x0, dstVideoRect.y1);
    glEnd();
    glFinish();
    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    err_code = VDP_STATUS_OK;
quit:
    // NOTE(review): handles are released even when the corresponding acquire
    // returned NULL — presumably handle_release tolerates that; verify.
    handle_release(video_surface_current);
    handle_release(destination_surface);
    handle_release(mixer);
    return err_code;
}
VdpStatus
vdpVideoMixerSetAttributeValues(VdpVideoMixer mixer, uint32_t attribute_count,
                                VdpVideoMixerAttribute const *attributes,
                                void const *const *attribute_values)
{
    // Attributes are not implemented; accept them silently so callers proceed.
    (void)mixer;
    (void)attribute_count;
    (void)attributes;
    (void)attribute_values;
    return VDP_STATUS_OK;
}
VdpStatus
vdpVideoMixerSetFeatureEnables(VdpVideoMixer mixer, uint32_t feature_count,
                               VdpVideoMixerFeature const *features, VdpBool const *feature_enables)
{
    // Features are not implemented; accept them silently so callers proceed.
    (void)mixer;
    (void)feature_count;
    (void)features;
    (void)feature_enables;
    return VDP_STATUS_OK;
}
libvdpau-va-gl-0.3.6/src/api-video-surface.c 0000664 0000000 0000000 00000051417 12665616006 0020610 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#define GL_GLEXT_PROTOTYPES
#include "ctx-stack.h"
#include <GL/gl.h>
#include <GL/glu.h>
#include <libswscale/swscale.h>
#include "compat.h"
#include "shaders.h"
#include <stdlib.h>
#include <string.h>
#include <va/va.h>
#include <vdpau/vdpau.h>
#include "api.h"
#include "trace.h"
VdpStatus
vdpVideoSurfaceCreate(VdpDevice device, VdpChromaType chroma_type, uint32_t width, uint32_t height,
                      VdpVideoSurface *surface)
{
    // Creates a video surface: an RGBA GL texture + FBO pair plus bookkeeping
    // for the VA surface that VdpDecoderCreate will attach later. Returns the
    // new handle in *surface.
    VdpStatus err_code;
    if (!surface)
        return VDP_STATUS_INVALID_POINTER;
    if (chroma_type != VDP_CHROMA_TYPE_420 &&
        chroma_type != VDP_CHROMA_TYPE_422 &&
        chroma_type != VDP_CHROMA_TYPE_444)
    {
        return VDP_STATUS_INVALID_CHROMA_TYPE;
    }
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpVideoSurfaceData *data = calloc(1, sizeof(VdpVideoSurfaceData));
    if (NULL == data) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }
    data->type = HANDLETYPE_VIDEO_SURFACE;
    data->device = device;
    data->deviceData = deviceData;
    data->chroma_type = chroma_type;
    data->width = width;
    data->height = height;
    // plane geometry; strides are rounded up to 16-byte multiples
    switch (chroma_type) {
    case VDP_CHROMA_TYPE_420:
        data->chroma_width = ((width + 1) & (~1u)) / 2;
        data->chroma_height = ((height + 1) & (~1u)) / 2;
        data->stride = (width + 0xfu) & (~0xfu);
        break;
    case VDP_CHROMA_TYPE_422:
        data->chroma_width = ((width + 1) & (~1u)) / 2;
        data->chroma_height = height;
        data->stride = (width + 2 * data->chroma_width + 0xfu) & (~0xfu);
        break;
    case VDP_CHROMA_TYPE_444:
        data->chroma_width = width;
        data->chroma_height = height;
        data->stride = (4 * width + 0xfu) & (~0xfu);
        break;
    }
    data->chroma_stride = (data->chroma_width + 0xfu) & (~0xfu);
    data->va_surf = VA_INVALID_SURFACE;
    data->tex_id = 0;
    data->sync_va_to_glx = 0;
    data->decoder = VDP_INVALID_HANDLE;
    data->y_plane = NULL;
    data->u_plane = NULL;
    data->v_plane = NULL;
    glx_ctx_push_thread_local(deviceData);
    glGenTextures(1, &data->tex_id);
    glBindTexture(GL_TEXTURE_2D, data->tex_id);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, data->width, data->height, 0,
                 GL_BGRA, GL_UNSIGNED_BYTE, NULL);
    glGenFramebuffers(1, &data->fbo_id);
    glBindFramebuffer(GL_FRAMEBUFFER, data->fbo_id);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, data->tex_id, 0);
    GLenum gl_status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (GL_FRAMEBUFFER_COMPLETE != gl_status) {
        traceError("error (%s): framebuffer not ready, %d, %s\n", __func__, gl_status,
                   gluErrorString(gl_status));
        // fix: delete the GL objects created above instead of leaking them
        glDeleteFramebuffers(1, &data->fbo_id);
        glDeleteTextures(1, &data->tex_id);
        glx_ctx_pop();
        free(data);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    glFinish();
    GLenum gl_error = glGetError();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        // fix: same cleanup on the generic GL error path
        glDeleteFramebuffers(1, &data->fbo_id);
        glDeleteTextures(1, &data->tex_id);
        glx_ctx_pop();
        free(data);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    glx_ctx_pop();
    // no VA surface creation here. Actual pool of VA surfaces should be allocated already
    // by VdpDecoderCreate. VdpDecoderCreate will update ->va_surf field as needed.
    ref_device(deviceData);
    *surface = handle_insert(data);
    err_code = VDP_STATUS_OK;
quit:
    handle_release(device);
    return err_code;
}
VdpStatus
vdpVideoSurfaceDestroy(VdpVideoSurface surface)
{
    // Destroys a video surface: deletes its GL texture and framebuffer object,
    // returns its VA render target to the owning decoder's free list, frees
    // the software plane buffers, and expunges the handle.
    VdpVideoSurfaceData *videoSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == videoSurfData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = videoSurfData->deviceData;
    glx_ctx_push_thread_local(deviceData);
    // fix: the FBO created in vdpVideoSurfaceCreate was never deleted here,
    // leaking one GL framebuffer object per surface
    glDeleteFramebuffers(1, &videoSurfData->fbo_id);
    glDeleteTextures(1, &videoSurfData->tex_id);
    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        handle_release(surface);
        return VDP_STATUS_ERROR;
    }
    if (deviceData->va_available) {
        // return VA surface to the free list
        if (videoSurfData->decoder != VDP_INVALID_HANDLE) {
            VdpDecoderData *dd = handle_acquire(videoSurfData->decoder, HANDLETYPE_DECODER);
            if (NULL != dd) {
                free_list_push(dd->free_list, &dd->free_list_head, videoSurfData->rt_idx);
                handle_release(videoSurfData->decoder);
            }
        }
        // .va_surf will be freed in VdpDecoderDestroy
    }
    // free(NULL) is a no-op, so no guards are needed
    free(videoSurfData->y_plane);
    free(videoSurfData->u_plane);
    // do not free videoSurfData->v_plane, it's just pointer into the middle of u_plane
    unref_device(deviceData);
    handle_expunge(surface);
    free(videoSurfData);
    return VDP_STATUS_OK;
}
VdpStatus
vdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface, VdpYCbCrFormat destination_ycbcr_format,
                            void *const *destination_data, uint32_t const *destination_pitches)
{
    // Reads back the surface's VA image into caller-supplied planes.
    // Supported conversions: VA NV12 -> VDP NV12 (plain copy, fast path when
    // pitches match) and VA NV12 -> VDP YV12 (interleaved UV unpacked into two
    // planes). Anything else fails with VDP_STATUS_INVALID_Y_CB_CR_FORMAT.
    VdpStatus err_code;
    if (!destination_data || !destination_pitches)
        return VDP_STATUS_INVALID_POINTER;
    VdpVideoSurfaceData *srcSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == srcSurfData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = srcSurfData->deviceData;
    VADisplay va_dpy = deviceData->va_dpy;
    if (deviceData->va_available) {
        VAImage q;
        // NOTE(review): vaDeriveImage/vaMapBuffer return values are not
        // checked — a failure here would leave q uninitialized; confirm.
        vaDeriveImage(va_dpy, srcSurfData->va_surf, &q);
        if (VA_FOURCC('N', 'V', '1', '2') == q.format.fourcc &&
            VDP_YCBCR_FORMAT_NV12 == destination_ycbcr_format)
        {
            uint8_t *img_data;
            vaMapBuffer(va_dpy, q.buf, (void **)&img_data);
            if (destination_pitches[0] == q.pitches[0] &&
                destination_pitches[1] == q.pitches[1])
            {
                // pitches match: copy both planes in two memcpy calls
                const uint32_t sz = (uint32_t)q.width * (uint32_t)q.height;
                memcpy(destination_data[0], img_data + q.offsets[0], sz);
                memcpy(destination_data[1], img_data + q.offsets[1], sz / 2);
            } else {
                // pitches differ: copy row by row
                uint8_t *src = img_data + q.offsets[0];
                uint8_t *dst = destination_data[0];
                for (unsigned int y = 0; y < q.height; y ++) {  // Y plane
                    memcpy (dst, src, q.width);
                    src += q.pitches[0];
                    dst += destination_pitches[0];
                }
                src = img_data + q.offsets[1];
                dst = destination_data[1];
                for (unsigned int y = 0; y < q.height / 2; y ++) {  // UV plane
                    memcpy(dst, src, q.width); // q.width/2 samples of U and V each, hence q.width
                    src += q.pitches[1];
                    dst += destination_pitches[1];
                }
            }
            vaUnmapBuffer(va_dpy, q.buf);
        } else if (VA_FOURCC('N', 'V', '1', '2') == q.format.fourcc &&
                   VDP_YCBCR_FORMAT_YV12 == destination_ycbcr_format)
        {
            uint8_t *img_data;
            vaMapBuffer(va_dpy, q.buf, (void **)&img_data);
            // Y plane
            if (destination_pitches[0] == q.pitches[0]) {
                const uint32_t sz = (uint32_t)q.width * (uint32_t)q.height;
                memcpy(destination_data[0], img_data + q.offsets[0], sz);
            } else {
                uint8_t *src = img_data + q.offsets[0];
                uint8_t *dst = destination_data[0];
                for (unsigned int y = 0; y < q.height; y ++) {
                    memcpy (dst, src, q.width);
                    src += q.pitches[0];
                    dst += destination_pitches[0];
                }
            }
            // unpack mixed UV to separate planes
            // NOTE(review): the first interleaved byte is written through
            // dst_v (destination_data[2]) and the second through dst_u
            // (destination_data[1]); whether this matches the VDPAU YV12 plane
            // order depends on which plane index carries V — confirm against
            // the VDPAU spec before renaming anything here.
            for (unsigned int y = 0; y < q.height/2; y ++) {
                uint8_t *src = img_data + q.offsets[1] + y * q.pitches[1];
                uint8_t *dst_u = destination_data[1] + y * destination_pitches[1];
                uint8_t *dst_v = destination_data[2] + y * destination_pitches[2];
                for (unsigned int x = 0; x < q.width/2; x++) {
                    *dst_v++ = *src++;
                    *dst_u++ = *src++;
                }
            }
            vaUnmapBuffer(va_dpy, q.buf);
        } else {
            const char *c = (const char *)&q.format.fourcc;
            traceError("error (%s): not implemented conversion VA FOURCC %c%c%c%c -> %s\n",
                       __func__, *c, *(c+1), *(c+2), *(c+3),
                       reverse_ycbcr_format(destination_ycbcr_format));
            vaDestroyImage(va_dpy, q.image_id);
            err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
            goto quit;
        }
        vaDestroyImage(va_dpy, q.image_id);
    } else {
        // software fallback
        traceError("error (%s): not implemented software fallback\n", __func__);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    // NOTE(review): glGetError() is called without pushing a GL context here —
    // presumably relies on whatever context is current on this thread; verify.
    GLenum gl_error = glGetError();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    err_code = VDP_STATUS_OK;
quit:
    handle_release(surface);
    return err_code;
}
VdpStatus
vdpVideoSurfaceGetParameters(VdpVideoSurface surface, VdpChromaType *chroma_type,
                             uint32_t *width, uint32_t *height)
{
    // Report the chroma type and dimensions the surface was created with.
    if (!chroma_type || !width || !height)
        return VDP_STATUS_INVALID_POINTER;
    VdpVideoSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == surfData)
        return VDP_STATUS_INVALID_HANDLE;
    *chroma_type = surfData->chroma_type;
    *width = surfData->width;
    *height = surfData->height;
    handle_release(surface);
    return VDP_STATUS_OK;
}
static
int
vdpau_ycbcr_to_av_pixfmt(int fmt)
{
    // Map a VDPAU YCbCr format constant onto the corresponding libav pixel
    // format; formats without a libav equivalent map to AV_PIX_FMT_NONE.
    switch (fmt) {
    case VDP_YCBCR_FORMAT_NV12:
        return AV_PIX_FMT_NV12;
    case VDP_YCBCR_FORMAT_YV12:
        return AV_PIX_FMT_YUV420P;
    case VDP_YCBCR_FORMAT_UYVY:
        return AV_PIX_FMT_UYVY422;
    case VDP_YCBCR_FORMAT_YUYV:
        return AV_PIX_FMT_YUYV422;
    case VDP_YCBCR_FORMAT_Y8U8V8A8:
    case VDP_YCBCR_FORMAT_V8U8Y8A8:
    default:
        return AV_PIX_FMT_NONE;
    }
}
static
VdpStatus
_video_surface_ensure_allocated(VdpVideoSurfaceData *surf)
{
    // Lazily allocates the system-memory plane buffers for a video surface.
    // Idempotent: a non-NULL y_plane means buffers already exist.
    // For 4:2:0, U and V live in one allocation with v_plane pointing at its
    // second half; for 4:2:2/4:4:4 everything is packed into y_plane.
    const uint32_t chroma_plane_size =
        (surf->chroma_stride * surf->chroma_height + 0xfu) & (~0xfu);
    if (surf->y_plane)
        return VDP_STATUS_OK;
    switch (surf->chroma_type) {
    case VDP_CHROMA_TYPE_420:
        surf->y_plane = malloc(surf->stride * surf->height);
        if (!surf->y_plane)
            return VDP_STATUS_RESOURCES;
        surf->u_plane = malloc(chroma_plane_size * 2);
        if (!surf->u_plane) {
            free(surf->y_plane);
            // fix: reset y_plane, otherwise the dangling pointer makes a later
            // call return VDP_STATUS_OK and leads to use-after-free
            surf->y_plane = NULL;
            return VDP_STATUS_RESOURCES;
        }
        surf->v_plane = surf->u_plane + chroma_plane_size;
        return VDP_STATUS_OK;
    case VDP_CHROMA_TYPE_422:
    case VDP_CHROMA_TYPE_444:
        // both cases had identical bodies; merged
        surf->y_plane = malloc(surf->stride * surf->height);
        if (!surf->y_plane)
            return VDP_STATUS_RESOURCES;
        surf->u_plane = surf->v_plane = NULL;
        return VDP_STATUS_OK;
    default:
        return VDP_STATUS_INVALID_CHROMA_TYPE;
    }
}
static
VdpStatus
vdpVideoSurfacePutBitsYCbCr_swscale(VdpVideoSurface surface, VdpYCbCrFormat source_ycbcr_format,
                                    void const *const *source_data, uint32_t const *source_pitches)
{
    // Software (swscale) upload path. Currently a stub: it validates the
    // format/chroma-type combination and allocates the plane buffers, but the
    // actual pixel copy is not implemented yet (see TODOs).
    VdpStatus err_code;
    // TODO: implement this
    VdpVideoSurfaceData *dstSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    // TODO: remove following (void)'s
    (void)vdpau_ycbcr_to_av_pixfmt;
    (void)source_pitches;
    (void)source_data;
    if (NULL == dstSurfData)
        return VDP_STATUS_INVALID_HANDLE;
    // sanity check: the source format must match the surface's chroma type
    switch (source_ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:
        // fall through
    case VDP_YCBCR_FORMAT_YV12:
        if (dstSurfData->chroma_type != VDP_CHROMA_TYPE_420) {
            err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
            goto err;
        }
        break;
    case VDP_YCBCR_FORMAT_UYVY:
        // fall through
    case VDP_YCBCR_FORMAT_YUYV:
        if (dstSurfData->chroma_type != VDP_CHROMA_TYPE_422) {
            err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
            goto err;
        }
        break;
    case VDP_YCBCR_FORMAT_Y8U8V8A8:
        // fall through
    case VDP_YCBCR_FORMAT_V8U8Y8A8:
        if (dstSurfData->chroma_type != VDP_CHROMA_TYPE_444) {
            err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
            goto err;
        }
        break;
    default:
        err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
        goto err;
    }
    // fix: the allocation result was previously ignored; propagate failures
    err_code = _video_surface_ensure_allocated(dstSurfData);
    if (VDP_STATUS_OK != err_code)
        goto err;
    dstSurfData->format = source_ycbcr_format;
    // TODO: actual data upload goes here
    switch (source_ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:
    case VDP_YCBCR_FORMAT_YV12:     // 420
    case VDP_YCBCR_FORMAT_UYVY:     // 422
    case VDP_YCBCR_FORMAT_YUYV:     // 422
    case VDP_YCBCR_FORMAT_Y8U8V8A8: // 444
    case VDP_YCBCR_FORMAT_V8U8Y8A8: // 444
        break;
    }
    err_code = VDP_STATUS_OK;
err:
    handle_release(surface);
    return err_code;
}
static
VdpStatus
vdpVideoSurfacePutBitsYCbCr_glsl(VdpVideoSurface surface, VdpYCbCrFormat source_ycbcr_format,
                                 void const *const *source_data, uint32_t const *source_pitches)
{
    // GLSL upload path: uploads the caller's NV12/YV12 planes into temporary
    // GL textures and runs a fragment shader that converts them to RGBA in the
    // surface's FBO-attached texture. Other YCbCr formats are rejected.
    VdpStatus err_code;
    if (!source_data || !source_pitches)
        return VDP_STATUS_INVALID_POINTER;
    // TODO: implement VDP_YCBCR_FORMAT_UYVY
    // TODO: implement VDP_YCBCR_FORMAT_YUYV
    // TODO: implement VDP_YCBCR_FORMAT_Y8U8V8A8
    // TODO: implement VDP_YCBCR_FORMAT_V8U8Y8A8
    VdpVideoSurfaceData *dstSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == dstSurfData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = dstSurfData->deviceData;
    switch (source_ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:
    case VDP_YCBCR_FORMAT_YV12:
        /* do nothing */
        break;
    case VDP_YCBCR_FORMAT_UYVY:
    case VDP_YCBCR_FORMAT_YUYV:
    case VDP_YCBCR_FORMAT_Y8U8V8A8:
    case VDP_YCBCR_FORMAT_V8U8Y8A8:
    default:
        traceError("error (%s): not implemented source YCbCr format '%s'\n", __func__,
                   reverse_ycbcr_format(source_ycbcr_format));
        err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
        goto err;
    }
    glx_ctx_push_thread_local(deviceData);
    glBindFramebuffer(GL_FRAMEBUFFER, dstSurfData->fbo_id);
    // two scratch textures: [0] luma on texture unit 0, [1] chroma on unit 1
    GLuint tex_id[2];
    glGenTextures(2, tex_id);
    glEnable(GL_TEXTURE_2D);
    // NOTE(review): GL_UNPACK_ROW_LENGTH is measured in pixels while
    // source_pitches are byte pitches. That matches for the 1-byte GL_RED
    // uploads; for the 2-byte GL_RG UV plane below it assumes pitch equals the
    // pixel count — confirm before relying on nonstandard pitches.
    switch (source_ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:
        glActiveTexture(GL_TEXTURE1);
        glBindTexture(GL_TEXTURE_2D, tex_id[1]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        // UV plane
        glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[1]);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dstSurfData->width/2, dstSurfData->height/2, 0,
                     GL_RG, GL_UNSIGNED_BYTE, source_data[1]);
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, tex_id[0]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        // Y plane
        glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0]);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dstSurfData->width, dstSurfData->height, 0, GL_RED,
                     GL_UNSIGNED_BYTE, source_data[0]);
        break;
    case VDP_YCBCR_FORMAT_YV12:
        // U and V are stacked into one (width/2 x height) texture:
        // U rows occupy the top half, V rows start at y = height/2
        glActiveTexture(GL_TEXTURE1);
        glBindTexture(GL_TEXTURE_2D, tex_id[1]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dstSurfData->width/2, dstSurfData->height, 0,
                     GL_RED, GL_UNSIGNED_BYTE, NULL);
        // U plane
        glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[2]);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, dstSurfData->width/2, dstSurfData->height/2, GL_RED,
                        GL_UNSIGNED_BYTE, source_data[2]);
        // V plane
        glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[1]);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, dstSurfData->height/2, dstSurfData->width/2,
                        dstSurfData->height/2, GL_RED, GL_UNSIGNED_BYTE, source_data[1]);
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, tex_id[0]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        // Y plane
        glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0]);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dstSurfData->width, dstSurfData->height, 0, GL_RED,
                     GL_UNSIGNED_BYTE, source_data[0]);
        break;
    }
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    // set up a 1:1 orthographic projection covering the whole surface
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, dstSurfData->width, 0, dstSurfData->height, -1.0f, 1.0f);
    glViewport(0, 0, dstSurfData->width, dstSurfData->height);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glMatrixMode(GL_TEXTURE);
    glLoadIdentity();
    glDisable(GL_BLEND);
    // select the conversion shader and bind its samplers to units 0/1
    switch (source_ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:
        glUseProgram(deviceData->shaders[glsl_NV12_RGBA].program);
        glUniform1i(deviceData->shaders[glsl_NV12_RGBA].uniform.tex_0, 0);
        glUniform1i(deviceData->shaders[glsl_NV12_RGBA].uniform.tex_1, 1);
        break;
    case VDP_YCBCR_FORMAT_YV12:
        glUseProgram(deviceData->shaders[glsl_YV12_RGBA].program);
        glUniform1i(deviceData->shaders[glsl_YV12_RGBA].uniform.tex_0, 0);
        glUniform1i(deviceData->shaders[glsl_YV12_RGBA].uniform.tex_1, 1);
        break;
    }
    // full-surface quad drives the fragment shader over every pixel
    glBegin(GL_QUADS);
    glTexCoord2f(0, 0); glVertex2f(0, 0);
    glTexCoord2f(1, 0); glVertex2f(dstSurfData->width, 0);
    glTexCoord2f(1, 1); glVertex2f(dstSurfData->width, dstSurfData->height);
    glTexCoord2f(0, 1); glVertex2f(0, dstSurfData->height);
    glEnd();
    glUseProgram(0);
    glFinish();
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glDeleteTextures(2, tex_id);
    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto err;
    }
    err_code = VDP_STATUS_OK;
err:
    handle_release(surface);
    return err_code;
}
VdpStatus
vdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface, VdpYCbCrFormat source_ycbcr_format,
                            void const *const *source_data, uint32_t const *source_pitches)
{
    // Dispatch to the GLSL upload path. The swscale variant is kept available
    // behind this compile-time switch but is currently unused.
    const int using_glsl = 1;
    if (using_glsl)
        return vdpVideoSurfacePutBitsYCbCr_glsl(surface, source_ycbcr_format, source_data,
                                                source_pitches);
    return vdpVideoSurfacePutBitsYCbCr_swscale(surface, source_ycbcr_format, source_data,
                                               source_pitches);
}
VdpStatus
vdpVideoSurfaceQueryCapabilities(VdpDevice device, VdpChromaType surface_chroma_type,
                                 VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height)
{
    // Report video surface capabilities. TODO: query real limits; for now
    // every chroma type is claimed supported up to a hardcoded 1920x1080.
    (void)device;
    (void)surface_chroma_type;
    if (!is_supported || !max_width || !max_height)
        return VDP_STATUS_INVALID_POINTER;
    *is_supported = 1;
    *max_width = 1920;
    *max_height = 1080;
    return VDP_STATUS_OK;
}
VdpStatus
vdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaType surface_chroma_type,
                                                VdpYCbCrFormat bits_ycbcr_format,
                                                VdpBool *is_supported)
{
    // Report get/put-bits capabilities. TODO: answer honestly per format;
    // for now every combination is claimed supported.
    (void)device;
    (void)surface_chroma_type;
    (void)bits_ycbcr_format;
    if (!is_supported)
        return VDP_STATUS_INVALID_POINTER;
    *is_supported = 1;
    return VDP_STATUS_OK;
}
libvdpau-va-gl-0.3.6/src/api.h 0000664 0000000 0000000 00000031011 12665616006 0016047 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#ifndef VA_GL_SRC_API_H
#define VA_GL_SRC_API_H
#include <GL/glx.h>
#include <pthread.h>
#include <va/va.h>
#include <vdpau/vdpau.h>
#include "handle-storage.h"
#include "shaders.h"
#define DESCRIBE(xparam, format) fprintf(stderr, #xparam " = %" #format "\n", xparam)
#define MAX_RENDER_TARGETS 21
#define NUM_RENDER_TARGETS_H264 21
#define VDP_GENERIC_HANDLE_FIELDS \
struct { \
HandleType type; /**< handle type */ \
VdpDevice device; \
VdpDeviceData *deviceData; \
pthread_mutex_t lock; \
}
typedef struct VdpDeviceData VdpDeviceData;
/** @brief Generic handle struct.
Every other handle struct has same members at same place so it's possible
to use type casting to determine handle type and parent.
*/
typedef struct {
VDP_GENERIC_HANDLE_FIELDS; ///< base struct
} VdpGenericData;
/** @brief VdpDevice object parameters */
struct VdpDeviceData {
VDP_GENERIC_HANDLE_FIELDS; ///< base struct
int refcount;
pthread_mutex_t refcount_mutex;
Display *display; ///< own X display connection
Display *display_orig; ///< supplied X display connection
int screen; ///< X screen
int color_depth; ///< screen color depth
GLXContext root_glc; ///< master GL context
Window root; ///< X drawable (root window) used for offscreen drawing
VADisplay va_dpy; ///< VA display
int va_available; ///< 1 if VA-API available
int va_version_major;
int va_version_minor;
GLuint watermark_tex_id; ///< GL texture id for watermark
struct {
GLuint f_shader;
GLuint program;
struct {
int tex_0;
int tex_1;
} uniform;
} shaders[SHADER_COUNT];
struct {
PFNGLXBINDTEXIMAGEEXTPROC glXBindTexImageEXT;
PFNGLXRELEASETEXIMAGEEXTPROC glXReleaseTexImageEXT;
} fn;
};
/** @brief VdpVideoMixer object parameters */
typedef struct {
VDP_GENERIC_HANDLE_FIELDS;
uint32_t pixmap_width; ///< last seen width
uint32_t pixmap_height; ///< last seen height
Pixmap pixmap; ///< target pixmap for vaPutSurface
GLXPixmap glx_pixmap; ///< associated glx pixmap for texture-from-pixmap
GLuint tex_id; ///< texture for texture-from-pixmap
} VdpVideoMixerData;
/** @brief VdpOutputSurface object parameters */
typedef struct {
VDP_GENERIC_HANDLE_FIELDS; ///< base struct
VdpRGBAFormat rgba_format; ///< RGBA format of data stored
GLuint tex_id; ///< associated GL texture id
GLuint fbo_id; ///< framebuffer object id
uint32_t width;
uint32_t height;
GLuint gl_internal_format; ///< GL texture format: internal format
GLuint gl_format; ///< GL texture format: preferred external format
GLuint gl_type; ///< GL texture format: pixel type
unsigned int bytes_per_pixel; ///< number of bytes per pixel
VdpTime first_presentation_time; ///< first displayed time in queue
VdpPresentationQueueStatus status; ///< status in presentation queue
VdpTime queued_at;
} VdpOutputSurfaceData;
/** @brief VdpPresentationQueueTarget object parameters */
typedef struct {
VDP_GENERIC_HANDLE_FIELDS; ///< base struct
int refcount;
pthread_mutex_t refcount_mutex;
Drawable drawable; ///< X drawable to output to
unsigned int drawable_width; ///< last seen drawable width
unsigned int drawable_height;///< last seen drawable height
Pixmap pixmap; ///< draw buffer
GLXPixmap glx_pixmap; ///< GLX pixmap proxy
GC plain_copy_gc; ///< X GC for displaying buffer content
GLXContext glc; ///< GL context used for output
XVisualInfo *xvi;
} VdpPresentationQueueTargetData;
/** @brief VdpPresentationQueue object parameters */
typedef struct {
VDP_GENERIC_HANDLE_FIELDS; ///< base struct
VdpPresentationQueueTargetData *targetData;
VdpPresentationQueueTarget target;
VdpColor bg_color; ///< background color
} VdpPresentationQueueData;
/** @brief VdpVideoSurface object parameters */
typedef struct {
VDP_GENERIC_HANDLE_FIELDS; ///< base struct
VdpChromaType chroma_type; ///< video chroma type
VdpYCbCrFormat format; ///< current data format
uint32_t width;
uint32_t height;
uint32_t stride;
uint32_t chroma_width;
uint32_t chroma_height;
uint32_t chroma_stride;
VASurfaceID va_surf; ///< VA-API surface
int sync_va_to_glx; ///< whenever VA-API surface should be converted to GL texture
GLuint tex_id; ///< GL texture id (RGBA)
GLuint fbo_id; ///< framebuffer object id
VdpDecoder decoder; ///< associated VdpDecoder
int32_t rt_idx; ///< index in VdpDecoder's render_targets
uint8_t *y_plane;
uint8_t *u_plane;
uint8_t *v_plane;
} VdpVideoSurfaceData;
/** @brief VdpBitmapSurface object parameters */
typedef struct {
VDP_GENERIC_HANDLE_FIELDS; ///< base struct
VdpRGBAFormat rgba_format; ///< RGBA format of data stored
GLuint tex_id; ///< GL texture id
uint32_t width; ///< surface width in pixels
uint32_t height; ///< surface height in pixels
VdpBool frequently_accessed;///< 1 if surface should be optimized for frequent access
unsigned int bytes_per_pixel; ///< number of bytes per bitmap pixel
GLuint gl_internal_format; ///< GL texture format: internal format
GLuint gl_format; ///< GL texture format: preferred external format
GLuint gl_type; ///< GL texture format: pixel type
char *bitmap_data; ///< system-memory buffer for frequently accessed bitmaps
int dirty; ///< dirty flag. True if system-memory buffer contains data
///< newer than GPU texture contents
} VdpBitmapSurfaceData;
/** @brief VdpDecoder object parameters */
typedef struct {
VDP_GENERIC_HANDLE_FIELDS; ///< base struct
VdpDecoderProfile profile; ///< decoder profile
uint32_t width; ///< coded picture width
uint32_t height; ///< coded picture height
uint32_t max_references; ///< maximum count of reference frames
VAConfigID config_id; ///< VA-API config id
VASurfaceID render_targets[MAX_RENDER_TARGETS]; ///< spare VA surfaces
int32_t free_list_head; ///< head of the free list below (looks like -1 means empty — confirm)
int32_t free_list[MAX_RENDER_TARGETS]; ///< intrusive free list over render_targets indices
uint32_t num_render_targets; ///< number of valid entries in render_targets
VAContextID context_id; ///< VA-API context id
} VdpDecoderData;
static inline
int
ref_device(VdpDeviceData *deviceData)
{
    // Bump the device reference counter under its mutex; return the new count.
    pthread_mutex_lock(&deviceData->refcount_mutex);
    const int new_count = ++deviceData->refcount;
    pthread_mutex_unlock(&deviceData->refcount_mutex);
    return new_count;
}
static inline
int
unref_device(VdpDeviceData *deviceData)
{
    // Drop one device reference under its mutex; return the remaining count.
    pthread_mutex_lock(&deviceData->refcount_mutex);
    const int remaining = --deviceData->refcount;
    pthread_mutex_unlock(&deviceData->refcount_mutex);
    return remaining;
}
static inline
int
ref_pq_target(VdpPresentationQueueTargetData *pqTargetData)
{
    // Bump the presentation-queue-target reference counter; return the new count.
    pthread_mutex_lock(&pqTargetData->refcount_mutex);
    const int new_count = ++pqTargetData->refcount;
    pthread_mutex_unlock(&pqTargetData->refcount_mutex);
    return new_count;
}
static inline
int
unref_pq_target(VdpPresentationQueueTargetData *pqTargetData)
{
    // Drop one presentation-queue-target reference; return the remaining count.
    pthread_mutex_lock(&pqTargetData->refcount_mutex);
    const int remaining = --pqTargetData->refcount;
    pthread_mutex_unlock(&pqTargetData->refcount_mutex);
    return remaining;
}
VdpStatus
vdpDeviceCreateX11(Display *display, int screen, VdpDevice *device,
VdpGetProcAddress **get_proc_address);
VdpGetApiVersion vdpGetApiVersion;
VdpDecoderQueryCapabilities vdpDecoderQueryCapabilities;
VdpDecoderCreate vdpDecoderCreate;
VdpDecoderDestroy vdpDecoderDestroy;
VdpDecoderGetParameters vdpDecoderGetParameters;
VdpDecoderRender vdpDecoderRender;
VdpOutputSurfaceQueryCapabilities vdpOutputSurfaceQueryCapabilities;
VdpOutputSurfaceQueryGetPutBitsNativeCapabilities vdpOutputSurfaceQueryGetPutBitsNativeCapabilities;
VdpOutputSurfaceQueryPutBitsIndexedCapabilities vdpOutputSurfaceQueryPutBitsIndexedCapabilities;
VdpOutputSurfaceQueryPutBitsYCbCrCapabilities vdpOutputSurfaceQueryPutBitsYCbCrCapabilities;
VdpOutputSurfaceCreate vdpOutputSurfaceCreate;
VdpOutputSurfaceDestroy vdpOutputSurfaceDestroy;
VdpOutputSurfaceGetParameters vdpOutputSurfaceGetParameters;
VdpOutputSurfaceGetBitsNative vdpOutputSurfaceGetBitsNative;
VdpOutputSurfacePutBitsNative vdpOutputSurfacePutBitsNative;
VdpOutputSurfacePutBitsIndexed vdpOutputSurfacePutBitsIndexed;
VdpOutputSurfacePutBitsYCbCr vdpOutputSurfacePutBitsYCbCr;
VdpVideoMixerQueryFeatureSupport vdpVideoMixerQueryFeatureSupport;
VdpVideoMixerQueryParameterSupport vdpVideoMixerQueryParameterSupport;
VdpVideoMixerQueryAttributeSupport vdpVideoMixerQueryAttributeSupport;
VdpVideoMixerQueryParameterValueRange vdpVideoMixerQueryParameterValueRange;
VdpVideoMixerQueryAttributeValueRange vdpVideoMixerQueryAttributeValueRange;
VdpVideoMixerCreate vdpVideoMixerCreate;
VdpVideoMixerSetFeatureEnables vdpVideoMixerSetFeatureEnables;
VdpVideoMixerSetAttributeValues vdpVideoMixerSetAttributeValues;
VdpVideoMixerGetFeatureSupport vdpVideoMixerGetFeatureSupport;
VdpVideoMixerGetFeatureEnables vdpVideoMixerGetFeatureEnables;
VdpVideoMixerGetParameterValues vdpVideoMixerGetParameterValues;
VdpVideoMixerGetAttributeValues vdpVideoMixerGetAttributeValues;
VdpVideoMixerDestroy vdpVideoMixerDestroy;
VdpVideoMixerRender vdpVideoMixerRender;
VdpPresentationQueueTargetDestroy vdpPresentationQueueTargetDestroy;
VdpPresentationQueueCreate vdpPresentationQueueCreate;
VdpPresentationQueueDestroy vdpPresentationQueueDestroy;
VdpPresentationQueueSetBackgroundColor vdpPresentationQueueSetBackgroundColor;
VdpPresentationQueueGetBackgroundColor vdpPresentationQueueGetBackgroundColor;
VdpPresentationQueueGetTime vdpPresentationQueueGetTime;
VdpPresentationQueueDisplay vdpPresentationQueueDisplay;
VdpPresentationQueueBlockUntilSurfaceIdle vdpPresentationQueueBlockUntilSurfaceIdle;
VdpPresentationQueueQuerySurfaceStatus vdpPresentationQueueQuerySurfaceStatus;
VdpVideoSurfaceQueryCapabilities vdpVideoSurfaceQueryCapabilities;
VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities vdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities;
VdpVideoSurfaceCreate vdpVideoSurfaceCreate;
VdpVideoSurfaceDestroy vdpVideoSurfaceDestroy;
VdpVideoSurfaceGetParameters vdpVideoSurfaceGetParameters;
VdpVideoSurfaceGetBitsYCbCr vdpVideoSurfaceGetBitsYCbCr;
VdpVideoSurfacePutBitsYCbCr vdpVideoSurfacePutBitsYCbCr;
VdpBitmapSurfaceQueryCapabilities vdpBitmapSurfaceQueryCapabilities;
VdpBitmapSurfaceCreate vdpBitmapSurfaceCreate;
VdpBitmapSurfaceDestroy vdpBitmapSurfaceDestroy;
VdpBitmapSurfaceGetParameters vdpBitmapSurfaceGetParameters;
VdpBitmapSurfacePutBitsNative vdpBitmapSurfacePutBitsNative;
VdpDeviceDestroy vdpDeviceDestroy;
VdpGetInformationString vdpGetInformationString;
VdpGenerateCSCMatrix vdpGenerateCSCMatrix;
VdpOutputSurfaceRenderOutputSurface vdpOutputSurfaceRenderOutputSurface;
VdpOutputSurfaceRenderBitmapSurface vdpOutputSurfaceRenderBitmapSurface;
VdpPreemptionCallbackRegister vdpPreemptionCallbackRegister;
VdpPresentationQueueTargetCreateX11 vdpPresentationQueueTargetCreateX11;
VdpGetProcAddress vdpGetProcAddress;
#endif /* VA_GL_SRC_API_H */
libvdpau-va-gl-0.3.6/src/bitstream.c 0000664 0000000 0000000 00000006100 12665616006 0017264 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#include "bitstream.h"
#include
#include
inline
void
rbsp_attach_buffer(rbsp_state_t *state, const uint8_t *buf, size_t byte_count)
{
    // Point the parser at the start of `buf` and reset all consumption state.
    state->buf_ptr      = buf;
    state->cur_ptr      = buf;
    state->byte_count   = byte_count;
    state->bit_ptr      = 7;    // bits are consumed MSB-first within each byte
    state->zeros_in_row = 0;
    state->bits_eaten   = 0;
}
rbsp_state_t
rbsp_copy_state(rbsp_state_t *state)
{
    // Return a by-value snapshot of the parser state (for look-ahead parsing).
    rbsp_state_t snapshot = *state;
    return snapshot;
}
inline
int
rbsp_navigate_to_nal_unit(rbsp_state_t *state)
{
    // Scan forward for the 00 00 01 start-code prefix. On success the cursor
    // is left just past the prefix and its byte offset from the buffer start
    // is returned; -1 when the buffer is exhausted without finding one.
    const uint8_t *const end = state->buf_ptr + state->byte_count;
    int w0 = -1, w1 = -1, w2 = -1;          // sliding three-byte window
    while (state->cur_ptr < end) {
        w0 = w1;
        w1 = w2;
        w2 = *state->cur_ptr++;
        if (0 == w0 && 0 == w1 && 1 == w2)
            return (int)(state->cur_ptr - state->buf_ptr);
    }
    return -1;
}
inline
void
rbsp_reset_bit_counter(rbsp_state_t *state)
{
// Restart counting of consumed bits (EPB-exclusive) from the current position.
state->bits_eaten = 0;
}
inline
int
rbsp_consume_byte(rbsp_state_t *state)
{
    // Consume and return the next payload byte, transparently skipping H.264
    // emulation prevention bytes (a 0x03 following two zero bytes).
    // Returns -1 when the buffer is exhausted.
    if (state->cur_ptr >= state->buf_ptr + state->byte_count)
        return -1;
    uint8_t c = *state->cur_ptr++;
    if (0 == c) state->zeros_in_row ++;
    else state->zeros_in_row = 0;
    // Fix: only peek at the potential emulation prevention byte if one more
    // byte actually exists; the original read one byte past the end of a
    // buffer terminating in two (or more) zero bytes.
    if (state->zeros_in_row >= 2 &&
        state->cur_ptr < state->buf_ptr + state->byte_count)
    {
        uint8_t epb = *state->cur_ptr++;
        if (0 != epb) state->zeros_in_row = 0;
        // if epb does not actually have the 0x03 value, it's not an emulation prevention
        if (0x03 != epb) state->cur_ptr--; // so rewind
    }
    return c;
}
inline
int
rbsp_consume_bit(rbsp_state_t *state)
{
// Return the next bit (MSB-first within the current byte) and advance.
// Crossing a byte boundary goes through rbsp_consume_byte() so emulation
// prevention bytes are skipped; bits_eaten never counts EPB bits.
assert (state->cur_ptr < state->buf_ptr + state->byte_count);
int value = !!(*state->cur_ptr & (1 << state->bit_ptr));
if (state->bit_ptr > 0) {
state->bit_ptr --;
} else {
state->bit_ptr = 7;
rbsp_consume_byte(state); // handles emulation prevention bytes
}
state->bits_eaten += 1;
return value;
}
inline
unsigned int
rbsp_get_u(rbsp_state_t *state, int bitcount)
{
    // Read a fixed-width, big-endian unsigned field of `bitcount` bits.
    unsigned int acc = 0;
    int remaining = bitcount;
    while (remaining-- > 0)
        acc = (acc << 1) | rbsp_consume_bit(state);
    return acc;
}
inline
unsigned int
rbsp_get_uev(rbsp_state_t *state)
{
    // Exp-Golomb ue(v): count leading zero bits up to the terminating one-bit,
    // then read the same number of suffix bits.
    int leading_zeros = 0;
    while (rbsp_consume_bit(state) == 0)
        leading_zeros ++;
    if (leading_zeros == 0)
        return 0;
    return (1 << leading_zeros) - 1 + rbsp_get_u(state, leading_zeros);
}
inline
int
rbsp_get_sev(rbsp_state_t *state)
{
// Exp-Golomb se(v): decode the code number like ue(v), then map
// codeNum -> (-1)^(codeNum+1) * ceil(codeNum / 2).
int zerobit_count = -1;
int current_bit = 0;
do {
zerobit_count ++;
current_bit = rbsp_consume_bit(state);
} while (0 == current_bit);
if (0 == zerobit_count) return 0;
// Here value == codeNum + 1 (note: no "- 1" as in ue(v)); an odd value
// therefore decodes to a negative number, an even one to positive.
int value = (1 << zerobit_count) + rbsp_get_u(state, zerobit_count);
if (value & 1)
return -value/2;
return value/2;
}
libvdpau-va-gl-0.3.6/src/bitstream.h 0000664 0000000 0000000 00000003361 12665616006 0017277 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#ifndef VA_GL_SRC_BITSTREAM_H
#define VA_GL_SRC_BITSTREAM_H
#include
#include
/** @brief State of raw byte stream payload consumer */
typedef struct _rbsp_state_struct {
const uint8_t *buf_ptr; ///< pointer to beginning of the buffer
size_t byte_count; ///< size of buffer
const uint8_t *cur_ptr; ///< pointer to currently processed byte
int bit_ptr; ///< pointer to currently processed bit (7 = MSB, counts down)
int zeros_in_row; ///< number of consecutive zero bytes so far
int bits_eaten; ///< bit offset of current position not including EPB
} rbsp_state_t;
/** @brief Initialize rbsp state
*
* @param [out] state
* @param [in] buf pointer to byte string
* @param [in] byte_count number of bytes in @param buf
*
* @retval void
*/
void rbsp_attach_buffer(rbsp_state_t *state, const uint8_t *buf, size_t byte_count);
/** @brief Consumes and returns one byte from rbsp
*
* This function handles emulation prevention bytes internally, without their
* exposure to caller. Returns value of successfully consumed byte.
*/
int rbsp_consume_byte(rbsp_state_t *state);
rbsp_state_t rbsp_copy_state(rbsp_state_t *state);
int rbsp_navigate_to_nal_unit(rbsp_state_t *state);
void rbsp_reset_bit_counter(rbsp_state_t *state);
int
rbsp_consume_bit(rbsp_state_t *state);
unsigned int
rbsp_get_u(rbsp_state_t *state, int bitcount);
unsigned int
rbsp_get_uev(rbsp_state_t *state);
int
rbsp_get_sev(rbsp_state_t *state);
#endif /* VA_GL_SRC_BITSTREAM_H */
libvdpau-va-gl-0.3.6/src/compat.h 0000664 0000000 0000000 00000002474 12665616006 0016574 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#ifndef VA_GL_SRC_COMPAT_H
#define VA_GL_SRC_COMPAT_H
#include
#include
#include
#include
#ifdef __FreeBSD__
#include
#endif
#if (LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51, 42, 0)) || \
(LIBAVUTIL_VERSION_INT == AV_VERSION_INT(51, 73, 101))
#define AV_PIX_FMT_NONE PIX_FMT_NONE
#define AV_PIX_FMT_NV12 PIX_FMT_NV12
#define AV_PIX_FMT_YUV420P PIX_FMT_YUV420P
#define AV_PIX_FMT_UYVY422 PIX_FMT_UYVY422
#define AV_PIX_FMT_YUYV422 PIX_FMT_YUYV422
#endif // old libavutil version
#if defined(__linux__)
typedef int thread_id_t;
#elif defined(__FreeBSD__)
typedef long thread_id_t;
#else
#error Unknown OS
#endif
static inline thread_id_t
get_current_thread_id(void)
{
// Return the kernel-level id of the calling thread.
#if defined(__linux__)
return syscall(__NR_gettid); // raw syscall: no portable gettid() wrapper assumed
#elif defined(__FreeBSD__)
long thread_id;
thr_self(&thread_id);
return thread_id;
#endif
}
static inline size_t
thread_is_alive(thread_id_t tid)
{
// Probe whether a thread still exists by sending signal 0 (nothing is delivered).
// NOTE(review): on Linux this passes a TID to kill(); it works because TIDs
// share the PID number space, but tgkill() would be the precise call — confirm.
#if defined(__linux__)
return kill(tid, 0) == 0;
#elif defined(__FreeBSD__)
return thr_kill(tid, 0) == 0;
#endif
}
#endif // VA_GL_SRC_COMPAT_H
libvdpau-va-gl-0.3.6/src/ctx-stack.c 0000664 0000000 0000000 00000012442 12665616006 0017201 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
/*
* glx context stack
*/
#define _GNU_SOURCE
#include "ctx-stack.h"
#include "globals.h"
#include
#include "trace.h"
#include
#include
#include "compat.h"
static __thread struct {
Display *dpy;
Drawable wnd;
GLXContext glc;
int element_count;
} ctx_stack = {
.dpy = NULL,
.wnd = None,
.glc = NULL,
};
static GHashTable *glc_hash_table = NULL;
static int glc_hash_table_ref_count = 0;
static GLXContext root_glc = NULL;
static XVisualInfo *root_vi = NULL;
static int x11_error_code = 0;
static void *x11_prev_handler = NULL;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
struct val_s {
Display *dpy;
GLXContext glc;
};
void
glx_ctx_lock(void)
{
// Acquire the module-wide lock serializing all GLX context manipulation.
pthread_mutex_lock(&lock);
}
void
glx_ctx_unlock(void)
{
// Release the module-wide GLX lock taken by glx_ctx_lock().
pthread_mutex_unlock(&lock);
}
static
void
value_destroy_func(gpointer data)
{
// GDestroyNotify for glc_hash_table values: unbind and destroy the
// per-thread GLX context, then free the wrapper struct.
struct val_s *val = data;
glXMakeCurrent(val->dpy, None, NULL);
glXDestroyContext(val->dpy, val->glc);
free(val);
}
static
void *
make_val(Display *dpy, GLXContext glc)
{
    // Heap-allocate a (display, context) pair for storage in glc_hash_table.
    struct val_s *entry = malloc(sizeof(*entry));
    entry->dpy = dpy;
    entry->glc = glc;
    return entry;
}
static
gboolean
is_thread_expired(gpointer key, gpointer value, gpointer user_data)
{
    // GHRFunc predicate: return TRUE (i.e. remove the hash entry) when the
    // thread that owns this per-thread context no longer exists.
    (void)value;
    (void)user_data;
    thread_id_t tid = GPOINTER_TO_SIZE(key);
    return thread_is_alive(tid) ? FALSE : TRUE;
}
void
glx_ctx_push_global(Display *dpy, Drawable wnd, GLXContext glc)
{
// Take the global GLX lock, save the currently bound display/drawable/context,
// and make the supplied triple current. Must be paired with glx_ctx_pop(),
// which restores the saved binding and unlocks.
glx_ctx_lock();
assert(0 == ctx_stack.element_count); // the "stack" holds at most one element
ctx_stack.dpy = glXGetCurrentDisplay();
if (!ctx_stack.dpy)
ctx_stack.dpy = dpy; // nothing was current; fall back to caller's display
ctx_stack.wnd = glXGetCurrentDrawable();
ctx_stack.glc = glXGetCurrentContext();
ctx_stack.element_count ++;
glXMakeCurrent(dpy, wnd, glc);
}
void
glx_ctx_push_thread_local(VdpDeviceData *deviceData)
{
// Like glx_ctx_push_global(), but binds a per-thread GLX context that is
// created lazily (shared with root_glc) and cached in glc_hash_table keyed
// by thread id. Must be paired with glx_ctx_pop().
glx_ctx_lock();
Display *dpy = deviceData->display;
const Window wnd = deviceData->root;
thread_id_t thread_id = get_current_thread_id();
// save whatever is current so glx_ctx_pop() can restore it
ctx_stack.dpy = glXGetCurrentDisplay();
if (!ctx_stack.dpy)
ctx_stack.dpy = dpy;
ctx_stack.wnd = glXGetCurrentDrawable();
ctx_stack.glc = glXGetCurrentContext();
ctx_stack.element_count ++;
struct val_s *val = g_hash_table_lookup(glc_hash_table, GSIZE_TO_POINTER(thread_id));
if (!val) {
// first use on this thread: create a context sharing objects with root_glc
GLXContext glc = glXCreateContext(dpy, root_vi, root_glc, GL_TRUE);
assert(glc);
val = make_val(dpy, glc);
g_hash_table_insert(glc_hash_table, GSIZE_TO_POINTER(thread_id), val);
// try cleanup expired entries
g_hash_table_foreach_remove(glc_hash_table, is_thread_expired, NULL);
}
assert(val->dpy == dpy);
glXMakeCurrent(dpy, wnd, val->glc);
}
void
glx_ctx_pop()
{
// Restore the GLX binding saved by the matching push and release the lock.
assert(1 == ctx_stack.element_count);
glXMakeCurrent(ctx_stack.dpy, ctx_stack.wnd, ctx_stack.glc);
ctx_stack.element_count --;
glx_ctx_unlock();
}
void
glx_ctx_ref_glc_hash_table(Display *dpy, int screen)
{
// Reference-count the shared state used by glx_ctx_push_thread_local():
// on the first reference, create the per-thread context table, choose the
// root visual and create the root (shared) GLX context.
glx_ctx_lock();
if (0 == glc_hash_table_ref_count) {
glc_hash_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
NULL, value_destroy_func);
glc_hash_table_ref_count = 1;
GLint att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None };
root_vi = glXChooseVisual(dpy, screen, att);
if (NULL == root_vi) {
// NOTE(review): this failure path leaves ref_count at 1 with the hash
// table allocated but root_vi/root_glc NULL; a later
// glx_ctx_push_thread_local() would pass NULL into glXCreateContext.
// Cleaning up here would unbalance the unref pairing — confirm intent.
traceError("error (%s): glXChooseVisual failed\n", __func__);
glx_ctx_unlock();
return;
}
root_glc = glXCreateContext(dpy, root_vi, NULL, GL_TRUE);
} else {
glc_hash_table_ref_count ++;
}
glx_ctx_unlock();
}
void
glx_ctx_unref_glc_hash_table(Display *dpy)
{
// Drop one reference; on the last one destroy the per-thread context table
// (which destroys each cached context via value_destroy_func), the root
// context and the root visual.
glx_ctx_lock();
glc_hash_table_ref_count --;
if (0 == glc_hash_table_ref_count) {
g_hash_table_unref(glc_hash_table);
glc_hash_table = NULL;
glXDestroyContext(dpy, root_glc);
XFree(root_vi);
}
glx_ctx_unlock();
}
GLXContext
glx_ctx_get_root_context(void)
{
    // Accessor for the shared root context created by glx_ctx_ref_glc_hash_table().
    return root_glc;
}
static
int
x11_error_handler(Display *dpy, XErrorEvent *ee)
{
// Capturing Xlib error handler: record the error code for x11_pop_eh().
(void)dpy;
x11_error_code = ee->error_code;
return 0; // Xlib ignores the return value
}
void
x11_push_eh(void)
{
// Install the capturing error handler and clear the recorded error code.
// The previous handler is remembered unless ours is already installed.
x11_error_code = 0;
void *ptr = XSetErrorHandler(&x11_error_handler);
if (ptr != x11_error_handler)
x11_prev_handler = ptr;
}
int
x11_pop_eh(void)
{
// Return the last X error code captured since x11_push_eh(). Deliberately
// does NOT restore the previous handler — see the retained note below.
// Although this looks like the right thing to do, brief testing shows it's highly unstable.
// So this code will stay here commented out as a reminder.
/*
void *ptr = XSetErrorHandler(x11_prev_handler);
if (ptr != x11_error_handler) {
// if someone have managed to set own handler after ours, restore it
void *ptr2 = XSetErrorHandler(ptr);
if (ptr != ptr2) {
// someone again has set another handler
traceError("warning (%s): someone set X error handler while restore previous\n",
__func__);
XSetErrorHandler(ptr2);
}
}
*/
return x11_error_code;
}
libvdpau-va-gl-0.3.6/src/ctx-stack.h 0000664 0000000 0000000 00000001376 12665616006 0017212 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#ifndef VA_GL_SRC_CTX_STACK_H
#define VA_GL_SRC_CTX_STACK_H
#include "api.h"
void glx_ctx_push_global(Display *dpy, Drawable wnd, GLXContext glc);
void glx_ctx_push_thread_local(VdpDeviceData *deviceData);
void glx_ctx_pop(void);
void glx_ctx_ref_glc_hash_table(Display *dpy, int screen);
void glx_ctx_unref_glc_hash_table(Display *dpy);
GLXContext glx_ctx_get_root_context(void);
void glx_ctx_lock(void);
void glx_ctx_unlock(void);
void x11_push_eh(void);
int x11_pop_eh(void);
#endif /* VA_GL_SRC_CTX_STACK_H */
libvdpau-va-gl-0.3.6/src/entry.c 0000664 0000000 0000000 00000011165 12665616006 0016442 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#define _XOPEN_SOURCE 500
#define _GNU_SOURCE
#include
#include
#include
#include
#include
#include
#include "handle-storage.h"
#include "api.h"
#include "trace.h"
#include "globals.h"
#include "compat.h"
void
trc_hk(void *longterm_param, void *shortterm_param, int origin, int after)
{
// Trace hook invoked around every traced VDPAU call: once with after == 0
// (before the call) and once with after == 1 (after it). On the "after" leg
// shortterm_param carries the call's VdpStatus; origin identifies the call
// (decoded via reverse_func_id()).
(void)longterm_param;
(void)origin;
int before = !after;
if (global.quirks.log_call_duration) {
// per-thread start timestamp so overlapping calls on other threads don't interfere
static __thread struct timespec start_ts = {0, 0};
if (before) {
clock_gettime(CLOCK_MONOTONIC, &start_ts);
}
if (after) {
struct timespec end_ts;
clock_gettime(CLOCK_MONOTONIC, &end_ts);
double diff = (end_ts.tv_sec - start_ts.tv_sec) +
(end_ts.tv_nsec - start_ts.tv_nsec) / 1.0e9;
printf("Duration %7.5f secs, %s, %s\n",
diff, reverse_func_id(origin), reverse_status((VdpStatus)shortterm_param));
}
}
if (before && global.quirks.log_timestamp) {
struct timespec now;
clock_gettime(CLOCK_REALTIME, &now);
printf("%d.%03d ", (int)now.tv_sec, (int)now.tv_nsec/1000000);
}
if (before && global.quirks.log_thread_id) {
printf("[%5ld] ", (long)get_current_thread_id());
}
}
static
void
initialize_quirks(void)
{
// Reset all quirk flags, then parse the comma-separated, case-insensitive
// VDPAU_QUIRKS environment variable and set the matching flags.
global.quirks.buggy_XCloseDisplay = 0;
global.quirks.show_watermark = 0;
global.quirks.log_thread_id = 0;
global.quirks.log_call_duration = 0;
global.quirks.log_pq_delay = 0;
global.quirks.log_timestamp = 0;
global.quirks.avoid_va = 0;
const char *value = getenv("VDPAU_QUIRKS");
if (!value)
return;
char *value_lc = strdup(value); // mutable lowercase working copy
if (NULL == value_lc)
return;
for (int k = 0; value_lc[k] != 0; k ++)
value_lc[k] = tolower(value_lc[k]);
// tokenize string in place: each ',' (and the final NUL) terminates a token
const char delimiter = ',';
char *item_start = value_lc;
char *ptr = item_start;
while (1) {
int last = (0 == *ptr); // remember before the NUL is overwritten below
if (delimiter == *ptr || 0 == *ptr) {
*ptr = 0;
if (!strcmp("xclosedisplay", item_start)) {
global.quirks.buggy_XCloseDisplay = 1;
} else
if (!strcmp("showwatermark", item_start)) {
global.quirks.show_watermark = 1;
} else
if (!strcmp("logthreadid", item_start)) {
global.quirks.log_thread_id = 1;
} else
if (!strcmp("logcallduration", item_start)) {
global.quirks.log_call_duration = 1;
} else
if (!strcmp("logpqdelay", item_start)) {
global.quirks.log_pq_delay = 1;
} else
if (!strcmp("logtimestamp", item_start)) {
global.quirks.log_timestamp = 1;
} else
if (!strcmp("avoidva", item_start)) {
global.quirks.avoid_va = 1;
}
item_start = ptr + 1;
}
ptr ++;
if (last)
break;
}
free(value_lc);
}
__attribute__((constructor))
void
va_gl_library_constructor(void)
{
    // Library load-time initialization: handle storage, quirk flags, and the
    // tracer (default on for debug builds, off for NDEBUG, overridable via
    // the VDPAU_LOG environment variable).
    handle_initialize_storage();

    // Initialize global data
    initialize_quirks();

    // initialize tracer
    traceSetTarget(stdout);
    traceSetHook(trc_hk, NULL);
#ifdef NDEBUG
    traceEnableTracing(0);
#else
    traceEnableTracing(1);
#endif
    const char *value = getenv("VDPAU_LOG");
    if (value) {
        // enable tracing when variable present
        traceEnableTracing(1);
        char *value_lc = strdup(value); // convert to lowercase
        // fix: guard against strdup failure — the original dereferenced a
        // NULL pointer in the lowercasing loop when allocation failed
        if (value_lc) {
            for (int k = 0; value_lc[k] != 0; k ++)
                value_lc[k] = tolower(value_lc[k]);
            // and disable tracing when variable value equals one of the following values
            if (!strcmp(value_lc, "0") ||
                !strcmp(value_lc, "false") ||
                !strcmp(value_lc, "off") ||
                !strcmp(value_lc, "disable") ||
                !strcmp(value_lc, "disabled"))
            {
                traceEnableTracing(0);
            }
            free(value_lc);
        }
    }
    traceInfo("Software VDPAU backend library initialized\n");
}
__attribute__((destructor))
void
va_gl_library_destructor(void)
{
// Library unload-time cleanup: tear down the handle storage.
handle_destory_storage();
}
__attribute__ ((visibility("default")))
VdpStatus
vdp_imp_device_create_x11(Display *display, int screen, VdpDevice *device,
VdpGetProcAddress **get_proc_address)
{
// Exported VDPAU driver entry point (resolved by the VDPAU loader);
// forwards to the traced device constructor.
return traceVdpDeviceCreateX11(display, screen, device, get_proc_address);
}
libvdpau-va-gl-0.3.6/src/globals.c 0000664 0000000 0000000 00000000350 12665616006 0016716 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#include "globals.h"
struct global_data global;
libvdpau-va-gl-0.3.6/src/globals.h 0000664 0000000 0000000 00000002124 12665616006 0016724 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#ifndef VA_GL_SRC_GLOBALS_H
#define VA_GL_SRC_GLOBALS_H
#include
/** @brief place where all shared global variables live */
struct global_data {
/** @brief tunables */
struct {
int buggy_XCloseDisplay; ///< avoid calling XCloseDisplay
int show_watermark; ///< show picture over output
int log_thread_id; ///< include thread id into the log output
int log_call_duration; ///< measure call duration
int log_pq_delay; ///< measure delay between queueing and displaying presentation
///< queue introduces
int log_timestamp; ///< display timestamps
int avoid_va; ///< do not use VA-API video decoding acceleration even if
///< available
} quirks;
};
extern struct global_data global;
#endif /* VA_GL_SRC_GLOBALS_H */
libvdpau-va-gl-0.3.6/src/h264-parse.c 0000664 0000000 0000000 00000057046 12665616006 0017104 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#define _GNU_SOURCE
#include
#include
#include
#include
#include "h264-parse.h"
enum {
SLICE_TYPE_P = 0,
SLICE_TYPE_B = 1,
SLICE_TYPE_I = 2,
SLICE_TYPE_SP = 3,
SLICE_TYPE_SI = 4,
};
enum {
NAL_UNSPECIFIED = 0,
NAL_SLICE = 1,
NAL_SLICE_DATA_A = 2,
NAL_SLICE_DATA_B = 3,
NAL_SLICE_DATA_C = 4,
NAL_IDR_SLICE = 5,
};
#define NOT_IMPLEMENTED(str) assert(0 && "not implemented" && str)
#define DESCRIBE(xparam, format) fprintf(stderr, #xparam " = %" #format "\n", xparam)
/** @brief Slice header fields parsed from the bitstream (see H.264 spec,
 *  slice header syntax, clause 7.3.3) plus the derived reference picture
 *  lists, ready to be copied into VASliceParameterBufferH264. */
struct slice_parameters {
int nal_ref_idc;
int nal_unit_type;
int first_mb_in_slice;
int slice_type;
int pic_parameter_set_id;
int frame_num;
int field_pic_flag;
int bottom_field_flag;
int idr_pic_id;
int pic_order_cnt_lsb;
int delta_pic_order_cnt_bottom;
int delta_pic_order_cnt[2];
int redundant_pic_cnt;
int direct_spatial_mv_pred_flag;
int num_ref_idx_active_override_flag;
int num_ref_idx_l0_active_minus1;
int num_ref_idx_l1_active_minus1;
int luma_log2_weight_denom;
int chroma_log2_weight_denom;
unsigned int luma_weight_l0_flag;
int luma_weight_l0[32];
int luma_offset_l0[32];
unsigned int chroma_weight_l0_flag;
int chroma_weight_l0[32][2];
int chroma_offset_l0[32][2];
unsigned int luma_weight_l1_flag;
int luma_weight_l1[32];
int luma_offset_l1[32];
unsigned int chroma_weight_l1_flag;
int chroma_weight_l1[32][2];
int chroma_offset_l1[32][2];
unsigned int no_output_of_prior_pics_flag;
unsigned int long_term_reference_flag;
unsigned int cabac_init_idc;
int slice_qp_delta;
unsigned int sp_for_switch_flag;
int slice_qs_delta;
unsigned int disable_deblocking_filter_idc;
int slice_alpha_c0_offset_div2;
int slice_beta_offset_div2;
VAPictureH264 RefPicList0[32]; ///< derived reference list 0
VAPictureH264 RefPicList1[32]; ///< derived reference list 1 (B slices)
};
static
void
parse_ref_pic_list_modification(rbsp_state_t *st, const VAPictureParameterBufferH264 *vapp,
struct slice_parameters *sp);
static
void
parse_pred_weight_table(rbsp_state_t *st, const int ChromaArrayType, struct slice_parameters *sp);
static
void
parse_dec_ref_pic_marking(rbsp_state_t *st, struct slice_parameters *sp);
static
void
do_fill_va_slice_parameter_buffer(struct slice_parameters const * const sp,
                                  VASliceParameterBufferH264 *vasp, int bit_offset)
{
    // Copy the parsed slice header fields into the VA-API slice parameter buffer.
    vasp->slice_data_bit_offset         = bit_offset;
    vasp->first_mb_in_slice             = sp->first_mb_in_slice;
    vasp->slice_type                    = sp->slice_type;
    vasp->direct_spatial_mv_pred_flag   = sp->direct_spatial_mv_pred_flag;
    vasp->num_ref_idx_l0_active_minus1  = sp->num_ref_idx_l0_active_minus1;
    vasp->num_ref_idx_l1_active_minus1  = sp->num_ref_idx_l1_active_minus1;
    vasp->cabac_init_idc                = sp->cabac_init_idc;
    vasp->slice_qp_delta                = sp->slice_qp_delta;
    vasp->disable_deblocking_filter_idc = sp->disable_deblocking_filter_idc;
    vasp->slice_alpha_c0_offset_div2    = sp->slice_alpha_c0_offset_div2;
    vasp->slice_beta_offset_div2        = sp->slice_beta_offset_div2;
    vasp->luma_log2_weight_denom        = sp->luma_log2_weight_denom;
    vasp->chroma_log2_weight_denom      = sp->chroma_log2_weight_denom;
    vasp->luma_weight_l0_flag           = sp->luma_weight_l0_flag;
    vasp->chroma_weight_l0_flag         = sp->chroma_weight_l0_flag;
    vasp->luma_weight_l1_flag           = sp->luma_weight_l1_flag;
    vasp->chroma_weight_l1_flag         = sp->chroma_weight_l1_flag;

    // Array copies stay element-wise: the VA-API fields may be declared with
    // narrower integer types than the parser's ints, so each assignment
    // performs the appropriate conversion.
    for (int k = 0; k < 32; k ++) {
        vasp->RefPicList0[k]         = sp->RefPicList0[k];
        vasp->RefPicList1[k]         = sp->RefPicList1[k];
        vasp->luma_weight_l0[k]      = sp->luma_weight_l0[k];
        vasp->luma_offset_l0[k]      = sp->luma_offset_l0[k];
        vasp->chroma_weight_l0[k][0] = sp->chroma_weight_l0[k][0];
        vasp->chroma_weight_l0[k][1] = sp->chroma_weight_l0[k][1];
        vasp->chroma_offset_l0[k][0] = sp->chroma_offset_l0[k][0];
        vasp->chroma_offset_l0[k][1] = sp->chroma_offset_l0[k][1];
        vasp->luma_weight_l1[k]      = sp->luma_weight_l1[k];
        vasp->luma_offset_l1[k]      = sp->luma_offset_l1[k];
        vasp->chroma_weight_l1[k][0] = sp->chroma_weight_l1[k][0];
        vasp->chroma_weight_l1[k][1] = sp->chroma_weight_l1[k][1];
        vasp->chroma_offset_l1[k][0] = sp->chroma_offset_l1[k][0];
        vasp->chroma_offset_l1[k][1] = sp->chroma_offset_l1[k][1];
    }
}
void
reset_va_picture_h264(VAPictureH264 *p)
{
    // Mark the picture entry as unused/invalid with all counters cleared.
    *p = (VAPictureH264) {
        .picture_id          = VA_INVALID_SURFACE,
        .frame_idx           = 0,
        .flags               = VA_PICTURE_H264_INVALID,
        .TopFieldOrderCnt    = 0,
        .BottomFieldOrderCnt = 0,
    };
}
struct comparison_fcn1_context {
int descending;
int what;
const VAPictureH264 *ReferenceFrames;
};
static
gint
comparison_fcn_1(gconstpointer p1, gconstpointer p2, gpointer context)
{
    // g_qsort_with_data comparator over indices into ctx->ReferenceFrames.
    // ctx->what selects the sort key (1 = TopFieldOrderCnt,
    // 2 = BottomFieldOrderCnt, 3 = frame_idx); ctx->descending flips order.
    const int idx_1 = *(const int *)p1;
    const int idx_2 = *(const int *)p2;
    struct comparison_fcn1_context *ctx = context;
    int value1 = 0, value2 = 0;
    switch (ctx->what) {
    case 1: // top field
        value1 = ctx->ReferenceFrames[idx_1].TopFieldOrderCnt;
        value2 = ctx->ReferenceFrames[idx_2].TopFieldOrderCnt;
        break;
    case 2: // bottom field
        value1 = ctx->ReferenceFrames[idx_1].BottomFieldOrderCnt;
        value2 = ctx->ReferenceFrames[idx_2].BottomFieldOrderCnt;
        break;
    case 3: // frame_idx
        value1 = ctx->ReferenceFrames[idx_1].frame_idx;
        value2 = ctx->ReferenceFrames[idx_2].frame_idx;
        break;
    default:
        assert(0 && "wrong what field");
    }
    // three-way compare without branches, then apply direction
    const int ordering = (value1 > value2) - (value1 < value2);
    return ctx->descending ? -ordering : ordering;
}
static
void
fill_ref_pic_list(struct slice_parameters *sp, const VAPictureParameterBufferH264 *vapp)
{
// Build RefPicList0/RefPicList1 from vapp->ReferenceFrames according to the
// slice type: P/SP lists short-term refs (by TopFieldOrderCnt, descending)
// followed by long-term refs (ascending); progressive B splits short-term
// refs around the current picture's TopFieldOrderCnt. I/SI slices need none.
int idcs_asc[32], idcs_desc[32];
struct comparison_fcn1_context ctx;
if (sp->slice_type == SLICE_TYPE_I || sp->slice_type == SLICE_TYPE_SI)
return;
ctx.ReferenceFrames = vapp->ReferenceFrames;
// collect valid reference frames; idcs_* hold their indices for sorting
int frame_count = 0;
for (int k = 0; k < vapp->num_ref_frames; k ++) {
if (vapp->ReferenceFrames[k].flags & VA_PICTURE_H264_INVALID)
continue;
sp->RefPicList0[frame_count] = vapp->ReferenceFrames[k];
idcs_asc[frame_count] = idcs_desc[frame_count] = k;
frame_count ++;
}
if (sp->slice_type == SLICE_TYPE_P || sp->slice_type == SLICE_TYPE_SP) {
// TODO: implement interlaced P slices
ctx.what = 1;
ctx.descending = 0;
g_qsort_with_data(idcs_asc, frame_count, sizeof(idcs_asc[0]), &comparison_fcn_1, &ctx);
ctx.descending = 1;
g_qsort_with_data(idcs_desc, frame_count, sizeof(idcs_desc[0]), &comparison_fcn_1, &ctx);
// list 0: short-term refs descending, then long-term refs ascending
int ptr = 0;
for (int k = 0; k < frame_count; k ++)
if (vapp->ReferenceFrames[idcs_desc[k]].flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE)
sp->RefPicList0[ptr++] = vapp->ReferenceFrames[idcs_desc[k]];
for (int k = 0; k < frame_count; k ++)
if (vapp->ReferenceFrames[idcs_asc[k]].flags & VA_PICTURE_H264_LONG_TERM_REFERENCE)
sp->RefPicList0[ptr++] = vapp->ReferenceFrames[idcs_asc[k]];
} else if (sp->slice_type == SLICE_TYPE_B && !vapp->pic_fields.bits.field_pic_flag) {
ctx.what = 1;
ctx.descending = 0;
g_qsort_with_data(idcs_asc, frame_count, sizeof(idcs_asc[0]), &comparison_fcn_1, &ctx);
ctx.descending = 1;
g_qsort_with_data(idcs_desc, frame_count, sizeof(idcs_desc[0]), &comparison_fcn_1, &ctx);
// list 0: past short-term refs (desc), then future (asc);
// list 1: future short-term refs (asc), then past (desc);
// both followed by long-term refs (asc)
int ptr0 = 0;
int ptr1 = 0;
for (int k = 0; k < frame_count; k ++) {
const VAPictureH264 *rf = &vapp->ReferenceFrames[idcs_desc[k]];
if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE)
if (rf->TopFieldOrderCnt < vapp->CurrPic.TopFieldOrderCnt)
sp->RefPicList0[ptr0++] = *rf;
rf = &vapp->ReferenceFrames[idcs_asc[k]];
if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE)
if (rf->TopFieldOrderCnt >= vapp->CurrPic.TopFieldOrderCnt)
sp->RefPicList1[ptr1++] = *rf;
}
for (int k = 0; k < frame_count; k ++) {
const VAPictureH264 *rf = &vapp->ReferenceFrames[idcs_asc[k]];
if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE)
if (rf->TopFieldOrderCnt >= vapp->CurrPic.TopFieldOrderCnt)
sp->RefPicList0[ptr0++] = *rf;
rf = &vapp->ReferenceFrames[idcs_desc[k]];
if (rf->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE)
if (rf->TopFieldOrderCnt < vapp->CurrPic.TopFieldOrderCnt)
sp->RefPicList1[ptr1++] = *rf;
}
for (int k = 0; k < frame_count; k ++) {
const VAPictureH264 *rf = &vapp->ReferenceFrames[idcs_asc[k]];
if (rf->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE) {
sp->RefPicList0[ptr0++] = *rf;
sp->RefPicList1[ptr1++] = *rf;
}
}
} else {
// TODO: implement interlaced B slices
assert(0 && "not implemeted: interlaced SLICE_TYPE_B sorting");
}
}
// Parse an H.264 slice header (ITU-T H.264 §7.3.3) from an RBSP bitstream and
// convert it into a VA-API slice parameter buffer.
//
//   st    — bitstream reader positioned at the first byte of a NAL unit
//   vapp  — VA picture parameters previously supplied by the VDPAU client
//   ChromaArrayType — 0 means no chroma weight syntax is present in the
//           pred_weight_table (see §7.4.2.1)
//   p_num_ref_idx_l0_active_minus1 / p_num_ref_idx_l1_active_minus1 —
//           defaults from the PPS, used unless the slice overrides them
//   vasp  — output VA slice parameter buffer, filled at the end via
//           do_fill_va_slice_parameter_buffer()
void
parse_slice_header(rbsp_state_t *st, const VAPictureParameterBufferH264 *vapp,
                   const int ChromaArrayType, unsigned int p_num_ref_idx_l0_active_minus1,
                   unsigned int p_num_ref_idx_l1_active_minus1, VASliceParameterBufferH264 *vasp)
{
    struct slice_parameters sp = { 0 };

    // mark all reference picture list entries as unused
    for (int k = 0; k < 32; k ++) {
        reset_va_picture_h264(&sp.RefPicList0[k]);
        reset_va_picture_h264(&sp.RefPicList1[k]);
    }

    // NAL unit header (§7.3.1)
    rbsp_get_u(st, 1); // forbidden_zero_bit
    sp.nal_ref_idc = rbsp_get_u(st, 2);
    sp.nal_unit_type = rbsp_get_u(st, 5);
    if (sp.nal_unit_type == 14 || sp.nal_unit_type == 20) {
        // prefix NAL unit / coded slice extension (SVC/MVC) not supported
        NOT_IMPLEMENTED("nal unit types 14 and 20");
    }

    sp.first_mb_in_slice = rbsp_get_uev(st);
    sp.slice_type = rbsp_get_uev(st);
    if (sp.slice_type > 4)
        sp.slice_type -= 5; // wrap 5-9 to 0-4
    // as now we know slice_type, time to fill RefPicListX
    fill_ref_pic_list(&sp, vapp);
    sp.pic_parameter_set_id = rbsp_get_uev(st);
    // TODO: separate_colour_plane_flag is 0 for all but YUV444. Now ok, but should detect properly.
    // See 7.3.3
    sp.frame_num = rbsp_get_u(st, vapp->seq_fields.bits.log2_max_frame_num_minus4 + 4);
    sp.field_pic_flag = 0;
    sp.bottom_field_flag = 0;
    if (!vapp->seq_fields.bits.frame_mbs_only_flag) {
        sp.field_pic_flag = rbsp_get_u(st, 1);
        if (sp.field_pic_flag) {
            sp.bottom_field_flag = rbsp_get_u(st, 1);
        }
    }
    sp.idr_pic_id = 0;
    if (sp.nal_unit_type == NAL_IDR_SLICE) // IDR picture
        sp.idr_pic_id = rbsp_get_uev(st);

    // picture order count syntax; which branch applies depends on
    // pic_order_cnt_type from the SPS
    sp.pic_order_cnt_lsb = 0;
    sp.delta_pic_order_cnt_bottom = 0;
    if (vapp->seq_fields.bits.pic_order_cnt_type == 0) {
        sp.pic_order_cnt_lsb =
            rbsp_get_u(st, vapp->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 + 4);
        if (vapp->pic_fields.bits.pic_order_present_flag &&
            !vapp->pic_fields.bits.field_pic_flag)
        {
            sp.delta_pic_order_cnt_bottom = rbsp_get_sev(st);
        }
    }
    sp.delta_pic_order_cnt[0] = 0;
    sp.delta_pic_order_cnt[1] = 0;
    if (vapp->seq_fields.bits.pic_order_cnt_type == 1 &&
        !vapp->seq_fields.bits.delta_pic_order_always_zero_flag)
    {
        sp.delta_pic_order_cnt[0] = rbsp_get_sev(st);
        if (vapp->pic_fields.bits.pic_order_present_flag && !vapp->pic_fields.bits.field_pic_flag)
            sp.delta_pic_order_cnt[1] = rbsp_get_sev(st);
    }
    sp.redundant_pic_cnt = 0;
    if (vapp->pic_fields.bits.redundant_pic_cnt_present_flag)
        sp.redundant_pic_cnt = rbsp_get_uev(st);
    sp.direct_spatial_mv_pred_flag = 0;
    if (sp.slice_type == SLICE_TYPE_B)
        sp.direct_spatial_mv_pred_flag = rbsp_get_u(st, 1);

    // active reference counts: start from the PPS defaults passed in by the
    // caller; the slice may override them when
    // num_ref_idx_active_override_flag is set
    sp.num_ref_idx_active_override_flag = 0;
    sp.num_ref_idx_l0_active_minus1 = 0;
    sp.num_ref_idx_l1_active_minus1 = 0;
    if (sp.slice_type == SLICE_TYPE_P || sp.slice_type == SLICE_TYPE_SP ||
        sp.slice_type == SLICE_TYPE_B)
    {
        sp.num_ref_idx_l0_active_minus1 = p_num_ref_idx_l0_active_minus1;
        if (sp.slice_type != SLICE_TYPE_P)
            sp.num_ref_idx_l1_active_minus1 = p_num_ref_idx_l1_active_minus1;
        sp.num_ref_idx_active_override_flag = rbsp_get_u(st, 1);
        if (sp.num_ref_idx_active_override_flag) {
            sp.num_ref_idx_l0_active_minus1 = rbsp_get_uev(st);
            if (sp.slice_type == SLICE_TYPE_B)
                sp.num_ref_idx_l1_active_minus1 = rbsp_get_uev(st);
        }
    }
    if (sp.nal_unit_type == 20) {
        NOT_IMPLEMENTED("nal unit type 20");
    } else {
        parse_ref_pic_list_modification(st, vapp, &sp);
    }
    // here fields {luma,chroma}_weight_l{0,1}_flag differ from same-named flags from
    // H.264 recommendation. Each of those flags should be set to 1 if any of
    // weight tables differ from default
    sp.luma_weight_l0_flag = 0;
    sp.luma_weight_l1_flag = 0;
    sp.chroma_weight_l0_flag = 0;
    sp.chroma_weight_l1_flag = 0;
    if ((vapp->pic_fields.bits.weighted_pred_flag &&
         (sp.slice_type == SLICE_TYPE_P || sp.slice_type == SLICE_TYPE_SP)) ||
        (vapp->pic_fields.bits.weighted_bipred_idc == 1 && sp.slice_type == SLICE_TYPE_B))
    {
        parse_pred_weight_table(st, ChromaArrayType, &sp);
    }
    if (sp.nal_ref_idc != 0) {
        // slice belongs to a reference picture: dec_ref_pic_marking() follows
        parse_dec_ref_pic_marking(st, &sp);
    }
    sp.cabac_init_idc = 0;
    if (vapp->pic_fields.bits.entropy_coding_mode_flag &&
        sp.slice_type != SLICE_TYPE_I && sp.slice_type != SLICE_TYPE_SI)
    {
        sp.cabac_init_idc = rbsp_get_uev(st);
    }
    sp.slice_qp_delta = rbsp_get_sev(st);
    sp.sp_for_switch_flag = 0;
    sp.slice_qs_delta = 0;
    if (sp.slice_type == SLICE_TYPE_SP || sp.slice_type == SLICE_TYPE_SI) {
        if (sp.slice_type == SLICE_TYPE_SP)
            sp.sp_for_switch_flag = rbsp_get_u(st, 1);
        sp.slice_qs_delta = rbsp_get_sev(st);
    }
    sp.disable_deblocking_filter_idc = 0;
    sp.slice_alpha_c0_offset_div2 = 0;
    sp.slice_beta_offset_div2 = 0;
    if (vapp->pic_fields.bits.deblocking_filter_control_present_flag) {
        sp.disable_deblocking_filter_idc = rbsp_get_uev(st);
        if (sp.disable_deblocking_filter_idc != 1) {
            sp.slice_alpha_c0_offset_div2 = rbsp_get_sev(st);
            sp.slice_beta_offset_div2 = rbsp_get_sev(st);
        }
    }
    if (vapp->num_slice_groups_minus1 > 0 && vapp->slice_group_map_type >= 3 &&
        vapp->slice_group_map_type <= 5)
    {
        // slice_group_change_cycle has a picture-size-dependent bit length
        NOT_IMPLEMENTED("don't know what length to consume\n");
    }

    do_fill_va_slice_parameter_buffer(&sp, vasp, st->bits_eaten);
}
// ref_pic_list_modification() syntax (H.264 §7.3.3.1). For list 0 the
// short-term reordering process of §8.2.4.3.1 is applied directly to
// sp->RefPicList0; long-term reordering (idc == 2) and the whole of the
// list-1 modification are only consumed from the bitstream, not applied
// (marked NOT_IMPLEMENTED below).
static
void
parse_ref_pic_list_modification(rbsp_state_t *st, const VAPictureParameterBufferH264 *vapp,
                                struct slice_parameters *sp)
{
    const int MaxFrameNum = 1 << (vapp->seq_fields.bits.log2_max_frame_num_minus4 + 4);
    // MaxPicNum is a power of two, so the wrap below can use a bit mask
    const int MaxPicNum = (vapp->pic_fields.bits.field_pic_flag) ? 2*MaxFrameNum : MaxFrameNum;

    if (sp->slice_type != SLICE_TYPE_I && sp->slice_type != SLICE_TYPE_SI) {
        int ref_pic_list_modification_flag_l0 = rbsp_get_u(st, 1);
        if (ref_pic_list_modification_flag_l0) {
            int modification_of_pic_nums_idc;
            int refIdxL0 = 0;
            // picNum prediction starts at the current picture's frame_num
            unsigned int picNumL0 = vapp->frame_num;
            do {
                modification_of_pic_nums_idc = rbsp_get_uev(st);
                if (modification_of_pic_nums_idc < 2) {
                    // short-term reference: compute the target picNum (8-34/8-35)
                    int abs_diff_pic_num_minus1 = rbsp_get_uev(st);
                    if (modification_of_pic_nums_idc == 0) {
                        picNumL0 -= (abs_diff_pic_num_minus1 + 1);
                    } else { // modification_of_pic_nums_idc == 1
                        picNumL0 += (abs_diff_pic_num_minus1 + 1);
                    }
                    // wrap picNumL0
                    picNumL0 &= (MaxPicNum - 1);
                    // there is no need to subtract MaxPicNum as in (8-36) in 8.2.4.3.1
                    // because frame_num already wrapped

                    // find the short-term reference frame with that pic num
                    int j;
                    for (j = 0; j < vapp->num_ref_frames; j ++) {
                        if (vapp->ReferenceFrames[j].flags & VA_PICTURE_H264_INVALID)
                            continue;
                        if (vapp->ReferenceFrames[j].frame_idx == picNumL0 &&
                            (vapp->ReferenceFrames[j].flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE))
                            break;
                    }
                    assert (j < vapp->num_ref_frames);
                    // (8-38): shift entries right, insert the picture at
                    // refIdxL0, then squeeze out its duplicate occurrence
                    VAPictureH264 swp = vapp->ReferenceFrames[j];
                    for (int k = sp->num_ref_idx_l0_active_minus1; k > refIdxL0; k --)
                        sp->RefPicList0[k] = sp->RefPicList0[k-1];
                    sp->RefPicList0[refIdxL0 ++] = swp;
                    j = refIdxL0;
                    for (int k = refIdxL0; k <= sp->num_ref_idx_l0_active_minus1 + 1; k ++) {
                        if (sp->RefPicList0[k].frame_idx != picNumL0 &&
                            (sp->RefPicList0[k].flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE))
                        {
                            sp->RefPicList0[j++] = sp->RefPicList0[k];
                        }
                    }
                } else if (modification_of_pic_nums_idc == 2) {
                    // long-term reordering: bits consumed, list not updated
                    NOT_IMPLEMENTED("long");
                    fprintf(stderr, "long_term_pic_num = %d\n", rbsp_get_uev(st));
                }
            } while (modification_of_pic_nums_idc != 3);
        }
    }
    if (sp->slice_type == SLICE_TYPE_B) {
        int ref_pic_list_modification_flag_l1 = rbsp_get_u(st, 1);
        if (ref_pic_list_modification_flag_l1) {
            // list-1 modification: bits consumed to keep the bitstream in
            // sync, but RefPicList1 itself is left unmodified
            NOT_IMPLEMENTED("ref pic list modification 1"); // TODO: implement this
            int modification_of_pic_nums_idc;
            do {
                modification_of_pic_nums_idc = rbsp_get_uev(st);
                if (modification_of_pic_nums_idc == 0 ||
                    modification_of_pic_nums_idc == 1)
                {
                    fprintf(stderr, "abs_diff_pic_num_minus1 = %d\n", rbsp_get_uev(st));
                } else if (modification_of_pic_nums_idc == 2) {
                    fprintf(stderr, "long_term_pic_num = %d\n", rbsp_get_uev(st));
                }
            } while (modification_of_pic_nums_idc != 3);
        }
    }
}
static
void
fill_default_pred_weight_table(struct slice_parameters *sp)
{
    // Populate both prediction weight tables with the spec defaults:
    // weight = 1 << log2_weight_denom, offset = 0. Explicit values parsed
    // later in parse_pred_weight_table() overwrite individual entries.
    const int luma_default = 1 << sp->luma_log2_weight_denom;
    const int chroma_default = 1 << sp->chroma_log2_weight_denom;

    for (int idx = 0; idx <= sp->num_ref_idx_l0_active_minus1; idx ++) {
        sp->luma_weight_l0[idx] = luma_default;
        sp->luma_offset_l0[idx] = 0;
        for (int c = 0; c < 2; c ++) {
            sp->chroma_weight_l0[idx][c] = chroma_default;
            sp->chroma_offset_l0[idx][c] = 0;
        }
    }
    for (int idx = 0; idx <= sp->num_ref_idx_l1_active_minus1; idx ++) {
        sp->luma_weight_l1[idx] = luma_default;
        sp->luma_offset_l1[idx] = 0;
        for (int c = 0; c < 2; c ++) {
            sp->chroma_weight_l1[idx][c] = chroma_default;
            sp->chroma_offset_l1[idx][c] = 0;
        }
    }
}
// pred_weight_table() syntax (H.264 §7.3.3.2). Installs default weights
// first, then overwrites entries explicitly present in the bitstream.
// sp->{luma,chroma}_weight_l{0,1}_flag are set to 1 only when a parsed
// weight differs from the default weight value. NOTE(review): offsets are
// not considered when setting those flags — confirm that is intended, since
// the comment in parse_slice_header says the flags mark any deviation from
// the default table.
static
void
parse_pred_weight_table(rbsp_state_t *st, const int ChromaArrayType, struct slice_parameters *sp)
{
    sp->luma_log2_weight_denom = rbsp_get_uev(st);
    sp->chroma_log2_weight_denom = 0;
    if (ChromaArrayType != 0)   // chroma syntax absent for monochrome (§7.4.2.1)
        sp->chroma_log2_weight_denom = rbsp_get_uev(st);
    fill_default_pred_weight_table(sp);
    const int default_luma_weight = (1 << sp->luma_log2_weight_denom);
    const int default_chroma_weight = (1 << sp->chroma_log2_weight_denom);

    // list 0 weights
    for (int k = 0; k <= sp->num_ref_idx_l0_active_minus1; k ++) {
        int luma_weight_l0_flag = rbsp_get_u(st, 1);
        if (luma_weight_l0_flag) {
            sp->luma_weight_l0[k] = rbsp_get_sev(st);
            sp->luma_offset_l0[k] = rbsp_get_sev(st);
            if (default_luma_weight != sp->luma_weight_l0[k])
                sp->luma_weight_l0_flag = 1;
        }
        if (ChromaArrayType != 0) {
            int chroma_weight_l0_flag = rbsp_get_u(st, 1);
            if (chroma_weight_l0_flag) {
                for (int j = 0; j < 2; j ++) {  // j = 0: Cb, j = 1: Cr
                    sp->chroma_weight_l0[k][j] = rbsp_get_sev(st);
                    sp->chroma_offset_l0[k][j] = rbsp_get_sev(st);
                    if (default_chroma_weight != sp->chroma_weight_l0[k][j])
                        sp->chroma_weight_l0_flag = 1;
                }
            }
        }
    }
    // list 1 weights are present only for B slices
    if (sp->slice_type == SLICE_TYPE_B) {
        for (int k = 0; k <= sp->num_ref_idx_l1_active_minus1; k ++) {
            int luma_weight_l1_flag = rbsp_get_u(st, 1);
            if (luma_weight_l1_flag) {
                sp->luma_weight_l1[k] = rbsp_get_sev(st);
                sp->luma_offset_l1[k] = rbsp_get_sev(st);
                if (default_luma_weight != sp->luma_weight_l1[k])
                    sp->luma_weight_l1_flag = 1;
            }
            if (ChromaArrayType != 0) {
                int chroma_weight_l1_flag = rbsp_get_u(st, 1);
                if (chroma_weight_l1_flag) {
                    for (int j = 0; j < 2; j ++) {
                        sp->chroma_weight_l1[k][j] = rbsp_get_sev(st);
                        sp->chroma_offset_l1[k][j] = rbsp_get_sev(st);
                        if (default_chroma_weight != sp->chroma_weight_l1[k][j])
                            sp->chroma_weight_l1_flag = 1;
                    }
                }
            }
        }
    }
}
static
void
parse_dec_ref_pic_marking(rbsp_state_t *st, struct slice_parameters *sp)
{
    // dec_ref_pic_marking() syntax, H.264 §7.3.3.3. For IDR slices two flags
    // are stored; for other slices the memory management control operations
    // are only consumed from the bitstream to stay in sync — the actual
    // reference marking is performed on the client side.
    if (sp->nal_unit_type == NAL_IDR_SLICE) {
        sp->no_output_of_prior_pics_flag = rbsp_get_u(st, 1);
        sp->long_term_reference_flag = rbsp_get_u(st, 1);
        return;
    }

    if (!rbsp_get_u(st, 1))     // adaptive_ref_pic_marking_mode_flag
        return;

    int mmco;
    do {
        mmco = rbsp_get_uev(st);    // memory_management_control_operation
        switch (mmco) {
        case 1:
            rbsp_get_uev(st);       // difference_of_pic_nums_minus1
            break;
        case 2:
            rbsp_get_uev(st);       // long_term_pic_num
            break;
        case 3:
            rbsp_get_uev(st);       // difference_of_pic_nums_minus1
            rbsp_get_uev(st);       // long_term_frame_idx
            break;
        case 4:
            rbsp_get_uev(st);       // max_long_term_frame_idx_plus1
            break;
        case 6:
            rbsp_get_uev(st);       // long_term_frame_idx
            break;
        default:
            break;                  // 0 (stop) and 5 carry no payload
        }
    } while (mmco != 0);
}
libvdpau-va-gl-0.3.6/src/h264-parse.h 0000664 0000000 0000000 00000001210 12665616006 0017067 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#ifndef VA_GL_SRC_H264_PARSE_H
#define VA_GL_SRC_H264_PARSE_H
#include
#include "bitstream.h"
void
parse_slice_header(rbsp_state_t *st, const VAPictureParameterBufferH264 *vapp,
const int ChromaArrayType, unsigned int p_num_ref_idx_l0_active_minus1,
unsigned int p_num_ref_idx_l1_active_minus1, VASliceParameterBufferH264 *vasp);
void
reset_va_picture_h264(VAPictureH264 *p);
#endif /* VA_GL_SRC_H264_PARSE_H */
libvdpau-va-gl-0.3.6/src/handle-storage.c 0000664 0000000 0000000 00000012415 12665616006 0020175 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#define _XOPEN_SOURCE 600
#include "handle-storage.h"
#include
#include
#include
#include "api.h"
static GHashTable *vdp_handles;
static GHashTable *xdpy_copies; //< Copies of X Display connections
static GHashTable *xdpy_copies_refcount; //< Reference count of X Display connection copy
static uint32_t next_handle_id;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
void
handle_initialize_storage(void)
{
    // Create the three lookup tables used by the handle storage. Must be
    // called before any other handle_* function.
    pthread_mutex_lock(&lock);
    next_handle_id = 1;     // handle ids start from 1
    vdp_handles          = g_hash_table_new(g_direct_hash, g_direct_equal);
    xdpy_copies          = g_hash_table_new(g_direct_hash, g_direct_equal);
    xdpy_copies_refcount = g_hash_table_new(g_direct_hash, g_direct_equal);
    pthread_mutex_unlock(&lock);
}
// Register `data` in the handle storage and return its new integer handle.
// `data` must point to a structure whose first member layout matches
// VdpGenericData; its per-entry mutex is initialized here as recursive, so
// the thread that holds it may lock it again.
// Freed handle ids are reused: the scan below skips ids still present in the
// table.
int
handle_insert(void *data)
{
    pthread_mutexattr_t mattr;
    pthread_mutexattr_init(&mattr);
    pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
    VdpGenericData *gh = data;
    pthread_mutex_init(&gh->lock, &mattr);
    pthread_mutexattr_destroy(&mattr);

    pthread_mutex_lock(&lock);
    // advance past ids that are still occupied
    while (g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(next_handle_id)))
        next_handle_id ++;
    int id = next_handle_id ++;
    g_hash_table_insert(vdp_handles, GINT_TO_POINTER(id), data);
    pthread_mutex_unlock(&lock);
    return id;
}
// lock unsafe function
static
int
_is_valid(int handle, HandleType type)
{
    // Caller must hold the global `lock`. Returns 1 when `handle` exists and
    // its type matches `type` (HANDLETYPE_ANY matches every entry).
    VdpGenericData *entry = g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(handle));
    if (entry == NULL)
        return 0;
    return (HANDLETYPE_ANY == type || entry->type == type) ? 1 : 0;
}
// Look up `handle`, validate its type, and return the entry with its
// per-entry mutex held (pair with handle_release). Returns NULL for an
// invalid handle or a type mismatch.
// The entry lock is only try-locked while the global lock is held; on
// contention the global lock is dropped and the attempt is retried after a
// 1 µs sleep, so a holder of the entry lock can still use the storage
// without deadlocking against this function.
void *
handle_acquire(int handle, HandleType type)
{
    VdpGenericData *res = NULL;
    while (1) {
        pthread_mutex_lock(&lock);
        if (!_is_valid(handle, type)) {
            res = NULL;
            break;
        }
        res = g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(handle));
        if (pthread_mutex_trylock(&res->lock) == 0)
            break;  // got the entry lock; leave with global lock still held
        pthread_mutex_unlock(&lock);
        usleep(1);
    }
    pthread_mutex_unlock(&lock);
    return res;
}
void
handle_release(int handle)
{
    // Release the per-entry mutex taken by handle_acquire. Unknown handles
    // are ignored.
    pthread_mutex_lock(&lock);
    VdpGenericData *entry = g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(handle));
    if (entry != NULL)
        pthread_mutex_unlock(&entry->lock);
    pthread_mutex_unlock(&lock);
}
// Remove `handle` from the storage. The entry's per-entry mutex is unlocked
// before removal — callers are expected to arrive holding it (via
// handle_acquire). The entry itself is not freed here; only the table slot
// is released, making the id available for reuse by handle_insert.
void
handle_expunge(int handle)
{
    pthread_mutex_lock(&lock);
    if (_is_valid(handle, HANDLETYPE_ANY)) {
        VdpGenericData *gh = g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(handle));
        if (gh)
            pthread_mutex_unlock(&gh->lock);
        g_hash_table_remove(vdp_handles, GINT_TO_POINTER(handle));
    }
    pthread_mutex_unlock(&lock);
}
void
handle_destory_storage(void)
{
    // Tear down all storage tables. (The name keeps its historical typo,
    // "destory", because it is part of the public interface declared in
    // handle-storage.h.)
    pthread_mutex_lock(&lock);
    g_hash_table_unref(vdp_handles);
    g_hash_table_unref(xdpy_copies);
    g_hash_table_unref(xdpy_copies_refcount);
    vdp_handles = NULL;
    xdpy_copies = NULL;
    xdpy_copies_refcount = NULL;
    pthread_mutex_unlock(&lock);
}
// Invoke `callback(handle, entry, param)` for every entry currently in the
// storage. A snapshot of the key set is taken first, and the global lock is
// dropped around each callback so the callback itself may use the handle
// storage.
void
handle_execute_for_all(void (*callback)(int idx, void *entry, void *p), void *param)
{
    pthread_mutex_lock(&lock);
    // snapshot the keys; entries may be added/removed while iterating
    GList *tmp = g_hash_table_get_keys(vdp_handles);
    GList *keys = g_list_copy(tmp);
    g_list_free(tmp);

    GList *ptr = g_list_first(keys);
    while (ptr) {
        HandleType handle = GPOINTER_TO_INT(ptr->data);
        void *item = g_hash_table_lookup(vdp_handles, GINT_TO_POINTER(handle));
        if (item) {
            pthread_mutex_unlock(&lock);
            // TODO: race condition. Supply integer handle instead of pointer to fix.
            callback(handle, item, param);
            pthread_mutex_lock(&lock);
        }
        ptr = g_list_next(ptr);
    }
    g_list_free(keys);
    pthread_mutex_unlock(&lock);
}
// Return a private copy of the X11 Display connection `dpy_orig`, opening a
// new connection to the same display on first use and reference-counting
// subsequent requests. Returns NULL if a new connection cannot be opened.
// Pair every successful call with handle_xdpy_unref().
void *
handle_xdpy_ref(void *dpy_orig)
{
    pthread_mutex_lock(&lock);
    Display *dpy = g_hash_table_lookup(xdpy_copies, dpy_orig);
    if (NULL == dpy) {
        // first reference: duplicate the connection
        dpy = XOpenDisplay(XDisplayString(dpy_orig));
        if (!dpy)
            goto quit;
        g_hash_table_replace(xdpy_copies, dpy_orig, dpy);
        g_hash_table_replace(xdpy_copies_refcount, dpy_orig, GINT_TO_POINTER(1));
    } else {
        int refcount = GPOINTER_TO_INT(g_hash_table_lookup(xdpy_copies_refcount, dpy_orig));
        g_hash_table_replace(xdpy_copies_refcount, dpy_orig, GINT_TO_POINTER(refcount+1));
    }
quit:
    pthread_mutex_unlock(&lock);
    return dpy;
}
// Drop one reference to the private Display copy created for `dpy_orig` by
// handle_xdpy_ref(). When the last reference is gone the duplicated
// connection is closed and both bookkeeping entries are removed.
// An unbalanced unref (no matching ref on record) is now ignored; the
// previous implementation stored a refcount of -1 in the table, permanently
// corrupting the bookkeeping for that display.
void
handle_xdpy_unref(void *dpy_orig)
{
    pthread_mutex_lock(&lock);
    int refcount = GPOINTER_TO_INT(g_hash_table_lookup(xdpy_copies_refcount, dpy_orig));
    if (refcount <= 0) {
        // nothing on record: unbalanced unref, nothing to do
        pthread_mutex_unlock(&lock);
        return;
    }
    refcount = refcount - 1;
    if (0 == refcount) {
        // do close connection, nobody refers it anymore
        Display *dpy = g_hash_table_lookup(xdpy_copies, dpy_orig);
        XCloseDisplay(dpy);
        g_hash_table_remove(xdpy_copies, dpy_orig);
        g_hash_table_remove(xdpy_copies_refcount, dpy_orig);
    } else {
        // just update refcount
        g_hash_table_replace(xdpy_copies_refcount, dpy_orig, GINT_TO_POINTER(refcount));
    }
    pthread_mutex_unlock(&lock);
}
libvdpau-va-gl-0.3.6/src/handle-storage.h 0000664 0000000 0000000 00000003262 12665616006 0020202 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#ifndef VA_GL_SRC_HANDLE_STORAGE_H
#define VA_GL_SRC_HANDLE_STORAGE_H
#include
#include
typedef uint32_t HandleType;
#define HANDLETYPE_ANY (HandleType)0
#define HANDLETYPE_DEVICE (HandleType)1
#define HANDLETYPE_PRESENTATION_QUEUE_TARGET (HandleType)2
#define HANDLETYPE_PRESENTATION_QUEUE (HandleType)3
#define HANDLETYPE_VIDEO_MIXER (HandleType)4
#define HANDLETYPE_OUTPUT_SURFACE (HandleType)5
#define HANDLETYPE_VIDEO_SURFACE (HandleType)6
#define HANDLETYPE_BITMAP_SURFACE (HandleType)7
#define HANDLETYPE_DECODER (HandleType)8
void handle_initialize_storage(void);
int handle_insert(void *data);
void *handle_acquire(int handle, HandleType type);
void handle_release(int handle);
void handle_expunge(int handle);
void handle_destory_storage(void);
void handle_execute_for_all(void (*callback)(int idx, void *entry, void *p), void *param);
void *handle_xdpy_ref(void *dpy_orig);
void handle_xdpy_unref(void *dpy_orig);
static inline
void
free_list_push(int32_t *free_list, int32_t *free_list_head, int32_t value)
{
    // Return slot `value` to the free list: link it to the current head,
    // then make it the new head.
    int32_t old_head = *free_list_head;
    free_list[value] = old_head;
    *free_list_head = value;
}
static inline
int32_t
free_list_pop(int32_t *free_list, int32_t *free_list_head)
{
    // Detach and return the head of the free list. A negative head is the
    // empty-list sentinel and is returned unchanged.
    const int32_t head = *free_list_head;
    if (head < 0)
        return head;
    *free_list_head = free_list[head];
    return head;
}
#endif /* VA_GL_SRC_HANDLE_STORAGE_H */
libvdpau-va-gl-0.3.6/src/reverse-constant.c 0000664 0000000 0000000 00000027347 12665616006 0020614 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#include "reverse-constant.h"
#include
#include
#define CASE(q) case q: return #q
// Map a VdpFuncId constant to its symbolic name for trace output.
// Relies on the CASE macro defined above, which expands to
// `case q: return #q`.
const char *
reverse_func_id(VdpFuncId func_id)
{
    switch (func_id) {
    CASE(VDP_FUNC_ID_GET_ERROR_STRING);
    CASE(VDP_FUNC_ID_GET_PROC_ADDRESS);
    CASE(VDP_FUNC_ID_GET_API_VERSION);
    CASE(VDP_FUNC_ID_GET_INFORMATION_STRING);
    CASE(VDP_FUNC_ID_DEVICE_DESTROY);
    CASE(VDP_FUNC_ID_GENERATE_CSC_MATRIX);
    CASE(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES);
    CASE(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES);
    CASE(VDP_FUNC_ID_VIDEO_SURFACE_CREATE);
    CASE(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY);
    CASE(VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS);
    CASE(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR);
    CASE(VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_CREATE);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR);
    CASE(VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES);
    CASE(VDP_FUNC_ID_BITMAP_SURFACE_CREATE);
    CASE(VDP_FUNC_ID_BITMAP_SURFACE_DESTROY);
    CASE(VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS);
    CASE(VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE);
    CASE(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_VIDEO_SURFACE_LUMA);
    CASE(VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES);
    CASE(VDP_FUNC_ID_DECODER_CREATE);
    CASE(VDP_FUNC_ID_DECODER_DESTROY);
    CASE(VDP_FUNC_ID_DECODER_GET_PARAMETERS);
    CASE(VDP_FUNC_ID_DECODER_RENDER);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_CREATE);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_DESTROY);
    CASE(VDP_FUNC_ID_VIDEO_MIXER_RENDER);
    CASE(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY);
    CASE(VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE);
    CASE(VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY);
    CASE(VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR);
    CASE(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR);
    CASE(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME);
    CASE(VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY);
    CASE(VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE);
    CASE(VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS);
    CASE(VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER);
    CASE(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11);
    default:
        return "Unknown";
    }
}
// Map a VdpVideoMixerFeature constant to its symbolic name for trace output.
const char *
reverse_video_mixer_feature(VdpVideoMixerFeature mixer_feature)
{
    switch (mixer_feature) {
    CASE(VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL);
    CASE(VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL);
    CASE(VDP_VIDEO_MIXER_FEATURE_INVERSE_TELECINE);
    CASE(VDP_VIDEO_MIXER_FEATURE_NOISE_REDUCTION);
    CASE(VDP_VIDEO_MIXER_FEATURE_SHARPNESS);
    CASE(VDP_VIDEO_MIXER_FEATURE_LUMA_KEY);
    CASE(VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1);
    CASE(VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L2);
    CASE(VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L3);
    CASE(VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L4);
    CASE(VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L5);
    CASE(VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L6);
    CASE(VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L7);
    CASE(VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L8);
    CASE(VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L9);
    default:
        return "Unknown video mixer feature";
    }
}
// Map a VdpVideoMixerAttribute constant to its symbolic name for tracing.
const char *
reverse_video_mixer_attribute(VdpVideoMixerAttribute attr)
{
    switch (attr) {
    case VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR:
        return "VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR";
    case VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX:
        return "VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX";
    case VDP_VIDEO_MIXER_ATTRIBUTE_NOISE_REDUCTION_LEVEL:
        return "VDP_VIDEO_MIXER_ATTRIBUTE_NOISE_REDUCTION_LEVEL";
    case VDP_VIDEO_MIXER_ATTRIBUTE_SHARPNESS_LEVEL:
        return "VDP_VIDEO_MIXER_ATTRIBUTE_SHARPNESS_LEVEL";
    case VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MIN_LUMA:
        return "VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MIN_LUMA";
    case VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MAX_LUMA:
        return "VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MAX_LUMA";
    case VDP_VIDEO_MIXER_ATTRIBUTE_SKIP_CHROMA_DEINTERLACE:
        return "VDP_VIDEO_MIXER_ATTRIBUTE_SKIP_CHROMA_DEINTERLACE";
    default:
        return "Unknown video mixer attribute";
    }
}
// Map a VdpRGBAFormat constant to its symbolic name for tracing.
const char *
reverse_rgba_format(VdpRGBAFormat rgba_format)
{
    switch (rgba_format) {
    case VDP_RGBA_FORMAT_B8G8R8A8:    return "VDP_RGBA_FORMAT_B8G8R8A8";
    case VDP_RGBA_FORMAT_R8G8B8A8:    return "VDP_RGBA_FORMAT_R8G8B8A8";
    case VDP_RGBA_FORMAT_R10G10B10A2: return "VDP_RGBA_FORMAT_R10G10B10A2";
    case VDP_RGBA_FORMAT_B10G10R10A2: return "VDP_RGBA_FORMAT_B10G10R10A2";
    case VDP_RGBA_FORMAT_A8:          return "VDP_RGBA_FORMAT_A8";
    default:                          return "Unknown RGBA format";
    }
}
// Map a VdpChromaType constant to its symbolic name for tracing.
const char *
reverse_chroma_type(VdpChromaType chroma_type)
{
    switch (chroma_type) {
    case VDP_CHROMA_TYPE_420: return "VDP_CHROMA_TYPE_420";
    case VDP_CHROMA_TYPE_422: return "VDP_CHROMA_TYPE_422";
    case VDP_CHROMA_TYPE_444: return "VDP_CHROMA_TYPE_444";
    default:                  return "Unknown chroma type";
    }
}
// Map a VdpYCbCrFormat constant to its symbolic name for tracing.
const char *
reverse_ycbcr_format(VdpYCbCrFormat ycbcr_format)
{
    switch (ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:     return "VDP_YCBCR_FORMAT_NV12";
    case VDP_YCBCR_FORMAT_YV12:     return "VDP_YCBCR_FORMAT_YV12";
    case VDP_YCBCR_FORMAT_UYVY:     return "VDP_YCBCR_FORMAT_UYVY";
    case VDP_YCBCR_FORMAT_YUYV:     return "VDP_YCBCR_FORMAT_YUYV";
    case VDP_YCBCR_FORMAT_Y8U8V8A8: return "VDP_YCBCR_FORMAT_Y8U8V8A8";
    case VDP_YCBCR_FORMAT_V8U8Y8A8: return "VDP_YCBCR_FORMAT_V8U8Y8A8";
    default:                        return "Unknown YCbCr format";
    }
}
// Map a VdpVideoMixerPictureStructure constant to its symbolic name.
// (Function name keeps its historical "reverser_" spelling: it is declared
// that way in reverse-constant.h.)
const char *
reverser_video_mixer_picture_structure(VdpVideoMixerPictureStructure s)
{
    switch (s) {
    case VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD:
        return "VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD";
    case VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD:
        return "VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD";
    case VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME:
        return "VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME";
    default:
        return "Unknown video mixer picture structure";
    }
}
// Map a VdpOutputSurfaceRenderBlendFactor constant to its symbolic name
// for trace output.
const char *
reverse_blend_factor(VdpOutputSurfaceRenderBlendFactor blend_factor)
{
    switch (blend_factor) {
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_COLOR);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_COLOR);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_ALPHA);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_ALPHA);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_DST_COLOR);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_DST_COLOR);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA_SATURATE);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_COLOR);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_CONSTANT_ALPHA);
    CASE(VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA);
    default:
        return "Unknown blend factor";
    }
}
// Map a VdpOutputSurfaceRenderBlendEquation constant to its symbolic name.
const char *
reverse_blend_equation(VdpOutputSurfaceRenderBlendEquation blend_equation)
{
    switch (blend_equation) {
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_SUBTRACT:
        return "VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_SUBTRACT";
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_REVERSE_SUBTRACT:
        return "VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_REVERSE_SUBTRACT";
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD:
        return "VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD";
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MIN:
        return "VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MIN";
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MAX:
        return "VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_MAX";
    default:
        return "Unknown blend equation";
    }
}
// Map a VdpDecoderProfile constant to its symbolic name for trace output.
const char *
reverse_decoder_profile(VdpDecoderProfile profile)
{
    switch (profile) {
    CASE(VDP_DECODER_PROFILE_MPEG1);
    CASE(VDP_DECODER_PROFILE_MPEG2_SIMPLE);
    CASE(VDP_DECODER_PROFILE_MPEG2_MAIN);
    CASE(VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE);
    CASE(VDP_DECODER_PROFILE_H264_BASELINE);
    CASE(VDP_DECODER_PROFILE_H264_MAIN);
    CASE(VDP_DECODER_PROFILE_H264_HIGH);
    CASE(VDP_DECODER_PROFILE_VC1_SIMPLE);
    CASE(VDP_DECODER_PROFILE_VC1_MAIN);
    CASE(VDP_DECODER_PROFILE_VC1_ADVANCED);
    CASE(VDP_DECODER_PROFILE_MPEG4_PART2_SP);
    CASE(VDP_DECODER_PROFILE_MPEG4_PART2_ASP);
    CASE(VDP_DECODER_PROFILE_DIVX4_QMOBILE);
    CASE(VDP_DECODER_PROFILE_DIVX4_MOBILE);
    CASE(VDP_DECODER_PROFILE_DIVX4_HOME_THEATER);
    CASE(VDP_DECODER_PROFILE_DIVX4_HD_1080P);
    CASE(VDP_DECODER_PROFILE_DIVX5_QMOBILE);
    CASE(VDP_DECODER_PROFILE_DIVX5_MOBILE);
    CASE(VDP_DECODER_PROFILE_DIVX5_HOME_THEATER);
    CASE(VDP_DECODER_PROFILE_DIVX5_HD_1080P);
    default:
        return "Unknown decoder profile";
    }
}
// Map a VdpStatus error code to its symbolic name for trace output.
const char *
reverse_status(VdpStatus status)
{
    switch (status) {
    CASE(VDP_STATUS_OK);
    CASE(VDP_STATUS_NO_IMPLEMENTATION);
    CASE(VDP_STATUS_DISPLAY_PREEMPTED);
    CASE(VDP_STATUS_INVALID_HANDLE);
    CASE(VDP_STATUS_INVALID_POINTER);
    CASE(VDP_STATUS_INVALID_CHROMA_TYPE);
    CASE(VDP_STATUS_INVALID_Y_CB_CR_FORMAT);
    CASE(VDP_STATUS_INVALID_RGBA_FORMAT);
    CASE(VDP_STATUS_INVALID_INDEXED_FORMAT);
    CASE(VDP_STATUS_INVALID_COLOR_STANDARD);
    CASE(VDP_STATUS_INVALID_COLOR_TABLE_FORMAT);
    CASE(VDP_STATUS_INVALID_BLEND_FACTOR);
    CASE(VDP_STATUS_INVALID_BLEND_EQUATION);
    CASE(VDP_STATUS_INVALID_FLAG);
    CASE(VDP_STATUS_INVALID_DECODER_PROFILE);
    CASE(VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE);
    CASE(VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER);
    CASE(VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE);
    CASE(VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE);
    CASE(VDP_STATUS_INVALID_FUNC_ID);
    CASE(VDP_STATUS_INVALID_SIZE);
    CASE(VDP_STATUS_INVALID_VALUE);
    CASE(VDP_STATUS_INVALID_STRUCT_VERSION);
    CASE(VDP_STATUS_RESOURCES);
    CASE(VDP_STATUS_HANDLE_DEVICE_MISMATCH);
    CASE(VDP_STATUS_ERROR);
    default:
        return "Unknown VDP error";
    }
}
// Map a VdpIndexedFormat constant to its symbolic name for tracing.
const char *
reverse_indexed_format(VdpIndexedFormat indexed_format)
{
    switch (indexed_format) {
    case VDP_INDEXED_FORMAT_A4I4: return "VDP_INDEXED_FORMAT_A4I4";
    case VDP_INDEXED_FORMAT_I4A4: return "VDP_INDEXED_FORMAT_I4A4";
    case VDP_INDEXED_FORMAT_A8I8: return "VDP_INDEXED_FORMAT_A8I8";
    case VDP_INDEXED_FORMAT_I8A8: return "VDP_INDEXED_FORMAT_I8A8";
    default:                      return "Unknown indexed format";
    }
}
// Map a VdpColorTableFormat constant to its symbolic name for tracing.
const char *
reverse_color_table_format(VdpColorTableFormat color_table_format)
{
    switch (color_table_format) {
    case VDP_COLOR_TABLE_FORMAT_B8G8R8X8:
        return "VDP_COLOR_TABLE_FORMAT_B8G8R8X8";
    default:
        return "Unknown color table format";
    }
}
// Map a VdpVideoMixerParameter constant to its symbolic name for tracing.
const char *
reverse_video_mixer_parameter(VdpVideoMixerParameter parameter)
{
    switch (parameter) {
    case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH:
        return "VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH";
    case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT:
        return "VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT";
    case VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE:
        return "VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE";
    case VDP_VIDEO_MIXER_PARAMETER_LAYERS:
        return "VDP_VIDEO_MIXER_PARAMETER_LAYERS";
    default:
        return "Unknown video mixer parameter";
    }
}
// Map a VdpColorStandard constant to its symbolic name for tracing.
const char *
reverse_color_standard(VdpColorStandard color_standard)
{
    switch (color_standard) {
    case VDP_COLOR_STANDARD_ITUR_BT_601: return "VDP_COLOR_STANDARD_ITUR_BT_601";
    case VDP_COLOR_STANDARD_ITUR_BT_709: return "VDP_COLOR_STANDARD_ITUR_BT_709";
    case VDP_COLOR_STANDARD_SMPTE_240M:  return "VDP_COLOR_STANDARD_SMPTE_240M";
    default:                             return "Unknown color standard";
    }
}
// Stringify the rotation part of output-surface render flags. Only the two
// low bits carry the rotation, so they are masked before dispatch.
const char *
reverse_output_surface_render_rotate(int flags)
{
    switch (flags & 3) {
    case VDP_OUTPUT_SURFACE_RENDER_ROTATE_0:
        return "VDP_OUTPUT_SURFACE_RENDER_ROTATE_0";
    case VDP_OUTPUT_SURFACE_RENDER_ROTATE_90:
        return "VDP_OUTPUT_SURFACE_RENDER_ROTATE_90";
    case VDP_OUTPUT_SURFACE_RENDER_ROTATE_180:
        return "VDP_OUTPUT_SURFACE_RENDER_ROTATE_180";
    case VDP_OUTPUT_SURFACE_RENDER_ROTATE_270:
        return "VDP_OUTPUT_SURFACE_RENDER_ROTATE_270";
    default:
        return "Unknown rotate";
    }
}
libvdpau-va-gl-0.3.6/src/reverse-constant.h 0000664 0000000 0000000 00000002643 12665616006 0020611 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#ifndef VA_GL_SRC_REVERSE_CONSTANT_H
#define VA_GL_SRC_REVERSE_CONSTANT_H
#include
const char * reverse_status(VdpStatus status);
const char * reverse_func_id(VdpFuncId func_id);
const char * reverse_video_mixer_feature(VdpVideoMixerFeature mixer_feature);
const char * reverse_video_mixer_parameter(VdpVideoMixerParameter parameter);
const char * reverse_video_mixer_attribute(VdpVideoMixerAttribute attr);
const char * reverse_rgba_format(VdpRGBAFormat rgba_format);
const char * reverse_chroma_type(VdpChromaType chroma_type);
const char * reverse_ycbcr_format(VdpYCbCrFormat ycbcr_format);
const char * reverser_video_mixer_picture_structure(VdpVideoMixerPictureStructure s);
const char * reverse_blend_factor(VdpOutputSurfaceRenderBlendFactor blend_factor);
const char * reverse_blend_equation(VdpOutputSurfaceRenderBlendEquation blend_equation);
const char * reverse_decoder_profile(VdpDecoderProfile profile);
const char * reverse_indexed_format(VdpIndexedFormat indexed_format);
const char * reverse_color_table_format(VdpColorTableFormat color_table_format);
const char * reverse_color_standard(VdpColorStandard color_standard);
const char * reverse_output_surface_render_rotate(int flags);
#endif /* VA_GL_SRC_REVERSE_CONSTANT_H */
libvdpau-va-gl-0.3.6/src/trace.c 0000664 0000000 0000000 00000160634 12665616006 0016405 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#include
#include
#include
#include "api.h"
#include "trace.h"
#include "reverse-constant.h"
static FILE *tlog = NULL; ///< trace target
static const char *trace_header = "[VS] ";
static const char *trace_header_blank = " ";
static int trace_enabled = 1;
static void (*trace_hook)(void *, void *, int, int);
static void *trace_hook_longterm_param = NULL;
// Globally enable (non-zero flag) or disable (zero) trace output.
void
traceEnableTracing(int flag)
{
    trace_enabled = (flag != 0);
}
// Direct subsequent trace output to `target`. Does not affect traceError,
// which always writes to stderr.
void
traceSetTarget(FILE *target)
{
    tlog = target;
}
// Reset the trace target back to the default, stdout.
void
traceResetTarget(void)
{
    tlog = stdout;
}
// Install a hook invoked around every traced entry point (see
// traceCallHook). `param` is stored and passed back to the hook as its
// first argument on every invocation.
void
traceSetHook(void (*hook)(void *param1, void *param2, int origin, int after), void *param)
{
    trace_hook = hook;
    trace_hook_longterm_param = param;
}
// Invoke the installed trace hook, if any. `origin` is a VdpFuncId (or a
// negative internal marker such as -2 used by traceInfo), `after` is 0 for
// the pre-call invocation and 1 for the post-call one, and `shortterm_param`
// carries call-specific data. No-op when tracing is disabled or no hook is
// installed.
void
traceCallHook(int origin, int after, void *shortterm_param)
{
    if (!trace_enabled || !trace_hook)
        return;
    trace_hook(trace_hook_longterm_param, shortterm_param, origin, after);
}
// Set the prefix printed before each trace line and its blank-width variant
// used for continuation lines. Both strings are stored by pointer, so they
// must outlive their use.
void
traceSetHeader(const char *header, const char *header_blank)
{
    trace_header = header;
    trace_header_blank = header_blank;
}
// printf-style informational trace message. No-op when tracing is disabled.
// Calls the hook with origin -2, then writes the header and formatted
// message to the trace target. tlog must have been set beforehand (via
// traceSetTarget/traceResetTarget) — it is initialized to NULL.
void
traceInfo(const char *fmt, ...)
{
    if (!trace_enabled)
        return;
    va_list args;
    traceCallHook(-2, 0, NULL);
    fprintf(tlog, "%s", trace_header);
    va_start(args, fmt);
    vfprintf(tlog, fmt, args);
    va_end(args);
}
// printf-style error message. Always written to stderr, even when tracing
// is disabled, and without invoking the trace hook.
void
traceError(const char *fmt, ...)
{
    va_list args;
    fprintf(stderr, "%s", trace_header);
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
}
// Format a VdpRect as "(x0,y0,x1,y1)", or "NULL" for a NULL pointer.
// Returns a pointer into a ring of 8 static buffers so up to 8 results can
// appear in a single printf expression. The ring index is not synchronized,
// so concurrent callers may clobber each other's strings.
static
const char *
rect2string(VdpRect const *rect)
{
    // use buffer pool to enable printing many rects in one printf expression
    static char bufs[8][100];
    static int i_ptr = 0;
    i_ptr = (i_ptr + 1) % 8;
    char *buf = &bufs[i_ptr][0];
    if (NULL == rect) {
        snprintf(buf, 100, "NULL");
    } else {
        snprintf(buf, 100, "(%d,%d,%d,%d)", rect->x0, rect->y0, rect->x1, rect->y1);
    }
    return buf;
}
// Traced wrapper for VdpGetApiVersion: logs the call when tracing is on
// ("{full}" marks a fully implemented entry point), invokes the hook before
// and after, and forwards to vdpGetApiVersion.
VdpStatus
traceVdpGetApiVersion(uint32_t *api_version)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_GET_API_VERSION, 0, NULL);
    fprintf(tlog, "%s%s VdpGetApiVersion\n", trace_header, impl_state);
skip:;
    VdpStatus ret = vdpGetApiVersion(api_version);
    // the resulting status is passed to the hook as the short-term parameter
    traceCallHook(VDP_FUNC_ID_GET_API_VERSION, 1, (void *)ret);
    return ret;
}
VdpStatus
traceVdpDecoderQueryCapabilities(VdpDevice device,
VdpDecoderProfile profile, VdpBool *is_supported,
uint32_t *max_level, uint32_t *max_macroblocks,
uint32_t *max_width, uint32_t *max_height)
{
const char *impl_state = "{part}";
if (!trace_enabled)
goto skip;
traceCallHook(VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES, 0, NULL);
fprintf(tlog, "%s%s VdpDecoderQueryCapabilities device=%d, profile=%s(%d)\n",
trace_header, impl_state, device, reverse_decoder_profile(profile), profile);
skip:;
VdpStatus ret = vdpDecoderQueryCapabilities(device, profile, is_supported, max_level,
max_macroblocks, max_width, max_height);
traceCallHook(VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES, 1, (void*)ret);
return ret;
}
VdpStatus
traceVdpDecoderCreate(VdpDevice device, VdpDecoderProfile profile,
uint32_t width, uint32_t height, uint32_t max_references, VdpDecoder *decoder)
{
const char *impl_state = "{full}";
if (!trace_enabled)
goto skip;
traceCallHook(VDP_FUNC_ID_DECODER_CREATE, 0, NULL);
fprintf(tlog, "%s%s VdpDecoderCreate device=%d, profile=%s, width=%d, height=%d, "
"max_references=%d\n", trace_header, impl_state, device, reverse_decoder_profile(profile),
width, height, max_references);
skip:;
VdpStatus ret = vdpDecoderCreate(device, profile, width, height, max_references, decoder);
traceCallHook(VDP_FUNC_ID_DECODER_CREATE, 1, (void*)ret);
return ret;
}
VdpStatus
traceVdpDecoderDestroy(VdpDecoder decoder)
{
const char *impl_state = "{full}";
if (!trace_enabled)
goto skip;
traceCallHook(VDP_FUNC_ID_DECODER_DESTROY, 0, NULL);
fprintf(tlog, "%s%s VdpDecoderDestroy decoder=%d\n", trace_header, impl_state, decoder);
skip:;
VdpStatus ret = vdpDecoderDestroy(decoder);
traceCallHook(VDP_FUNC_ID_DECODER_DESTROY, 1, (void*)ret);
return ret;
}
VdpStatus
traceVdpDecoderGetParameters(VdpDecoder decoder,
VdpDecoderProfile *profile, uint32_t *width, uint32_t *height)
{
const char *impl_state = "{full}";
if (!trace_enabled)
goto skip;
traceCallHook(VDP_FUNC_ID_DECODER_GET_PARAMETERS, 0, NULL);
fprintf(tlog, "%s%s VdpDecoderGetParameters decoder=%d\n", trace_header, impl_state, decoder);
skip:;
VdpStatus ret = vdpDecoderGetParameters(decoder, profile, width, height);
traceCallHook(VDP_FUNC_ID_DECODER_GET_PARAMETERS, 1, (void*)ret);
return ret;
}
VdpStatus
traceVdpDecoderRender(VdpDecoder decoder, VdpVideoSurface target,
VdpPictureInfo const *picture_info, uint32_t bitstream_buffer_count,
VdpBitstreamBuffer const *bitstream_buffers)
{
const char *impl_state = "{part}";
if (!trace_enabled)
goto skip;
traceCallHook(VDP_FUNC_ID_DECODER_RENDER, 0, NULL);
fprintf(tlog, "%s%s VdpDecoderRender decoder=%d, target=%d, picture_info=%p, "
"bitstream_buffer_count=%d\n", trace_header, impl_state, decoder, target, picture_info,
bitstream_buffer_count);
skip:;
VdpStatus ret = vdpDecoderRender(decoder, target, picture_info, bitstream_buffer_count,
bitstream_buffers);
traceCallHook(VDP_FUNC_ID_DECODER_RENDER, 1, (void*)ret);
return ret;
}
/*
 * Tracing wrappers for the VdpOutputSurface capability queries.  Same
 * pattern as the rest of the file: log (when enabled), forward to the
 * real vdp* implementation, report the status via the call hook.
 */
VdpStatus
traceVdpOutputSurfaceQueryCapabilities(VdpDevice device,
                                       VdpRGBAFormat surface_rgba_format, VdpBool *is_supported,
                                       uint32_t *max_width, uint32_t *max_height)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfaceQueryCapabilities device=%d, surface_rgba_format=%s\n",
            trace_header, impl_state, device, reverse_rgba_format(surface_rgba_format));
skip:;  // empty statement: C99 forbids a declaration directly after a label
    VdpStatus ret = vdpOutputSurfaceQueryCapabilities(device, surface_rgba_format, is_supported,
                                                      max_width, max_height);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpOutputSurfaceQueryGetPutBitsNativeCapabilities(VdpDevice device,
                                                       VdpRGBAFormat surface_rgba_format,
                                                       VdpBool *is_supported)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfaceQueryGetPutBitsNativeCapabilities device=%d, "
            "surface_rgba_format=%s\n", trace_header, impl_state, device,
            reverse_rgba_format(surface_rgba_format));
skip:;
    VdpStatus ret =
        vdpOutputSurfaceQueryGetPutBitsNativeCapabilities(device, surface_rgba_format,
                                                          is_supported);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_GET_PUT_BITS_NATIVE_CAPABILITIES, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpOutputSurfaceQueryPutBitsIndexedCapabilities(VdpDevice device,
                                                     VdpRGBAFormat surface_rgba_format,
                                                     VdpIndexedFormat bits_indexed_format,
                                                     VdpColorTableFormat color_table_format,
                                                     VdpBool *is_supported)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfaceQueryPutBitsIndexedCapabilities device=%d, "
            "surface_rgba_format=%s, bits_indexed_format=%s, color_table_format=%s\n",
            trace_header, impl_state, device, reverse_rgba_format(surface_rgba_format),
            reverse_indexed_format(bits_indexed_format),
            reverse_color_table_format(color_table_format));
skip:;
    VdpStatus ret = vdpOutputSurfaceQueryPutBitsIndexedCapabilities(device, surface_rgba_format,
        bits_indexed_format, color_table_format, is_supported);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_INDEXED_CAPABILITIES, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpOutputSurfaceQueryPutBitsYCbCrCapabilities(VdpDevice device,
                                                   VdpRGBAFormat surface_rgba_format,
                                                   VdpYCbCrFormat bits_ycbcr_format,
                                                   VdpBool *is_supported)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfaceQueryPutBitsYCbCrCapabilities device=%d, "
            "surface_rgba_format=%s, bits_ycbcr_format=%s\n", trace_header, impl_state,
            device, reverse_rgba_format(surface_rgba_format), reverse_ycbcr_format(bits_ycbcr_format));
skip:;
    VdpStatus ret = vdpOutputSurfaceQueryPutBitsYCbCrCapabilities(device, surface_rgba_format,
        bits_ycbcr_format, is_supported);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_PUT_BITS_Y_CB_CR_CAPABILITIES, 1, (void*)ret);
    return ret;
}
/*
 * Tracing wrappers for VdpOutputSurface lifecycle and bit-transfer calls.
 * Same pattern throughout: log (when enabled), forward to the real vdp*
 * implementation, report the status via the call hook.
 */
VdpStatus
traceVdpOutputSurfaceCreate(VdpDevice device, VdpRGBAFormat rgba_format,
                            uint32_t width, uint32_t height, VdpOutputSurface *surface)
{
    const char *impl_state = "{part}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_CREATE, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfaceCreate device=%d, rgba_format=%s, width=%d, height=%d\n",
            trace_header, impl_state, device, reverse_rgba_format(rgba_format), width, height);
skip:;  // empty statement: C99 forbids a declaration directly after a label
    VdpStatus ret = vdpOutputSurfaceCreate(device, rgba_format, width, height, surface);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_CREATE, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpOutputSurfaceDestroy(VdpOutputSurface surface)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfaceDestroy surface=%d\n", trace_header, impl_state, surface);
skip:;
    VdpStatus ret = vdpOutputSurfaceDestroy(surface);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpOutputSurfaceGetParameters(VdpOutputSurface surface,
                                   VdpRGBAFormat *rgba_format, uint32_t *width, uint32_t *height)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfaceGetParameters surface=%d\n", trace_header, impl_state,
            surface);
skip:;
    VdpStatus ret = vdpOutputSurfaceGetParameters(surface, rgba_format, width, height);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_GET_PARAMETERS, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpOutputSurfaceGetBitsNative(VdpOutputSurface surface,
                                   VdpRect const *source_rect, void *const *destination_data,
                                   uint32_t const *destination_pitches)
{
    const char *impl_state = "{part}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfaceGetBitsNative surface=%d, source_rect=%s\n",
            trace_header, impl_state, surface, rect2string(source_rect));
skip:;
    VdpStatus ret = vdpOutputSurfaceGetBitsNative(surface, source_rect, destination_data,
                                                  destination_pitches);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpOutputSurfacePutBitsNative(VdpOutputSurface surface,
                                   void const *const *source_data, uint32_t const *source_pitches,
                                   VdpRect const *destination_rect)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfacePutBitsNative surface=%d, destination_rect=%s\n",
            trace_header, impl_state, surface, rect2string(destination_rect));
skip:;
    VdpStatus ret = vdpOutputSurfacePutBitsNative(surface, source_data, source_pitches,
                                                  destination_rect);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface,
                                    VdpIndexedFormat source_indexed_format,
                                    void const *const *source_data, uint32_t const *source_pitch,
                                    VdpRect const *destination_rect,
                                    VdpColorTableFormat color_table_format, void const *color_table)
{
    const char *impl_state = "{part}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfacePutBitsIndexed surface=%d, source_indexed_format=%s, "
            "destination_rect=%s, color_table_format=%s\n", trace_header, impl_state, surface,
            reverse_indexed_format(source_indexed_format), rect2string(destination_rect),
            reverse_color_table_format(color_table_format));
skip:;
    VdpStatus ret = vdpOutputSurfacePutBitsIndexed(surface, source_indexed_format, source_data,
        source_pitch, destination_rect, color_table_format, color_table);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_INDEXED, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface,
                                  VdpYCbCrFormat source_ycbcr_format,
                                  void const *const *source_data, uint32_t const *source_pitches,
                                  VdpRect const *destination_rect, VdpCSCMatrix const *csc_matrix)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR, 0, NULL);
    fprintf(tlog, "%s%s VdpOutputSurfacePutBitsYCbCr surface=%d, source_ycbcr_format=%s, "
            "destination_rect=%s, csc_matrix=%p\n", trace_header, impl_state, surface,
            reverse_ycbcr_format(source_ycbcr_format), rect2string(destination_rect), csc_matrix);
skip:;
    VdpStatus ret = vdpOutputSurfacePutBitsYCbCr(surface, source_ycbcr_format, source_data,
        source_pitches, destination_rect, csc_matrix);
    traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR, 1, (void*)ret);
    return ret;
}
/*
 * Tracing wrappers for the VdpVideoMixer capability/range queries.
 * Same pattern throughout: log (when enabled), forward to the real
 * vdp* implementation, report the status via the call hook.
 */
VdpStatus
traceVdpVideoMixerQueryFeatureSupport(VdpDevice device,
                                      VdpVideoMixerFeature feature, VdpBool *is_supported)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerQueryFeatureSupport device=%d, feature=%s\n",
            trace_header, impl_state, device, reverse_video_mixer_feature(feature));
skip:;  // empty statement: C99 forbids a declaration directly after a label
    VdpStatus ret = vdpVideoMixerQueryFeatureSupport(device, feature, is_supported);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpVideoMixerQueryParameterSupport(VdpDevice device,
                                        VdpVideoMixerParameter parameter,
                                        VdpBool *is_supported)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerQueryParameterSupport device=%d, parameter=%s\n",
            trace_header, impl_state, device, reverse_video_mixer_parameter(parameter));
skip:;
    VdpStatus ret = vdpVideoMixerQueryParameterSupport(device, parameter, is_supported);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpVideoMixerQueryAttributeSupport(VdpDevice device,
                                        VdpVideoMixerAttribute attribute, VdpBool *is_supported)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerQueryAttributeSupport device=%d, attribute=%s\n",
            trace_header, impl_state, device, reverse_video_mixer_attribute(attribute));
skip:;
    VdpStatus ret = vdpVideoMixerQueryAttributeSupport(device, attribute, is_supported);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpVideoMixerQueryParameterValueRange(VdpDevice device,
                                           VdpVideoMixerParameter parameter,
                                           void *min_value, void *max_value)
{
    const char *impl_state = "{part}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerQueryParameterValueRange device=%d, parameter=%s\n",
            trace_header, impl_state, device, reverse_video_mixer_parameter(parameter));
skip:;
    VdpStatus ret = vdpVideoMixerQueryParameterValueRange(device, parameter, min_value,
                                                          max_value);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_VALUE_RANGE, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpVideoMixerQueryAttributeValueRange(VdpDevice device,
                                           VdpVideoMixerAttribute attribute,
                                           void *min_value, void *max_value)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerQueryAttributeValueRange device=%d, attribute=%s\n",
            trace_header, impl_state, device, reverse_video_mixer_attribute(attribute));
skip:;
    VdpStatus ret = vdpVideoMixerQueryAttributeValueRange(device, attribute, min_value,
                                                          max_value);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_VALUE_RANGE, 1, (void*)ret);
    return ret;
}
/*
 * Tracing wrappers for VdpVideoMixer creation and setters.  These print
 * multi-line records: the first line carries the call summary, and each
 * feature/parameter/attribute is printed on its own continuation line
 * prefixed with trace_header_blank.
 */
VdpStatus
traceVdpVideoMixerCreate(VdpDevice device, uint32_t feature_count,
                         VdpVideoMixerFeature const *features, uint32_t parameter_count,
                         VdpVideoMixerParameter const *parameters,
                         void const *const *parameter_values, VdpVideoMixer *mixer)
{
    const char *impl_state = "{part}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_CREATE, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerCreate device=%d, feature_count=%d, parameter_count=%d\n",
            trace_header, impl_state, device, feature_count, parameter_count);
    for (uint32_t k = 0; k < feature_count; k ++)
        fprintf(tlog, "%s feature %s\n", trace_header_blank,
                reverse_video_mixer_feature(features[k]));
    // Each known parameter is decoded from its opaque value pointer;
    // unrecognized parameter ids are reported as "invalid".
    for (uint32_t k = 0; k < parameter_count; k ++) {
        fprintf(tlog, "%s parameter ", trace_header_blank);
        switch (parameters[k]) {
        case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH:
            fprintf(tlog, "video surface width = %d\n", *(uint32_t*)parameter_values[k]);
            break;
        case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT:
            fprintf(tlog, "video surface height = %d\n", *(uint32_t*)parameter_values[k]);
            break;
        case VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE:
            fprintf(tlog, "chroma type = %s\n",
                    reverse_chroma_type(*(uint32_t*)parameter_values[k]));
            break;
        case VDP_VIDEO_MIXER_PARAMETER_LAYERS:
            fprintf(tlog, "layers = %d\n", *(uint32_t*)parameter_values[k]);
            break;
        default:
            fprintf(tlog, "invalid\n");
            break;
        }
    }
skip:;  // empty statement: C99 forbids a declaration directly after a label
    VdpStatus ret = vdpVideoMixerCreate(device, feature_count, features, parameter_count,
                                        parameters, parameter_values, mixer);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_CREATE, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpVideoMixerSetFeatureEnables(VdpVideoMixer mixer,
                                    uint32_t feature_count, VdpVideoMixerFeature const *features,
                                    VdpBool const *feature_enables)
{
    const char *impl_state = "{part}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerSetFeatureEnables mixer=%d, feature_count=%d\n",
            trace_header, impl_state, mixer, feature_count);
    for (uint32_t k = 0; k < feature_count; k ++) {
        fprintf(tlog, "%s feature %d (%s) %s\n", trace_header_blank,
                features[k], reverse_video_mixer_feature(features[k]),
                feature_enables[k] ? "enabled" : "disabled");
    }
skip:;
    VdpStatus ret = vdpVideoMixerSetFeatureEnables(mixer, feature_count, features,
                                                   feature_enables);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpVideoMixerSetAttributeValues(VdpVideoMixer mixer,
                                     uint32_t attribute_count,
                                     VdpVideoMixerAttribute const *attributes,
                                     void const *const *attribute_values)
{
    const char *impl_state = "{part}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerSetAttributeValues mixer=%d, attribute_count=%d\n",
            trace_header, impl_state, mixer, attribute_count);
    for (uint32_t k = 0; k < attribute_count; k ++) {
        fprintf(tlog, "%s attribute %d (%s)\n", trace_header_blank, attributes[k],
                reverse_video_mixer_attribute(attributes[k]));
        // A CSC matrix attribute additionally gets its 3x4 contents dumped,
        // one row per continuation line.
        if (VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX == attributes[k]) {
            VdpCSCMatrix *matrix = (VdpCSCMatrix *)(attribute_values[k]);
            for (uint32_t j1 = 0; j1 < 3; j1 ++) {
                fprintf(tlog, "%s ", trace_header_blank);
                for (uint32_t j2 = 0; j2 < 4; j2 ++) {
                    fprintf(tlog, "%11f", (double)((*matrix)[j1][j2]));
                }
                fprintf(tlog, "\n");
            }
        }
    }
skip:;
    VdpStatus ret = vdpVideoMixerSetAttributeValues(mixer, attribute_count, attributes,
                                                    attribute_values);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES, 1, (void*)ret);
    return ret;
}
/*
 * Tracing wrappers for the VdpVideoMixer getters and destructor.  Same
 * pattern throughout: log (when enabled), forward to the real vdp*
 * implementation, report the status via the call hook.
 */
VdpStatus
traceVdpVideoMixerGetFeatureSupport(VdpVideoMixer mixer,
                                    uint32_t feature_count, VdpVideoMixerFeature const *features,
                                    VdpBool *feature_supports)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerGetFeatureSupport mixer=%d, feature_count=%d\n",
            trace_header, impl_state, mixer, feature_count);
    for (unsigned int k = 0; k < feature_count; k ++)
        fprintf(tlog, "%s feature %s\n", trace_header_blank,
                reverse_video_mixer_feature(features[k]));
skip:;  // empty statement: C99 forbids a declaration directly after a label
    VdpStatus ret = vdpVideoMixerGetFeatureSupport(mixer, feature_count, features,
                                                   feature_supports);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_SUPPORT, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpVideoMixerGetFeatureEnables(VdpVideoMixer mixer,
                                    uint32_t feature_count, VdpVideoMixerFeature const *features,
                                    VdpBool *feature_enables)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerGetFeatureEnables mixer=%d, feature_count=%d\n",
            trace_header, impl_state, mixer, feature_count);
    for (unsigned int k = 0; k < feature_count; k ++)
        fprintf(tlog, "%s feature %s\n", trace_header_blank,
                reverse_video_mixer_feature(features[k]));
skip:;
    VdpStatus ret = vdpVideoMixerGetFeatureEnables(mixer, feature_count, features,
                                                   feature_enables);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_FEATURE_ENABLES, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpVideoMixerGetParameterValues(VdpVideoMixer mixer,
                                     uint32_t parameter_count,
                                     VdpVideoMixerParameter const *parameters,
                                     void *const *parameter_values)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerGetParameterValues mixer=%d, parameter_count=%d\n",
            trace_header, impl_state, mixer, parameter_count);
    for (unsigned int k = 0; k < parameter_count; k ++)
        fprintf(tlog, "%s parameter %s\n", trace_header_blank,
                reverse_video_mixer_parameter(parameters[k]));
skip:;
    VdpStatus ret = vdpVideoMixerGetParameterValues(mixer, parameter_count, parameters,
                                                    parameter_values);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_PARAMETER_VALUES, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpVideoMixerGetAttributeValues(VdpVideoMixer mixer,
                                     uint32_t attribute_count,
                                     VdpVideoMixerAttribute const *attributes,
                                     void *const *attribute_values)
{
    const char *impl_state = "{zilch}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerGetAttributeValues mixer=%d, attribute_count=%d\n",
            trace_header, impl_state, mixer, attribute_count);
    for (unsigned int k = 0; k < attribute_count; k ++)
        fprintf(tlog, "%s attribute %s\n", trace_header_blank,
                reverse_video_mixer_attribute(attributes[k]));
skip:;
    VdpStatus ret = vdpVideoMixerGetAttributeValues(mixer, attribute_count, attributes,
                                                    attribute_values);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_GET_ATTRIBUTE_VALUES, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpVideoMixerDestroy(VdpVideoMixer mixer)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_DESTROY, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerDestroy mixer=%d\n", trace_header, impl_state, mixer);
skip:;
    VdpStatus ret = vdpVideoMixerDestroy(mixer);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_DESTROY, 1, (void*)ret);
    return ret;
}
/*
 * Tracing wrapper for VdpVideoMixerRender — the most argument-heavy call
 * in the API.  The log record spans several lines: the summary line,
 * then the past/future surface id lists, the source/destination rects
 * and the layer list, all prefixed with trace_header_blank.
 */
VdpStatus
traceVdpVideoMixerRender(VdpVideoMixer mixer,
                         VdpOutputSurface background_surface, VdpRect const *background_source_rect,
                         VdpVideoMixerPictureStructure current_picture_structure,
                         uint32_t video_surface_past_count,
                         VdpVideoSurface const *video_surface_past,
                         VdpVideoSurface video_surface_current, uint32_t video_surface_future_count,
                         VdpVideoSurface const *video_surface_future,
                         VdpRect const *video_source_rect, VdpOutputSurface destination_surface,
                         VdpRect const *destination_rect, VdpRect const *destination_video_rect,
                         uint32_t layer_count, VdpLayer const *layers)
{
    const char *impl_state = "{part}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_RENDER, 0, NULL);
    fprintf(tlog, "%s%s VdpVideoMixerRender mixer=%d, background_surface=%d, "
            "background_source_rect=%s,\n", trace_header, impl_state,
            mixer, background_surface, rect2string(background_source_rect));
    fprintf(tlog, "%s current_picture_structure=%s, video_surface_past=[",
            trace_header_blank, reverser_video_mixer_picture_structure(current_picture_structure));
    // Comma-separated list of past surface ids.
    for (uint32_t k = 0; k < video_surface_past_count; k ++) {
        if (0 != k) fprintf(tlog, ",");
        fprintf(tlog, "%d", video_surface_past[k]);
    }
    fprintf(tlog, "],\n%s video_surface_current=%d, video_surface_future=[",
            trace_header_blank, video_surface_current);
    // Comma-separated list of future surface ids.
    for (uint32_t k = 0; k < video_surface_future_count; k ++) {
        if (0 != k) fprintf(tlog, ",");
        fprintf(tlog, "%d", video_surface_future[k]);
    }
    fprintf(tlog, "],\n%s video_source_rect=%s, destination_surface=%d, destination_rect=%s, "
            "destination_video_rect=%s, layers=[", trace_header_blank, rect2string(video_source_rect),
            destination_surface, rect2string(destination_rect), rect2string(destination_video_rect));
    // One {surface,src,dst} triple per layer; rect2string's buffer pool
    // allows the two rect strings to coexist in one fprintf call.
    for (uint32_t k = 0; k < layer_count; k ++) {
        if (0 != k) fprintf(tlog, ",");
        fprintf(tlog, "{%d,src:%s,dst:%s}", layers[k].source_surface,
                rect2string(layers[k].source_rect), rect2string(layers[k].destination_rect));
    }
    fprintf(tlog, "]\n");
skip:;  // empty statement: C99 forbids a declaration directly after a label
    VdpStatus ret = vdpVideoMixerRender(mixer, background_surface, background_source_rect,
        current_picture_structure, video_surface_past_count, video_surface_past,
        video_surface_current, video_surface_future_count, video_surface_future, video_source_rect,
        destination_surface, destination_rect, destination_video_rect, layer_count, layers);
    traceCallHook(VDP_FUNC_ID_VIDEO_MIXER_RENDER, 1, (void*)ret);
    return ret;
}
/*
 * Tracing wrappers for the VdpPresentationQueue family.  Same pattern
 * throughout: log (when enabled), forward to the real vdp*
 * implementation, report the status via the call hook.
 */
VdpStatus
traceVdpPresentationQueueTargetDestroy(VdpPresentationQueueTarget presentation_queue_target)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY, 0, NULL);
    fprintf(tlog, "%s%s VdpPresentationQueueTargetDestroy presentation_queue_target=%d\n",
            trace_header, impl_state, presentation_queue_target);
skip:;  // empty statement: C99 forbids a declaration directly after a label
    VdpStatus ret = vdpPresentationQueueTargetDestroy(presentation_queue_target);
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpPresentationQueueCreate(VdpDevice device,
                                VdpPresentationQueueTarget presentation_queue_target,
                                VdpPresentationQueue *presentation_queue)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE, 0, NULL);
    fprintf(tlog, "%s%s VdpPresentationQueueCreate device=%d, presentation_queue_target=%d\n",
            trace_header, impl_state, device, presentation_queue_target);
skip:;
    VdpStatus ret = vdpPresentationQueueCreate(device, presentation_queue_target,
                                               presentation_queue);
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpPresentationQueueDestroy(VdpPresentationQueue presentation_queue)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY, 0, NULL);
    fprintf(tlog, "%s%s VdpPresentationQueueDestroy presentation_queue=%d\n",
            trace_header, impl_state, presentation_queue);
skip:;
    VdpStatus ret = vdpPresentationQueueDestroy(presentation_queue);
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpPresentationQueueSetBackgroundColor(VdpPresentationQueue presentation_queue,
                                            VdpColor *const background_color)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR, 0, NULL);
    fprintf(tlog, "%s%s VdpPresentationQueueSetBackgroundColor presentation_queue=%d, "
            "background_color=", trace_header, impl_state, presentation_queue);
    // background_color may legitimately be NULL; print the RGBA tuple only
    // when a color was actually supplied.
    if (background_color) {
        fprintf(tlog, "(%.2f,%.2f,%.2f,%.2f)\n", background_color->red,
                background_color->green, background_color->blue, background_color->alpha);
    } else {
        fprintf(tlog, "NULL\n");
    }
skip:;
    VdpStatus ret = vdpPresentationQueueSetBackgroundColor(presentation_queue,
                                                           background_color);
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpPresentationQueueGetBackgroundColor(VdpPresentationQueue presentation_queue,
                                            VdpColor *background_color)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR, 0, NULL);
    fprintf(tlog, "%s%s VdpPresentationQueueGetBackgroundColor presentation_queue=%d\n",
            trace_header, impl_state, presentation_queue);
skip:;
    VdpStatus ret = vdpPresentationQueueGetBackgroundColor(presentation_queue,
                                                           background_color);
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_BACKGROUND_COLOR, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpPresentationQueueGetTime(VdpPresentationQueue presentation_queue,
                                 VdpTime *current_time)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME, 0, NULL);
    fprintf(tlog, "%s%s VdpPresentationQueueGetTime presentation_queue=%d\n",
            trace_header, impl_state, presentation_queue);
skip:;
    VdpStatus ret = vdpPresentationQueueGetTime(presentation_queue, current_time);
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue,
                                 VdpOutputSurface surface, uint32_t clip_width,
                                 uint32_t clip_height, VdpTime earliest_presentation_time)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY, 0, NULL);
    fprintf(tlog, "%s%s VdpPresentationQueueDisplay presentation_queue=%d, surface=%d, "
            "clip_width=%d, clip_height=%d,\n", trace_header, impl_state, presentation_queue, surface,
            clip_width, clip_height);
    // VdpTime is a 64-bit value; PRIu64 keeps the format portable.
    fprintf(tlog, "%s earliest_presentation_time=%"PRIu64"\n", trace_header_blank,
            earliest_presentation_time);
skip:;
    VdpStatus ret = vdpPresentationQueueDisplay(presentation_queue, surface, clip_width,
                                                clip_height, earliest_presentation_time);
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_queue,
                                               VdpOutputSurface surface,
                                               VdpTime *first_presentation_time)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE, 0, NULL);
    fprintf(tlog, "%s%s VdpPresentationQueueBlockUntilSurfaceIdle presentation_queue=%d, "
            "surface=%d\n", trace_header, impl_state, presentation_queue, surface);
skip:;
    VdpStatus ret = vdpPresentationQueueBlockUntilSurfaceIdle(presentation_queue, surface,
                                                              first_presentation_time);
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE, 1, (void*)ret);
    return ret;
}

VdpStatus
traceVdpPresentationQueueQuerySurfaceStatus(VdpPresentationQueue presentation_queue,
                                            VdpOutputSurface surface,
                                            VdpPresentationQueueStatus *status,
                                            VdpTime *first_presentation_time)
{
    const char *impl_state = "{full}";
    if (!trace_enabled)
        goto skip;
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS, 0, NULL);
    fprintf(tlog, "%s%s VdpPresentationQueueQuerySurfaceStatus presentation_queue=%d, "
            "surface=%d\n", trace_header, impl_state, presentation_queue, surface);
skip:;
    VdpStatus ret = vdpPresentationQueueQuerySurfaceStatus(presentation_queue, surface,
                                                           status, first_presentation_time);
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS, 1, (void*)ret);
    return ret;
}
VdpStatus
traceVdpVideoSurfaceQueryCapabilities(VdpDevice device,
VdpChromaType surface_chroma_type, VdpBool *is_supported,
uint32_t *max_width, uint32_t *max_height)
{
const char *impl_state = "{part}";
if (!trace_enabled)
goto skip;
traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES, 0, NULL);
fprintf(tlog, "%s%s VdpVideoSurfaceQueryCapabilities device=%d, surface_chroma_type=%s\n",
trace_header, impl_state, device, reverse_chroma_type(surface_chroma_type));
skip:;
VdpStatus ret = vdpVideoSurfaceQueryCapabilities(device, surface_chroma_type, is_supported,
max_width, max_height);
traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES, 1, (void*)ret);
return ret;
}
VdpStatus
traceVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device,
VdpChromaType surface_chroma_type,
VdpYCbCrFormat bits_ycbcr_format,
VdpBool *is_supported)
{
const char *impl_state = "{part}";
if (!trace_enabled)
goto skip;
traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES, 0, NULL);
fprintf(tlog, "%s%s VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities device=%d, "
"surface_chroma_type=%s, bits_ycbcr_format=%s\n", trace_header, impl_state,
device, reverse_chroma_type(surface_chroma_type), reverse_ycbcr_format(bits_ycbcr_format));
skip:;
VdpStatus ret = vdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(device, surface_chroma_type,
bits_ycbcr_format, is_supported);
traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES, 1, (void*)ret);
return ret;
}
VdpStatus
traceVdpVideoSurfaceCreate(VdpDevice device, VdpChromaType chroma_type,
uint32_t width, uint32_t height, VdpVideoSurface *surface)
{
const char *impl_state = "{part}";
if (!trace_enabled)
goto skip;
traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, 0, NULL);
fprintf(tlog, "%s%s VdpVideoSurfaceCreate, device=%d, chroma_type=%s, width=%d, height=%d\n",
trace_header, impl_state, device, reverse_chroma_type(chroma_type), width, height);
skip:;
VdpStatus ret = vdpVideoSurfaceCreate(device, chroma_type, width, height, surface);
traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, 1, (void*)ret);
return ret;
}
VdpStatus
traceVdpVideoSurfaceDestroy(VdpVideoSurface surface)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, 0, NULL);
        fprintf(tlog, "%s%s VdpVideoSurfaceDestroy surface=%d\n", trace_header, impl_state, surface);
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpVideoSurfaceDestroy(surface);
    traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpVideoSurfaceGetParameters(VdpVideoSurface surface,
                                  VdpChromaType *chroma_type, uint32_t *width, uint32_t *height)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS, 0, NULL);
        fprintf(tlog, "%s%s VdpVideoSurfaceGetParameters surface=%d\n", trace_header, impl_state,
                surface);
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpVideoSurfaceGetParameters(surface, chroma_type, width, height);
    traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface,
                                 VdpYCbCrFormat destination_ycbcr_format,
                                 void *const *destination_data, uint32_t const *destination_pitches)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{part}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, 0, NULL);
        fprintf(tlog, "%s%s VdpVideoSurfaceGetBitsYCbCr surface=%d, destination_ycbcr_format=%s\n",
                trace_header, impl_state, surface, reverse_ycbcr_format(destination_ycbcr_format));
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpVideoSurfaceGetBitsYCbCr(surface, destination_ycbcr_format,
                                               destination_data, destination_pitches);
    traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
                                 VdpYCbCrFormat source_ycbcr_format, void const *const *source_data,
                                 uint32_t const *source_pitches)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{part}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR, 0, NULL);
        fprintf(tlog, "%s%s VdpVideoSurfacePutBitsYCbCr surface=%d, source_ycbcr_format=%s\n",
                trace_header, impl_state, surface, reverse_ycbcr_format(source_ycbcr_format));
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpVideoSurfacePutBitsYCbCr(surface, source_ycbcr_format, source_data,
                                               source_pitches);
    traceCallHook(VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpBitmapSurfaceQueryCapabilities(VdpDevice device,
                                       VdpRGBAFormat surface_rgba_format, VdpBool *is_supported,
                                       uint32_t *max_width, uint32_t *max_height)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES, 0, NULL);
        fprintf(tlog, "%s%s VdpBitmapSurfaceQueryCapabilities device=%d, surface_rgba_format=%s\n",
                trace_header, impl_state, device, reverse_rgba_format(surface_rgba_format));
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpBitmapSurfaceQueryCapabilities(device, surface_rgba_format, is_supported,
                                                     max_width, max_height);
    traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpBitmapSurfaceCreate(VdpDevice device, VdpRGBAFormat rgba_format,
                            uint32_t width, uint32_t height, VdpBool frequently_accessed,
                            VdpBitmapSurface *surface)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_CREATE, 0, NULL);
        // two log lines; the second is indented with trace_header_blank for alignment
        fprintf(tlog, "%s%s VdpBitmapSurfaceCreate device=%d, rgba_format=%s, width=%d, height=%d,\n"
                "%s frequently_accessed=%d\n", trace_header, impl_state, device,
                reverse_rgba_format(rgba_format), width, height, trace_header_blank, frequently_accessed);
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpBitmapSurfaceCreate(device, rgba_format, width, height,
                                          frequently_accessed, surface);
    traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_CREATE, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpBitmapSurfaceDestroy(VdpBitmapSurface surface)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_DESTROY, 0, NULL);
        fprintf(tlog, "%s%s VdpBitmapSurfaceDestroy surface=%d\n", trace_header, impl_state, surface);
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpBitmapSurfaceDestroy(surface);
    traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_DESTROY, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpBitmapSurfaceGetParameters(VdpBitmapSurface surface,
                                   VdpRGBAFormat *rgba_format, uint32_t *width, uint32_t *height,
                                   VdpBool *frequently_accessed)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS, 0, NULL);
        fprintf(tlog, "%s%s VdpBitmapSurfaceGetParameters surface=%d\n",
                trace_header, impl_state, surface);
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpBitmapSurfaceGetParameters(surface, rgba_format, width, height,
                                                 frequently_accessed);
    traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_GET_PARAMETERS, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpBitmapSurfacePutBitsNative(VdpBitmapSurface surface,
                                   void const *const *source_data, uint32_t const *source_pitches,
                                   VdpRect const *destination_rect)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE, 0, NULL);
        fprintf(tlog, "%s%s VdpBitmapSurfacePutBitsNative surface=%d, destination_rect=%s\n",
                trace_header, impl_state, surface, rect2string(destination_rect));
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpBitmapSurfacePutBitsNative(surface, source_data, source_pitches,
                                                 destination_rect);
    traceCallHook(VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpDeviceDestroy(VdpDevice device)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_DEVICE_DESTROY, 0, NULL);
        fprintf(tlog, "%s%s VdpDeviceDestroy device=%d\n", trace_header, impl_state, device);
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpDeviceDestroy(device);
    traceCallHook(VDP_FUNC_ID_DEVICE_DESTROY, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpGetInformationString(char const **information_string)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_GET_INFORMATION_STRING, 0, NULL);
        fprintf(tlog, "%s%s VdpGetInformationString\n", trace_header, impl_state);
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpGetInformationString(information_string);
    traceCallHook(VDP_FUNC_ID_GET_INFORMATION_STRING, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpGenerateCSCMatrix(VdpProcamp *procamp, VdpColorStandard standard,
                          VdpCSCMatrix *csc_matrix)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{part}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_GENERATE_CSC_MATRIX, 0, NULL);
        fprintf(tlog, "%s%s VdpGenerateCSCMatrix ", trace_header, impl_state);
        // procamp is optional; print its fields only when the caller supplied one
        if (procamp) {
            fprintf(tlog, "brightness=%f, contrast=%f, saturation=%f, ", procamp->brightness,
                    procamp->contrast, procamp->saturation);
        }
        fprintf(tlog, "standard=%s\n", reverse_color_standard(standard));
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpGenerateCSCMatrix(procamp, standard, csc_matrix);
    traceCallHook(VDP_FUNC_ID_GENERATE_CSC_MATRIX, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpOutputSurfaceRenderOutputSurface(VdpOutputSurface destination_surface,
VdpRect const *destination_rect,
VdpOutputSurface source_surface,
VdpRect const *source_rect, VdpColor const *colors,
VdpOutputSurfaceRenderBlendState const *blend_state,
uint32_t flags)
{
/* Trace wrapper: logs the full argument set (rects, blend state, flags,
 * colors) when tracing is on, then forwards to the real implementation. */
const char *impl_state = "{full}";
/* when tracing is off, skip straight to the actual call */
if (!trace_enabled)
goto skip;
traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE, 0, NULL);
fprintf(tlog, "%s%s VdpOutputSurfaceRenderOutputSurface destination_surface=%d, "
"destination_rect=%s,\n", trace_header, impl_state,
destination_surface, rect2string(destination_rect));
fprintf(tlog, "%s source_surface=%d, source_rect=%s\n",
trace_header_blank, source_surface, rect2string(source_rect));
/* blend_state is optional; dump each field on its own aligned line */
if (blend_state) {
fprintf(tlog,
"%s blend_state.blend_factor_source_color=%s\n"
"%s blend_state.blend_factor_destination_color=%s\n"
"%s blend_state.blend_factor_source_alpha=%s\n"
"%s blend_state.blend_factor_destination_alpha=%s\n"
"%s blend_state.blend_equation_color=%s\n"
"%s blend_state.blend_equation_alpha=%s\n"
"%s blend_constant = (%11f, %11f, %11f, %11f)\n",
trace_header_blank, reverse_blend_factor(blend_state->blend_factor_source_color),
trace_header_blank, reverse_blend_factor(blend_state->blend_factor_destination_color),
trace_header_blank, reverse_blend_factor(blend_state->blend_factor_source_alpha),
trace_header_blank, reverse_blend_factor(blend_state->blend_factor_destination_alpha),
trace_header_blank, reverse_blend_equation(blend_state->blend_equation_color),
trace_header_blank, reverse_blend_equation(blend_state->blend_equation_alpha),
trace_header_blank,
blend_state->blend_constant.red, blend_state->blend_constant.green,
blend_state->blend_constant.blue, blend_state->blend_constant.alpha);
} else {
fprintf(tlog, "%s blend_state=NULL\n", trace_header_blank);
}
/* flags: rotation value plus the optional color-per-vertex bit */
fprintf(tlog, "%s flags = %s", trace_header_blank,
reverse_output_surface_render_rotate(flags));
if (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX)
fprintf(tlog, "| VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX");
fprintf(tlog, "\n");
/* colors array length is implied by the flag: 4 per-vertex colors or 1 flat color */
int color_count = 0;
if (colors) {
if (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX)
color_count = 4;
else
color_count = 1;
}
fprintf(tlog, "%s colors=[", trace_header_blank);
for (int k = 0; k < color_count; k ++) {
if (k > 0) fprintf(tlog, ", ");
fprintf(tlog, "(%f,%f,%f,%f)", colors[k].red, colors[k].green, colors[k].blue,
colors[k].alpha);
}
fprintf(tlog, "]\n");
skip:;
/* forward to the real implementation; the post-call hook always fires */
VdpStatus ret = vdpOutputSurfaceRenderOutputSurface(destination_surface, destination_rect,
source_surface, source_rect, colors, blend_state, flags);
traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_OUTPUT_SURFACE, 1, (void*)ret);
return ret;
}
VdpStatus
traceVdpOutputSurfaceRenderBitmapSurface(VdpOutputSurface destination_surface,
VdpRect const *destination_rect,
VdpBitmapSurface source_surface,
VdpRect const *source_rect, VdpColor const *colors,
VdpOutputSurfaceRenderBlendState const *blend_state,
uint32_t flags)
{
/* Trace wrapper: logs the full argument set (rects, blend state, flags,
 * colors) when tracing is on, then forwards to the real implementation. */
const char *impl_state = "{full}";
/* when tracing is off, skip straight to the actual call */
if (!trace_enabled)
goto skip;
traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE, 0, NULL);
fprintf(tlog, "%s%s VdpOutputSurfaceRenderBitmapSurface destination_surface=%d, "
"destination_rect=%s,\n", trace_header, impl_state,
destination_surface, rect2string(destination_rect));
fprintf(tlog, "%s source_surface=%d, source_rect=%s\n",
trace_header_blank, source_surface, rect2string(source_rect));
/* blend_state is optional; dump each field on its own aligned line */
if (blend_state) {
fprintf(tlog,
"%s blend_state.blend_factor_source_color=%s\n"
"%s blend_state.blend_factor_destination_color=%s\n"
"%s blend_state.blend_factor_source_alpha=%s\n"
"%s blend_state.blend_factor_destination_alpha=%s\n"
"%s blend_state.blend_equation_color=%s\n"
"%s blend_state.blend_equation_alpha=%s\n"
"%s blend_constant = (%11f, %11f, %11f, %11f)\n",
trace_header_blank, reverse_blend_factor(blend_state->blend_factor_source_color),
trace_header_blank, reverse_blend_factor(blend_state->blend_factor_destination_color),
trace_header_blank, reverse_blend_factor(blend_state->blend_factor_source_alpha),
trace_header_blank, reverse_blend_factor(blend_state->blend_factor_destination_alpha),
trace_header_blank, reverse_blend_equation(blend_state->blend_equation_color),
trace_header_blank, reverse_blend_equation(blend_state->blend_equation_alpha),
trace_header_blank,
blend_state->blend_constant.red, blend_state->blend_constant.green,
blend_state->blend_constant.blue, blend_state->blend_constant.alpha);
} else {
fprintf(tlog, "%s blend_state=NULL\n", trace_header_blank);
}
/* flags: rotation value plus the optional color-per-vertex bit */
fprintf(tlog, "%s flags = %s", trace_header_blank,
reverse_output_surface_render_rotate(flags));
if (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX)
fprintf(tlog, "| VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX");
fprintf(tlog, "\n");
/* colors array length is implied by the flag: 4 per-vertex colors or 1 flat color */
int color_count = 0;
if (colors) {
if (flags & VDP_OUTPUT_SURFACE_RENDER_COLOR_PER_VERTEX)
color_count = 4;
else
color_count = 1;
}
fprintf(tlog, "%s colors=[", trace_header_blank);
for (int k = 0; k < color_count; k ++) {
if (k > 0) fprintf(tlog, ", ");
fprintf(tlog, "(%f,%f,%f,%f)", colors[k].red, colors[k].green, colors[k].blue,
colors[k].alpha);
}
fprintf(tlog, "]\n");
skip:;
/* forward to the real implementation; the post-call hook always fires */
VdpStatus ret = vdpOutputSurfaceRenderBitmapSurface(destination_surface, destination_rect,
source_surface, source_rect, colors, blend_state, flags);
traceCallHook(VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE, 1, (void*)ret);
return ret;
}
VdpStatus
traceVdpPreemptionCallbackRegister(VdpDevice device,
                                   VdpPreemptionCallback callback, void *context)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{zilch/fake success}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER, 0, NULL);
        fprintf(tlog, "%s%s VdpPreemptionCallbackRegister device=%d, callback=%p, context=%p\n",
                trace_header, impl_state, device, callback, context);
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpPreemptionCallbackRegister(device, callback, context);
    traceCallHook(VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpPresentationQueueTargetCreateX11(VdpDevice device, Drawable drawable,
                                         VdpPresentationQueueTarget *target)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11, 0, NULL);
        fprintf(tlog, "%s%s VdpPresentationQueueTargetCreateX11, device=%d, drawable=%u\n",
                trace_header, impl_state, device, ((unsigned int)drawable));
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpPresentationQueueTargetCreateX11(device, drawable, target);
    traceCallHook(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpGetProcAddress(VdpDevice device, VdpFuncId function_id,
                       void **function_pointer)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        traceCallHook(VDP_FUNC_ID_GET_PROC_ADDRESS, 0, NULL);
        fprintf(tlog, "%s%s VdpGetProcAddress, device=%d, function_id=%s\n",
                trace_header, impl_state, device, reverse_func_id(function_id));
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpGetProcAddress(device, function_id, function_pointer);
    traceCallHook(VDP_FUNC_ID_GET_PROC_ADDRESS, 1, (void*)rv);
    return rv;
}
VdpStatus
traceVdpDeviceCreateX11(Display *display, int screen, VdpDevice *device,
                        VdpGetProcAddress **get_proc_address)
{
    // implementation-status tag embedded in the log line
    const char *impl_state = "{full}";

    if (trace_enabled) {
        // device creation has no VdpFuncId; -1 marks it for the hook
        traceCallHook(-1, 0, NULL);
        fprintf(tlog, "%s%s vdp_imp_device_create_x11 display=%p, screen=%d\n", trace_header,
                impl_state, display, screen);
    }

    // forward to the real implementation; the post-call hook always fires
    VdpStatus rv = vdpDeviceCreateX11(display, screen, device, get_proc_address);
    traceCallHook(-1, 1, (void*)rv);
    return rv;
}
libvdpau-va-gl-0.3.6/src/trace.h 0000664 0000000 0000000 00000011433 12665616006 0016402 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#ifndef VA_GL_SRC_TRACE_H
#define VA_GL_SRC_TRACE_H

/* NOTE(review): the three include targets below were stripped to bare
 * "#include" during archive extraction (angle brackets lost). They are
 * reconstructed from the types this header uses: FILE requires <stdio.h>;
 * VdpStatus/VdpDevice/VdpFuncId require <vdpau/vdpau.h>; Display/Drawable
 * and the *X11 entry points require <vdpau/vdpau_x11.h>. Confirm against
 * upstream libvdpau-va-gl. */
#include <stdio.h>
#include <vdpau/vdpau.h>
#include <vdpau/vdpau_x11.h>
#include "reverse-constant.h"

/* Trace-log control: enable/disable tracing and select the output stream. */
void
traceEnableTracing(int flag);
void
traceSetTarget(FILE *target);
void
traceResetTarget(void);
/* Set the prefix printed at the start of each log line ("header") and the
 * matching all-blank prefix used to align continuation lines. */
void
traceSetHeader(const char *header, const char *header_blank);
/* printf-style helpers for informational and error messages. */
void
traceInfo(const char *buf, ...);
void
traceError(const char *buf, ...);
/* Install a hook invoked around every traced VDPAU call; `origin` is the
 * VdpFuncId (-1 for device creation) and `after` is 0 before / 1 after. */
void
traceSetHook(void (*hook)(void *param1, void *param2, int origin, int after), void *param);
void
traceCallHook(int origin, int after, void *shortterm_param);

/* Entry point wrapper for device creation (no VdpFuncId exists for it). */
VdpStatus
traceVdpDeviceCreateX11(Display *display, int screen, VdpDevice *device,
                        VdpGetProcAddress **get_proc_address);

/* Trace wrappers for every VDPAU entry point, declared via the function
 * typedefs from vdpau.h so signatures always match the API. */
VdpGetApiVersion traceVdpGetApiVersion;
VdpDecoderQueryCapabilities traceVdpDecoderQueryCapabilities;
VdpDecoderCreate traceVdpDecoderCreate;
VdpDecoderDestroy traceVdpDecoderDestroy;
VdpDecoderGetParameters traceVdpDecoderGetParameters;
VdpDecoderRender traceVdpDecoderRender;
VdpOutputSurfaceQueryCapabilities traceVdpOutputSurfaceQueryCapabilities;
VdpOutputSurfaceQueryGetPutBitsNativeCapabilities traceVdpOutputSurfaceQueryGetPutBitsNativeCapabilities;
VdpOutputSurfaceQueryPutBitsIndexedCapabilities traceVdpOutputSurfaceQueryPutBitsIndexedCapabilities;
VdpOutputSurfaceQueryPutBitsYCbCrCapabilities traceVdpOutputSurfaceQueryPutBitsYCbCrCapabilities;
VdpOutputSurfaceCreate traceVdpOutputSurfaceCreate;
VdpOutputSurfaceDestroy traceVdpOutputSurfaceDestroy;
VdpOutputSurfaceGetParameters traceVdpOutputSurfaceGetParameters;
VdpOutputSurfaceGetBitsNative traceVdpOutputSurfaceGetBitsNative;
VdpOutputSurfacePutBitsNative traceVdpOutputSurfacePutBitsNative;
VdpOutputSurfacePutBitsIndexed traceVdpOutputSurfacePutBitsIndexed;
VdpOutputSurfacePutBitsYCbCr traceVdpOutputSurfacePutBitsYCbCr;
VdpVideoMixerQueryFeatureSupport traceVdpVideoMixerQueryFeatureSupport;
VdpVideoMixerQueryParameterSupport traceVdpVideoMixerQueryParameterSupport;
VdpVideoMixerQueryAttributeSupport traceVdpVideoMixerQueryAttributeSupport;
VdpVideoMixerQueryParameterValueRange traceVdpVideoMixerQueryParameterValueRange;
VdpVideoMixerQueryAttributeValueRange traceVdpVideoMixerQueryAttributeValueRange;
VdpVideoMixerCreate traceVdpVideoMixerCreate;
VdpVideoMixerSetFeatureEnables traceVdpVideoMixerSetFeatureEnables;
VdpVideoMixerSetAttributeValues traceVdpVideoMixerSetAttributeValues;
VdpVideoMixerGetFeatureSupport traceVdpVideoMixerGetFeatureSupport;
VdpVideoMixerGetFeatureEnables traceVdpVideoMixerGetFeatureEnables;
VdpVideoMixerGetParameterValues traceVdpVideoMixerGetParameterValues;
VdpVideoMixerGetAttributeValues traceVdpVideoMixerGetAttributeValues;
VdpVideoMixerDestroy traceVdpVideoMixerDestroy;
VdpVideoMixerRender traceVdpVideoMixerRender;
VdpPresentationQueueTargetDestroy traceVdpPresentationQueueTargetDestroy;
VdpPresentationQueueCreate traceVdpPresentationQueueCreate;
VdpPresentationQueueDestroy traceVdpPresentationQueueDestroy;
VdpPresentationQueueSetBackgroundColor traceVdpPresentationQueueSetBackgroundColor;
VdpPresentationQueueGetBackgroundColor traceVdpPresentationQueueGetBackgroundColor;
VdpPresentationQueueGetTime traceVdpPresentationQueueGetTime;
VdpPresentationQueueDisplay traceVdpPresentationQueueDisplay;
VdpPresentationQueueBlockUntilSurfaceIdle traceVdpPresentationQueueBlockUntilSurfaceIdle;
VdpPresentationQueueQuerySurfaceStatus traceVdpPresentationQueueQuerySurfaceStatus;
VdpVideoSurfaceQueryCapabilities traceVdpVideoSurfaceQueryCapabilities;
VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities traceVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities;
VdpVideoSurfaceCreate traceVdpVideoSurfaceCreate;
VdpVideoSurfaceDestroy traceVdpVideoSurfaceDestroy;
VdpVideoSurfaceGetParameters traceVdpVideoSurfaceGetParameters;
VdpVideoSurfaceGetBitsYCbCr traceVdpVideoSurfaceGetBitsYCbCr;
VdpVideoSurfacePutBitsYCbCr traceVdpVideoSurfacePutBitsYCbCr;
VdpBitmapSurfaceQueryCapabilities traceVdpBitmapSurfaceQueryCapabilities;
VdpBitmapSurfaceCreate traceVdpBitmapSurfaceCreate;
VdpBitmapSurfaceDestroy traceVdpBitmapSurfaceDestroy;
VdpBitmapSurfaceGetParameters traceVdpBitmapSurfaceGetParameters;
VdpBitmapSurfacePutBitsNative traceVdpBitmapSurfacePutBitsNative;
VdpDeviceDestroy traceVdpDeviceDestroy;
VdpGetInformationString traceVdpGetInformationString;
VdpGenerateCSCMatrix traceVdpGenerateCSCMatrix;
VdpOutputSurfaceRenderOutputSurface traceVdpOutputSurfaceRenderOutputSurface;
VdpOutputSurfaceRenderBitmapSurface traceVdpOutputSurfaceRenderBitmapSurface;
VdpPreemptionCallbackRegister traceVdpPreemptionCallbackRegister;
VdpPresentationQueueTargetCreateX11 traceVdpPresentationQueueTargetCreateX11;
VdpGetProcAddress traceVdpGetProcAddress;

#endif /* VA_GL_SRC_TRACE_H */
libvdpau-va-gl-0.3.6/src/watermark.c 0000664 0000000 0000000 00000047501 12665616006 0017301 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#include "watermark.h"
// grayscale (yet RGBA) image with text "vagl" on it
const int watermark_width = 50;  /* watermark image width, in pixels */
const int watermark_height = 27; /* watermark image height, in pixels */
const char *watermark_data =
"\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\20\377"
"\377\377r\377\377\377\237\377\377\377\313\377\377\377\356\377\377\377\362"
"\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377"
"\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362"
"\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377"
"\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362"
"\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377"
"\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362"
"\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377"
"\377\356\377\377\377\313\377\377\377\237\377\377\377s\377\377\377\20\377"
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377"
"\377\0\377\377\377\0\377\377\377H\377\377\377\325\377\377\377\377\377\377"
"\377\377\372\372\372\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\327\327\327\377\312\312\312\377\302\302\302\377\302\302"
"\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377"
"\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302"
"\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377"
"\302\302\302\377\302\302\302\377\302\302\302\377\317\317\317\377\367\367"
"\367\377\377\377\377\377\377\377\377\377\377\377\377\325\377\377\377G\377"
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377"
"\377\177\377\377\377\371\376\376\376\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\375\375\375\377\236\236\236\377\23\23\23\377\0\0"
"\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0"
"\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377"
"\0\0\0\377\0\0\0\377\23\23\23\377LLL\377\242\242\242\377\376\376\376\377"
"\377\377\377\371\377\377\377\177\377\377\377\0\377\377\377\0\377\377\377"
"\0\377\377\377F\377\377\377\371\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\332\332\332\377;;;\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0"
"\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377"
"\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377VV"
"V\377\342\342\342\377\377\377\377\371\377\377\377F\377\377\377\0\377\377"
"\377\15\377\377\377\324\374\374\374\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\327\327\327\377\20\20\20\377\0\0\0\377\0\0\0"
"\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377"
"\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\13"
"\13\13\377\20\20\20\377\17\17\17\377BBB\377\371\371\371\377\377\377\377\324"
"\377\377\377\15\377\377\377m\377\377\377\377\375\375\375\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\230"
"\230\230\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377"
"\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0"
"\0\0\377\0\0\0\377\0\0\0\377\267\267\267\377\377\377\377\377\370\370\370"
"\377\0\0\0\377\216\216\216\377\377\377\377\377\377\377\377m\377\377\377\233"
"\373\373\373\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\372\372\372\377\31\31\31\377\0\0"
"\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0"
"\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377"
"\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\15\15\15\377\361"
"\361\361\377\377\377\377\233\377\377\377\310\370\370\370\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377QQQ\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377"
"\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0"
"\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\267\267\267\377\377\377\377\377\370"
"\370\370\377\0\0\0\377\0\0\0\377\307\307\307\377\377\377\377\310\377\377"
"\377\360\377\377\377\377\361\361\361\377\317\317\317\377\317\317\317\377"
"\344\344\344\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\353\353\353\377\317\317\317\377\317\317\317\377\354\354\354\377"
"\377\377\377\377\377\377\377\377\376\376\376\377\320\320\320\377\260\260"
"\260\377\257\257\257\377\323\323\323\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\177\177"
"\177\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\40\40\40\377V"
"VV\377555\377\3\3\3\377\0\0\0\377///\377000\377\34\34\34\377\0\0\0\377\0"
"\0\0\377\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0"
"\377\234\234\234\377\377\377\377\355\377\377\377\365\377\377\377\377\345"
"\345\345\377\2\2\2\377\0\0\0\377AAA\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377bbb\377\0\0\0\377\0\0\0\377\316\316\316\377\377"
"\377\377\377\312\312\312\377+++\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377"
"HHH\377\355\355\355\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\235\235\235\377\0\0\0\377\0\0\0\377\0\0\0\377\6\6\6\377\240"
"\240\240\377\377\377\377\377\377\377\377\377\377\377\377\377\333\333\333"
"\377$$$\377\377\377\377\377\377\377\377\377\221\221\221\377\0\0\0\377\0\0"
"\0\377\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377"
"\202\202\202\377\377\377\377\362\377\377\377\365\377\377\377\377\377\377"
"\377\377CCC\377\0\0\0\377\4\4\4\377\357\357\357\377\377\377\377\377\377\377"
"\377\377\374\374\374\377\25\25\25\377\0\0\0\377---\377\377\377\377\377\363"
"\363\363\377\27\27\27\377\0\0\0\377\24\24\24\377\216\216\216\377{{{\377\3"
"\3\3\377\0\0\0\377YYY\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\236\236\236\377\0\0\0\377\0\0\0\377\0\0\0\377\201\201\201"
"\377\377\377\377\377\377\377\377\377\331\331\331\377\206\206\206\377\266"
"\266\266\377\333\333\333\377\377\377\377\377\377\377\377\377\213\213\213"
"\377\0\0\0\377\0\0\0\377\267\267\267\377\377\377\377\377\370\370\370\377"
"\0\0\0\377\0\0\0\377\201\201\201\377\377\377\377\362\377\377\377\365\377"
"\377\377\377\377\377\377\377\237\237\237\377\0\0\0\377\0\0\0\377\250\250"
"\250\377\377\377\377\377\377\377\377\377\301\301\301\377\0\0\0\377\0\0\0"
"\377\213\213\213\377\377\377\377\377\305\305\305\377===\377111\377\227\227"
"\227\377\377\377\377\377\377\377\377\377OOO\377\0\0\0\377\11\11\11\377\375"
"\375\375\377\377\377\377\377\377\377\377\377\377\377\377\377\236\236\236"
"\377\0\0\0\377\0\0\0\377\6\6\6\377\353\353\353\377\377\377\377\377\364\364"
"\364\377\26\26\26\377\0\0\0\377\0\0\0\377\256\256\256\377\377\377\377\377"
"\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377\267\267\267\377\377"
"\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201\201\201\377\377\377"
"\377\362\377\377\377\365\377\377\377\377\377\377\377\377\361\361\361\377"
"\10\10\10\377\0\0\0\377[[[\377\377\377\377\377\377\377\377\377qqq\377\0\0"
"\0\377\3\3\3\377\346\346\346\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\366\366\366\377\356\356\356\377"
"ddd\377\0\0\0\377\0\0\0\377\343\343\343\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\236\236\236\377\0\0\0\377\0\0\0\377###\377\377\377\377\377"
"\377\377\377\377\252\252\252\377\0\0\0\377\0\0\0\377\0\0\0\377KKK\377\377"
"\377\377\377\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377\267\267"
"\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201\201\201"
"\377\377\377\377\362\377\377\377\365\377\377\377\377\377\377\377\377\377"
"\377\377\377UUU\377\0\0\0\377\22\22\22\377\374\374\374\377\377\377\377\377"
"!!!\377\0\0\0\377GGG\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\253\253\253\377999\377\14\14\14\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0"
"\377\0\0\0\377\340\340\340\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\236\236\236\377\0\0\0\377\0\0\0\377===\377\377\377\377\377\377\377\377"
"\377\220\220\220\377\0\0\0\377\0\0\0\377\0\0\0\377+++\377\377\377\377\377"
"\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377\267\267\267\377\377"
"\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201\201\201\377\377\377"
"\377\362\377\377\377\365\377\377\377\377\377\377\377\377\377\377\377\377"
"\260\260\260\377\0\0\0\377\0\0\0\377\302\302\302\377\320\320\320\377\0\0"
"\0\377\0\0\0\377\246\246\246\377\377\377\377\377\377\377\377\377\252\252"
"\252\377\0\0\0\377\0\0\0\377111\377\203\203\203\377\222\222\222\377AAA\377"
"\0\0\0\377\0\0\0\377\340\340\340\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\236\236\236\377\0\0\0\377\0\0\0\377999\377\377\377\377\377\377"
"\377\377\377\245\245\245\377\0\0\0\377\0\0\0\377\0\0\0\377222\377\377\377"
"\377\377\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377\267\267\267"
"\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201\201\201\377"
"\377\377\377\362\377\377\377\365\377\377\377\377\377\377\377\377\377\377"
"\377\377\371\371\371\377\22\22\22\377\0\0\0\377uuu\377\177\177\177\377\0"
"\0\0\377\16\16\16\377\366\366\366\377\377\377\377\377\377\377\377\377EEE"
"\377\0\0\0\377\3\3\3\377\357\357\357\377\377\377\377\377\377\377\377\377"
"XXX\377\0\0\0\377\0\0\0\377\340\340\340\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\236\236\236\377\0\0\0\377\0\0\0\377\37\37\37\377\377\377"
"\377\377\377\377\377\377\332\332\332\377\0\0\0\377\0\0\0\377\0\0\0\377WW"
"W\377\377\377\377\377\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377"
"\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201"
"\201\201\377\377\377\377\362\377\377\377\365\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377ggg\377\0\0\0\377)))\377///\377\0\0\0"
"\377bbb\377\377\377\377\377\377\377\377\377\377\377\377\377$$$\377\0\0\0"
"\377\5\5\5\377\361\361\361\377\377\377\377\377\342\342\342\377\14\14\14\377"
"\0\0\0\377\0\0\0\377\332\332\332\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\236\236\236\377\0\0\0\377\0\0\0\377\2\2\2\377\342\342\342\377"
"\377\377\377\377\376\376\376\377FFF\377\0\0\0\377\10\10\10\377\307\307\307"
"\377\377\377\377\377\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377"
"\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\201"
"\201\201\377\377\377\377\362\377\377\377\365\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\302\302\302\377\0\0\0\377\0\0\0\377"
"\0\0\0\377\0\0\0\377\301\301\301\377\377\377\377\377\377\377\377\377\377"
"\377\377\377SSS\377\0\0\0\377\0\0\0\377CCC\377yyy\377\32\32\32\377III\377"
"\0\0\0\377\0\0\0\377rrr\377\322\322\322\377\377\377\377\377\377\377\377\377"
"\236\236\236\377\0\0\0\377\0\0\0\377\0\0\0\377uuu\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\307\307\307\377\336\336\336\377\304\304\304"
"\377\377\377\377\377\377\377\377\377\207\207\207\377\0\0\0\377\0\0\0\377"
"\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0\0\377\202"
"\202\202\377\377\377\377\362\377\377\377\362\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\376\376\376\377\36\36\36\377\0\0\0\377"
"\0\0\0\377\40\40\40\377\376\376\376\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\331\331\331\377\31\31\31\377\0\0\0\377\0\0\0\377\0\0\0\377Q"
"QQ\377\374\374\374\377999\377\0\0\0\377\0\0\0\377ppp\377\377\377\377\377"
"\377\377\377\377\226\226\226\377\0\0\0\377\0\0\0\377\0\0\0\377\1\1\1\377"
"\213\213\213\377\370\370\370\377\377\377\377\377\371\371\371\377\260\260"
"\260\377///\377\377\377\377\377\377\377\377\377\207\207\207\377\0\0\0\377"
"\0\0\0\377\267\267\267\377\377\377\377\377\370\370\370\377\0\0\0\377\0\0"
"\0\377\234\234\234\377\377\377\377\355\377\377\377\314\375\375\375\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\325\325\325"
"\377\277\277\277\377\277\277\277\377\326\326\326\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\361\361\361"
"\377\254\254\254\377\241\241\241\377\337\337\337\377\377\377\377\377\377"
"\377\377\377\374\374\374\377\276\276\276\377\254\254\254\377\344\344\344"
"\377\377\377\377\377\377\377\377\377lll\377\0\0\0\377\0\0\0\377\0\0\0\377"
"\0\0\0\377\0\0\0\377\6\6\6\377)))\377\7\7\7\377\0\0\0\377:::\377\377\377"
"\377\377\377\377\377\377nnn\377\0\0\0\377\0\0\0\377---\377@@@\377>>>\377"
"\0\0\0\377\0\0\0\377\307\307\307\377\377\377\377\310\377\377\377\233\374"
"\374\374\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377>>>\377\0\0\0\377\0\0"
"\0\377\0\0\0\377TTT\377\210\210\210\377\240\240\240\377\16\16\16\377\0\0"
"\0\377\0\0\0\377xxx\377\377\377\377\377\377\377\377\377AAA\377\0\0\0\377"
"\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\15\15\15\377\361\361\361"
"\377\377\377\377\233\377\377\377m\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\335\335\335\377\5\5\5\377\0\0\0\377\0\0\0\377\0\0\0\377|||\377\377\377\377"
"\377\377\377\377\377\304\304\304\377kkk\377\203\203\203\377\366\366\366\377"
"\377\377\377\377\307\307\307\377\2\2\2\377\0\0\0\377\0\0\0\377\0\0\0\377"
"\0\0\0\377\0\0\0\377\0\0\0\377\216\216\216\377\377\377\377\377\377\377\377"
"m\377\377\377\15\377\377\377\327\376\376\376\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\376\376\376\377UUU\377\0\0\0\377"
"\0\0\0\377\0\0\0\377\0\0\0\377\3\3\3\377\232\232\232\377\373\373\373\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\267\267"
"\267\377\40\40\40\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0"
"\0\0\377BBB\377\371\371\371\377\377\377\377\324\377\377\377\15\377\377\377"
"\0\377\377\377F\377\377\377\373\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\242\242\242\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0"
"\377\0\0\0\377\0\0\0\377\0\0\0\377\31\31\31\377MMM\377ggg\377QQQ\377)))\377"
"\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377YY"
"Y\377\343\343\343\377\377\377\377\371\377\377\377F\377\377\377\0\377\377"
"\377\0\377\377\377\0\377\377\377\177\377\377\377\373\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\370"
"\370\370\377\213\213\213\377\11\11\11\377\0\0\0\377\0\0\0\377\0\0\0\377\0"
"\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0"
"\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\0\0\0\377\20\20\20\377KKK"
"\377\243\243\243\377\376\376\376\377\377\377\377\371\377\377\377\177\377"
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377"
"\377H\377\377\377\330\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377"
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\366\366\366"
"\377\354\354\354\377\322\322\322\377\302\302\302\377\302\302\302\377\302"
"\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302"
"\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302"
"\302\302\377\302\302\302\377\302\302\302\377\302\302\302\377\302\302\302"
"\377\302\302\302\377\316\316\316\377\367\367\367\377\377\377\377\377\377"
"\377\377\377\377\377\377\325\377\377\377G\377\377\377\0\377\377\377\0\377"
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377"
"\377\20\377\377\377r\377\377\377\237\377\377\377\315\377\377\377\362\377"
"\377\377\365\377\377\377\365\377\377\377\365\377\377\377\365\377\377\377"
"\365\377\377\377\365\377\377\377\365\377\377\377\365\377\377\377\365\377"
"\377\377\365\377\377\377\365\377\377\377\364\377\377\377\362\377\377\377"
"\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377"
"\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377"
"\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377"
"\377\377\362\377\377\377\362\377\377\377\362\377\377\377\362\377\377\377"
"\362\377\377\377\356\377\377\377\313\377\377\377\237\377\377\377s\377\377"
"\377\20\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0";
libvdpau-va-gl-0.3.6/src/watermark.h 0000664 0000000 0000000 00000000577 12665616006 0017310 0 ustar 00root root 0000000 0000000 /*
* Copyright 2013-2014 Rinat Ibragimov
*
* This file is part of libvdpau-va-gl
*
* libvdpau-va-gl is distributed under the terms of the LGPLv3. See COPYING for details.
*/
#ifndef VA_GL_SRC_WATERMARK_H
#define VA_GL_SRC_WATERMARK_H
/* Raw pixel data of the watermark image; the array itself is defined elsewhere
 * (the large string-literal dump preceding this header in the tree). */
extern const char *watermark_data;
/* Watermark image dimensions in pixels. */
extern const int watermark_width;
extern const int watermark_height;
#endif /* VA_GL_SRC_WATERMARK_H */
libvdpau-va-gl-0.3.6/tests/ 0000775 0000000 0000000 00000000000 12665616006 0015504 5 ustar 00root root 0000000 0000000 libvdpau-va-gl-0.3.6/tests/CMakeLists.txt 0000664 0000000 0000000 00000002037 12665616006 0020246 0 ustar 00root root 0000000 0000000 cmake_minimum_required(VERSION 2.8)
project(tests-for-libvdpau-va-gl)
# Project headers live one level up; generated headers come from the parent scope.
include_directories(..)
include_directories(${GENERATED_INCLUDE_DIRS})
find_package(X11 REQUIRED)
pkg_check_modules(VDPAU vdpau REQUIRED)
# Every test binary links X11, libvdpau, and pthread.
link_libraries(${X11_LIBRARIES} ${VDPAU_LIBRARIES} -lpthread)
link_directories(${X11_LIBRARY_DIRS} ${VDPAU_LIBRARY_DIRS})
# Tests that exercise the driver; each needs the static driver library.
list(APPEND _vdpau_tests
test-001 test-002 test-003 test-004 test-005 test-006
test-007 test-008 test-009 test-010)
list(APPEND _all_tests test-000 ${_vdpau_tests})
# test-000 exercises only the bitstream reader, so it compiles bitstream.c
# directly instead of linking the driver.
add_executable(test-000 EXCLUDE_FROM_ALL test-000.c ../src/bitstream.c)
foreach(_test ${_vdpau_tests})
add_executable(${_test} EXCLUDE_FROM_ALL "${_test}.c" tests-common.c)
target_link_libraries(${_test} ${DRIVER_NAME}_static)
endforeach(_test)
# Register each test with CTest and make the top-level "build-tests" target
# (a dependency of "check", see the root CMakeLists.txt) build it.
foreach(_test ${_all_tests})
add_test(${_test} ${CMAKE_CURRENT_BINARY_DIR}/${_test})
add_dependencies(build-tests ${_test})
endforeach(_test)
# tmp for testing
include_directories(${GLIB20_INCLUDE_DIRS})
add_executable(conv-speed conv-speed.c)
target_link_libraries(conv-speed ${DRIVER_NAME}_static)
libvdpau-va-gl-0.3.6/tests/conv-speed.c 0000664 0000000 0000000 00000005277 12665616006 0017726 0 ustar 00root root 0000000 0000000 #ifdef NDEBUG
#undef NDEBUG
#endif
#define CHECK(expr) if (VDP_STATUS_OK != (expr)) assert(0);
#include
#include "api.h"
#include
#include
#include
// force linking library constructor
void va_gl_library_constructor();
void *dummy_ptr = va_gl_library_constructor;
int
main(int argc, char *argv[])
{
    // Benchmark for the YV12 upload + mixer render path: repeatedly uploads
    // constant YV12 planes into a video surface, renders through a mixer into
    // an output surface, and prints repetitions per second.
    // argv[1] (optional): repetition count, default 3000.
    const int width = 720;
    const int height = 480;
    VdpGetProcAddress *get_proc_address;
    VdpDevice vdp_device;
    VdpVideoSurface vdp_video_surface;
    VdpVideoMixer vdp_video_mixer;
    VdpOutputSurface vdp_output_surface;
    Display *dpy;

    dpy = XOpenDisplay(NULL);
    assert(dpy);
    CHECK(vdpDeviceCreateX11(dpy, 0, &vdp_device, &get_proc_address));
    CHECK(vdpVideoSurfaceCreate(vdp_device, VDP_CHROMA_TYPE_420, width, height,
                                &vdp_video_surface));
    CHECK(vdpOutputSurfaceCreate(vdp_device, VDP_RGBA_FORMAT_B8G8R8A8, width, height,
                                 &vdp_output_surface));
    CHECK(vdpVideoMixerCreate(vdp_device, 0, NULL, 0, NULL, NULL, &vdp_video_mixer));

    // YV12: full-resolution luma plane plus two quarter-size chroma planes.
    char *y_plane = malloc(width * height);
    char *u_plane = malloc((width/2) * (height/2));
    char *v_plane = malloc((width/2) * (height/2));
    // Check allocations before the pointers are used anywhere.
    assert(y_plane);
    assert(u_plane);
    assert(v_plane);
    const void *source_planes[4] = { y_plane, u_plane, v_plane, NULL };
    uint32_t source_pitches[4] = { width, width/2, width/2, 0 };
    memset(y_plane, 128, width * height);
    memset(u_plane, 200, (width/2) * (height/2));
    memset(v_plane, 95, (width/2) * (height/2));

    struct timespec t_start, t_end;
    int rep_count = 3000;       // default; may be overridden on the command line
    if (argc >= 2)
        rep_count = atoi(argv[1]);

    clock_gettime(CLOCK_MONOTONIC, &t_start);
    for (int k = 0; k < rep_count; k ++) {
        CHECK(vdpVideoSurfacePutBitsYCbCr(vdp_video_surface, VDP_YCBCR_FORMAT_YV12,
                                          source_planes, source_pitches));
        CHECK(vdpVideoMixerRender(vdp_video_mixer, -1, NULL,
                                  VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME,
                                  0, NULL, vdp_video_surface, 0, NULL,
                                  NULL, vdp_output_surface, NULL, NULL, 0, NULL));
    }
    clock_gettime(CLOCK_MONOTONIC, &t_end);

    double duration = t_end.tv_sec - t_start.tv_sec + (t_end.tv_nsec - t_start.tv_nsec) / 1.0e9;
    printf("%d repetitions in %f secs, %f per sec\n", rep_count, duration, rep_count / duration);

    CHECK(vdpOutputSurfaceDestroy(vdp_output_surface));
    CHECK(vdpVideoMixerDestroy(vdp_video_mixer));
    CHECK(vdpVideoSurfaceDestroy(vdp_video_surface));
    CHECK(vdpDeviceDestroy(vdp_device));
    // Release the plane buffers and the X connection (previously leaked).
    free(y_plane);
    free(u_plane);
    free(v_plane);
    XCloseDisplay(dpy);
    return 0;
}
libvdpau-va-gl-0.3.6/tests/test-000.c 0000664 0000000 0000000 00000004055 12665616006 0017130 0 ustar 00root root 0000000 0000000 #ifdef NDEBUG
#undef NDEBUG
#endif
#include "bitstream.h"
#include
#include
int main(void)
{
    /* Exercises the RBSP bitstream reader: unsigned/signed Exp-Golomb codes,
       fixed-width bit reads, and handling of 0x03 bytes embedded in the
       stream. Table-driven: each phase re-attaches the buffer and walks an
       array of expected values. */
    unsigned char buf[] = {0xa6, 0x42, 0x98, 0xe2, 0x3f};
    rbsp_state_t st;

    /* ue(v) reads: 0..7 followed by two zeroes */
    static const int uev_expected[10] = {0, 1, 2, 3, 4, 5, 6, 7, 0, 0};
    rbsp_attach_buffer(&st, buf, 5);
    for (int k = 0; k < 10; k ++)
        assert (uev_expected[k] == rbsp_get_uev(&st));

    /* fixed-width u(n) reads of varying widths */
    static const int u_width[9]    = {1, 3, 3, 5, 5, 5, 5, 7, 1};
    static const int u_expected[9] = {1, 2, 3, 4, 5, 6, 7, 8, 1};
    rbsp_attach_buffer(&st, buf, 5);
    for (int k = 0; k < 9; k ++)
        assert (u_expected[k] == rbsp_get_u(&st, u_width[k]));

    /* se(v) reads: alternating-sign mapping of the same bit pattern */
    static const int sev_expected[10] = {0, 1, -1, 2, -2, 3, -3, 4, 0, 0};
    rbsp_attach_buffer(&st, buf, 5);
    for (int k = 0; k < 10; k ++)
        assert (sev_expected[k] == rbsp_get_sev(&st));

    /* 0x03 bytes after zero pairs are not delivered as payload bits */
    unsigned char buf2[] = {0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00};
    rbsp_attach_buffer(&st, buf2, 8);
    for (int k = 0; k < 6 * 8; k ++)
        assert (0 == rbsp_get_u(&st, 1));

    unsigned char buf3[] = {0x00, 0x00, 0x03, 0xff, 0xff};
    rbsp_attach_buffer(&st, buf3, 5);
    for (int k = 0; k < 16; k ++)
        assert (0 == rbsp_get_u(&st, 1));
    for (int k = 0; k < 16; k ++)
        assert (1 == rbsp_get_u(&st, 1));

    /* 0x03 preceded by a longer run of zero bytes */
    unsigned char buf4[] = {0x00, 0x00, 0x00, 0x03, 0xff};
    rbsp_attach_buffer(&st, buf4, 5);
    for (int k = 0; k < 24; k ++)
        assert (0 == rbsp_get_u(&st, 1));
    for (int k = 0; k < 8; k ++)
        assert (1 == rbsp_get_u(&st, 1));

    printf ("pass\n");
}
libvdpau-va-gl-0.3.6/tests/test-001.c 0000664 0000000 0000000 00000011554 12665616006 0017133 0 ustar 00root root 0000000 0000000 // Create two output surfaces (B8G8R8A8) of 4x4, fill first with opaque black
// and second with black and two red dots (opaque too).
// Render second into first. Check that red dots do not get smoothed.
// The dot at (1, 1) checks for smoothing, one at (3,3) checks for edge condition.
#include "tests-common.h"
#include
#include
#include
int main(void)
{
// Renders a 4x4 surface with two red dots over an opaque-black 4x4 surface
// using a 1/0 (opaque copy) blend, once surface-to-surface and once
// bitmap-to-surface. The readback must match the source exactly, i.e. no
// filtering/smoothing may have been applied.
VdpDevice device;
Display *dpy = get_dpy();
ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &device, NULL));
VdpOutputSurface out_surface_1;
VdpOutputSurface out_surface_2;
ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, &out_surface_1));
ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, &out_surface_2));
// 0xAARRGGBB in B8G8R8A8 byte order: opaque black everywhere.
uint32_t black_box[] = {
0xff000000, 0xff000000, 0xff000000, 0xff000000,
0xff000000, 0xff000000, 0xff000000, 0xff000000,
0xff000000, 0xff000000, 0xff000000, 0xff000000,
0xff000000, 0xff000000, 0xff000000, 0xff000000
};
// Same, with opaque red at (1,1) (smoothing probe) and (3,3) (edge probe).
uint32_t two_red_dots[] = {
0xff000000, 0xff000000, 0xff000000, 0xff000000,
0xff000000, 0xffff0000, 0xff000000, 0xff000000,
0xff000000, 0xff000000, 0xff000000, 0xff000000,
0xff000000, 0xff000000, 0xff000000, 0xffff0000
};
const void * const source_data_1[] = {black_box};
const void * const source_data_2[] = {two_red_dots};
uint32_t source_pitches[] = { 4 * 4 };
// upload data
ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface_1, source_data_1, source_pitches, NULL));
ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface_2, source_data_2, source_pitches, NULL));
// render
// Opaque copy: source * ONE + destination * ZERO, for color and alpha.
VdpOutputSurfaceRenderBlendState blend_state = {
.struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION,
.blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
.blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
.blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,
.blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,
.blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_constant = {0, 0, 0, 0}
};
ASSERT_OK(vdpOutputSurfaceRenderOutputSurface(out_surface_1, NULL, out_surface_2, NULL,
NULL, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0));
// get data back
uint32_t receive_buf[16];
void * const dest_data[] = {receive_buf};
ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface_1, NULL, dest_data, source_pitches));
printf("output surface\n");
for (int k = 0; k < 16; k ++) {
printf("%x ", receive_buf[k]);
if (3 == k % 4) printf("\n");
}
printf("----------\n");
for (int k = 0; k < 16; k ++) {
printf("%x ", two_red_dots[k]);
if (3 == k % 4) printf("\n");
}
// compare recieve_buf with two_red_dots
if (memcmp(receive_buf, two_red_dots, 4*4*4)) {
printf("fail\n");
return 1;
}
// Check bitmap surface rendering smoothing issue
VdpBitmapSurface bmp_surface;
ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, 1, &bmp_surface));
ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data_2, source_pitches, NULL));
// Same opaque-copy blend, but driven through the bitmap-surface path.
VdpOutputSurfaceRenderBlendState blend_state_opaque_copy = {
.struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION,
.blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
.blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
.blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,
.blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,
.blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_constant = {0, 0, 0, 0}
};
ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface_1, NULL, bmp_surface, NULL,
NULL, &blend_state_opaque_copy, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0));
ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface_1, NULL, dest_data, source_pitches));
printf("bitmap surface\n");
for (int k = 0; k < 16; k ++) {
printf("%x ", receive_buf[k]);
if (3 == k % 4) printf("\n");
}
printf("----------\n");
for (int k = 0; k < 16; k ++) {
printf("%x ", two_red_dots[k]);
if (3 == k % 4) printf("\n");
}
if (memcmp(receive_buf, two_red_dots, 4*4*4)) {
printf("fail\n");
return 2;
}
printf("pass\n");
return 0;
}
libvdpau-va-gl-0.3.6/tests/test-002.c 0000664 0000000 0000000 00000007025 12665616006 0017132 0 ustar 00root root 0000000 0000000 // test-002
// Test alignment issues of output surface {get,put}bits.
// Uploads 5x5 square of A8 samples, thus breaking 4-byte alignment. Then downloads and
// compares. Buffers should contain identical data.
//
// Bitmap surfaces checked too. But since there is no way to download data directly from
// bitmap surface, we doing this via rendering to output surface.
#include "tests-common.h"
#include
#include
int main(void)
{
// Alignment test: a 5-byte-pitch A8 image deliberately breaks 4-byte row
// alignment; upload + download must round-trip through both the output
// surface and (via rendering) the bitmap surface.
Display *dpy = get_dpy();
VdpDevice device;
VdpOutputSurface out_surface;
VdpBitmapSurface bmp_surface;
// 5x5 A8 samples 0x01..0x19, row-major.
uint8_t twenty_five[] = {
0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a,
0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14,
0x15, 0x16, 0x17, 0x18, 0x19
};
uint8_t out_buf[25];
const void * const source_data[] = { twenty_five };
void * const destination_data[] = { out_buf };
uint32_t source_pitches[] = { 5 };
uint32_t destination_pitches[] = { 5 };
ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &device, NULL));
ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_A8, 5, 5, &out_surface));
// upload image to surface, download image from surface
ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface, source_data, source_pitches, NULL));
ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface, NULL, destination_data, destination_pitches));
printf("outputsurface\n");
for (int k = 0; k < 25; k ++) {
printf(" %02x", twenty_five[k]);
if (k % 5 == 4) printf("\n");
}
printf("----------\n");
for (int k = 0; k < 25; k ++) {
printf(" %02x", out_buf[k]);
if (k % 5 == 4) printf("\n");
}
printf("==========\n");
// Tolerance of 2: small per-sample deviations are accepted.
if (calc_difference_a8(out_buf, twenty_five, 25) > 2) {
printf("failure\n");
return 1;
}
// Do check bitmap surface
ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_A8, 5, 5, 1, &bmp_surface));
ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data, source_pitches, NULL));
// draw alpha channel as color
// There is no direct bitmap-surface readback, so the bitmap's alpha is
// blended onto the output surface and read back from there.
VdpOutputSurfaceRenderBlendState blend_state = {
.struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION,
.blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA,
.blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
.blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,
.blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,
.blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_constant = {0, 0, 0, 0}
};
ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface, NULL, bmp_surface, NULL, NULL,
&blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0));
ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface, NULL, destination_data, destination_pitches));
printf("bitmapsurface\n");
for (int k = 0; k < 25; k ++) {
printf(" %02x", twenty_five[k]);
if (k % 5 == 4) printf("\n");
}
printf("----------\n");
for (int k = 0; k < 25; k ++) {
printf(" %02x", out_buf[k]);
if (k % 5 == 4) printf("\n");
}
printf("==========\n");
if (calc_difference_a8(out_buf, twenty_five, 25) > 2) {
printf("failure\n");
return 2;
}
printf("pass\n");
return 0;
}
libvdpau-va-gl-0.3.6/tests/test-003.c 0000664 0000000 0000000 00000006717 12665616006 0017142 0 ustar 00root root 0000000 0000000 // test-003
//
// Testing blending A8 bitmap surface into B8G8R8A8 output with following blend parameters:
// source/destination colors : src alpha / 1 - src alpha
// source/destination alpha : one / src alpha
// blend equation for color / alpha : add / add
//
// target surface filled with {0, 0, 0, 1}
//
// coloring with color {0, 1, 0, 1}. This should be green with alpha == 1.
#include "tests-common.h"
#include
#include
int main(void)
{
Display *dpy = get_dpy();
VdpDevice device;
VdpBitmapSurface bmp_surface;
VdpOutputSurface out_surface;
const uint8_t bmp_1[] = {
0x00, 0x01, 0x02, 0x03,
0x14, 0x15, 0x16, 0x17,
0x28, 0x29, 0x2a, 0x2b,
0x3c, 0x3d, 0x3e, 0x3f
};
const void * const source_data_bmp[] = { bmp_1 };
uint32_t source_pitches_bmp[] = { 4 };
const uint32_t black_4x4[] = {
0xff000000, 0xff000000, 0xff000000, 0xff000000,
0xff000000, 0xff000000, 0xff000000, 0xff000000,
0xff000000, 0xff000000, 0xff000000, 0xff000000,
0xff000000, 0xff000000, 0xff000000, 0xff000000
};
const void * const source_data_black[] = { black_4x4 };
uint32_t source_pitches_black[] = { 4 * 4 };
ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &device, NULL));
// create surfaces
ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_A8, 4, 4, 1, &bmp_surface));
ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 4, 4, &out_surface));
// upload data
ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data_bmp, source_pitches_bmp, NULL));
ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface, source_data_black, source_pitches_black, NULL));
VdpOutputSurfaceRenderBlendState blend_state = {
.blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA,
.blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
.blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
.blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA,
.blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_constant = {0, 0, 0, 0}
};
VdpColor color[] = {{0, 1.0, 0, 1.0}};
ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface, NULL, bmp_surface, NULL,
color, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0));
const uint32_t expected_result[] = {
0x00000000, 0x02000100, 0x04000200, 0x06000300,
0x28001400, 0x2a001500, 0x2c001600, 0x2e001700,
0x50002800, 0x52002900, 0x54002a00, 0x56002b00,
0x78003c00, 0x7a003d00, 0x7c003e00, 0x7e003f00
};
uint32_t result[16];
void * const dest_data[] = { result };
ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface, NULL, dest_data, source_pitches_black));
printf("=== expected ===\n");
for (int k = 0; k < 16; k ++) {
printf(" %08x", expected_result[k]);
if (k % 4 == 3) printf("\n");
}
printf("--- actual ---\n");
for (int k = 0; k < 16; k ++) {
printf(" %08x", result[k]);
if (k % 4 == 3) printf("\n");
}
printf("==========\n");
if (memcmp(expected_result, result, sizeof(expected_result))) {
printf("fail\n");
return 1;
}
printf("pass\n");
return 0;
}
libvdpau-va-gl-0.3.6/tests/test-004.c 0000664 0000000 0000000 00000006745 12665616006 0017144 0 ustar 00root root 0000000 0000000 // test-004
//
// Large scale (> 500 pixels) smooth test for VdpOutputSurfaceRenderBitmapSurface and
// VdpOutputSurfaceRenderOutputSurface.
// Rendering the same pattern via both paths and then comparing results. Using opaque copy,
// only source matters.
#include "tests-common.h"
#include
#include
#include
#define WIDTH 509
#define HEIGHT 601
int main(void)
{
    // Large-scale (WIDTH x HEIGHT) opaque-copy comparison between
    // RenderOutputSurface and RenderBitmapSurface: both paths must reproduce
    // the uploaded pattern within the calc_difference_r8g8b8a8 tolerance.
    Display *dpy = get_dpy();
    int err_code = 0;
    VdpDevice device;
    ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &device, NULL));

    VdpOutputSurface out_surface_in;
    VdpOutputSurface out_surface_out;
    VdpBitmapSurface bmp_surface;
    ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, WIDTH, HEIGHT, &out_surface_in));
    ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, WIDTH, HEIGHT, &out_surface_out));
    ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, WIDTH, HEIGHT, 1, &bmp_surface));

    uint32_t *src = malloc(4 * WIDTH * HEIGHT);
    uint32_t *dst = malloc(4 * WIDTH * HEIGHT);
    // Bug fix: was `||`, which lets a single failed allocation slip through
    // and be dereferenced below. Both buffers must be non-NULL.
    assert (NULL != src && NULL != dst);
    for (int k = 0; k < WIDTH * HEIGHT; k ++) {
        src[k] = ((k & 0xff) << 8) + (0xff << 24); // green pixel pattern
    }
    const void * const source_data[] = { src };
    void * const destination_data[] = { dst };
    uint32_t source_pitches[] = { 4 * WIDTH };
    uint32_t destination_pitches[] = { 4 * WIDTH };
    ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface_in, source_data, source_pitches, NULL));
    ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data, source_pitches, NULL));

    // Opaque copy: source * ONE + destination * ZERO for color and alpha.
    VdpOutputSurfaceRenderBlendState blend_state_opaque_copy = {
        .struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION,
        .blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
        .blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
        .blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,
        .blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,
        .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
        .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
        .blend_constant = {0, 0, 0, 0}
    };
    ASSERT_OK(vdpOutputSurfaceRenderOutputSurface(out_surface_out, NULL, out_surface_in, NULL,
              NULL, &blend_state_opaque_copy, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0));

    // check result of vdpOutputSurfaceRenderOutputSurface
    ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface_out, NULL, destination_data, destination_pitches));
    if (calc_difference_r8g8b8a8(src, dst, WIDTH * HEIGHT) > 2) {
        printf("fail / vdpOutputSurfaceRenderOutputSurface\n");
        err_code = 1;
        goto free_resources_and_exit;
    }

    // check vdpOutputSurfaceRenderBitmapSurface
    ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface_out, NULL, bmp_surface, NULL,
              NULL, &blend_state_opaque_copy, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0));
    ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface_out, NULL, destination_data, destination_pitches));
    if (calc_difference_r8g8b8a8(src, dst, WIDTH * HEIGHT) > 2) {
        printf("fail / vdpOutputSurfaceRenderBitmapSurface\n");
        err_code = 2;
        goto free_resources_and_exit;
    }

    printf("pass\n");
free_resources_and_exit:
    free(src);
    free(dst);
    return err_code;
}
libvdpau-va-gl-0.3.6/tests/test-005.c 0000664 0000000 0000000 00000012423 12665616006 0017133 0 ustar 00root root 0000000 0000000 // test-005
//
// rendering A8 bitmap to non-black output surface.
// source/destination colors : src alpha / 1 - src alpha
// source/destination alpha : one / src alpha
// blend equation for color / alpha : add / add
//
// target surface filled with {1, 0, 0, 1}
//
// coloring with color {0, 1, 0, 1}. This should be green with alpha == 1.
#include "tests-common.h"
#include
#include
#include
int main(void)
{
// Blends a colored 5x5 A8 bitmap into the middle of a 7x7 green surface,
// then recomputes the same blend on the CPU and compares per-channel,
// allowing a maximum deviation of 1.
Display *dpy = get_dpy();
VdpDevice device;
ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &device, NULL));
VdpBitmapSurface bmp_surface;
VdpOutputSurface out_surface;
ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_A8, 5, 5, 1, &bmp_surface));
ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 7, 7, &out_surface));
const uint8_t bmp_data[5 * 5] = {
/* 1 2 3 4 5 */
/* 1 */ 0x00, 0x1e, 0x1f, 0x20, 0x21,
/* 2 */ 0x01, 0x3e, 0x3f, 0x40, 0x41,
/* 3 */ 0x02, 0x5e, 0x5f, 0x60, 0x61,
/* 4 */ 0x03, 0x8e, 0x7f, 0xff, 0xff,
/* 5 */ 0x04, 0xce, 0x7f, 0xff, 0xff
};
const void * const source_data_bmp[] = { bmp_data };
uint32_t source_pitches_bmp[] = { 5 * 1 };
uint32_t green_screen[7 * 7];
const void * const source_data[] = { green_screen };
uint32_t source_pitches[] = { 7 * 4 };
// Fill the destination with opaque green.
for (int k = 0; k < 7 * 7; k ++) {
green_screen[k] = 0xff00ff00;
}
ASSERT_OK(vdpOutputSurfacePutBitsNative(out_surface, source_data, source_pitches, NULL));
ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data_bmp, source_pitches_bmp, NULL));
// NOTE(review): struct_version is left unset (zero via designated
// initializers); sibling tests set VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION.
VdpOutputSurfaceRenderBlendState blend_state = {
.blend_factor_source_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA,
.blend_factor_destination_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
.blend_factor_source_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
.blend_factor_destination_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA,
.blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_constant = {0, 0, 0, 0}
};
VdpColor color[] = {{0.7, 0.3, 0.1, 0.6}};
// Destination rectangle offsets the bitmap by (1,1) inside the 7x7 surface.
VdpRect dest_rect = {1, 1, 6, 6};
ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface, &dest_rect, bmp_surface, NULL,
color, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0));
uint32_t result_buf[7 * 7];
void * const dest_data[] = { result_buf };
ASSERT_OK(vdpOutputSurfaceGetBitsNative(out_surface, NULL, dest_data, source_pitches));
printf("--- actual ---\n");
for (int k = 0; k < 7 * 7; k ++) {
printf(" %08x", result_buf[k]);
if (k % 7 == 7 - 1) printf("\n");
}
uint32_t expected[7 * 7];
// compute expected result
// CPU reference: same blend equations as blend_state above, applied to the
// 5x5 region starting at (1,1); the border row/column stays green.
for (int x = 0; x < 7 * 7; x ++) expected[x] = green_screen[x];
for (int y = 0; y < 5; y ++) {
for (int x = 0; x < 5; x ++) {
float src_r = 1.0 * color[0].red;
float src_g = 1.0 * color[0].green;
float src_b = 1.0 * color[0].blue;
float src_a = bmp_data[y*5+x]/255.0 * color[0].alpha;
uint32_t dst_bgra = expected[(y+1)*7 + (x+1)];
float dst_a = ((dst_bgra >> 24) & 0xff) / 255.0;
float dst_r = ((dst_bgra >> 16) & 0xff) / 255.0;
float dst_g = ((dst_bgra >> 8) & 0xff) / 255.0;
float dst_b = ((dst_bgra >> 0) & 0xff) / 255.0;
float res_r = src_r * src_a + dst_r * (1.0 - src_a);
float res_g = src_g * src_a + dst_g * (1.0 - src_a);
float res_b = src_b * src_a + dst_b * (1.0 - src_a);
float res_a = src_a * 1.0 + dst_a * src_a;
uint32_t r = (res_r * 255.0);
uint32_t g = (res_g * 255.0);
uint32_t b = (res_b * 255.0);
uint32_t a = (res_a * 255.0);
// Clamp each channel to 8 bits before packing.
if (r > 255) r = 255;
if (g > 255) g = 255;
if (b > 255) b = 255;
if (a > 255) a = 255;
expected[(y+1)*7 + (x+1)] = (a << 24) | (r << 16) | (g << 8) | (b);
}
}
printf("--- expected ---\n");
for (int k = 0; k < 7 * 7; k ++) {
printf(" %08x", expected[k]);
if (k % 7 == 7 - 1) printf("\n");
}
printf("=================\n");
printf("--- difference --- \n");
// Print per-channel absolute differences and track the worst one.
uint32_t max_diff = 0;
for (int k = 0; k < 7 * 7; k ++) {
uint32_t diff_a = abs(((expected[k] >> 24) & 0xff) - ((result_buf[k] >> 24) & 0xff));
uint32_t diff_r = abs(((expected[k] >> 16) & 0xff) - ((result_buf[k] >> 16) & 0xff));
uint32_t diff_g = abs(((expected[k] >> 8) & 0xff) - ((result_buf[k] >> 8) & 0xff));
uint32_t diff_b = abs(((expected[k] >> 0) & 0xff) - ((result_buf[k] >> 0) & 0xff));
printf(" %08x", (diff_a << 24) + (diff_r << 16) + (diff_g << 8) + (diff_b));
if (k % 7 == 7 - 1) printf("\n");
if (diff_a > max_diff) max_diff = diff_a;
if (diff_r > max_diff) max_diff = diff_r;
if (diff_g > max_diff) max_diff = diff_g;
if (diff_b > max_diff) max_diff = diff_b;
}
printf("=================\n");
// Allow rounding differences of at most 1 per channel.
if (max_diff > 1) {
printf("fail\n");
return 1;
}
printf("pass\n");
return 0;
}
libvdpau-va-gl-0.3.6/tests/test-006.c 0000664 0000000 0000000 00000006146 12665616006 0017141 0 ustar 00root root 0000000 0000000 // test-006
//
// initializing/finalizing number of times a row with some drawing between.
// This test is to reveal thread-safety failure inside VDPAU library.
//
// Initialization function executed once, but then THREAD_COUNT (50) threads try to do
// the same rendering work simultaneously.
#include "tests-common.h"
#include
#include
#define THREAD_COUNT 50
VdpDevice device;
Window window;
// Per-thread body: create a full presentation pipeline (queue target, queue,
// two output surfaces, one bitmap surface), upload a bitmap, render and
// display it, then tear everything down in reverse.  Many threads run this
// concurrently against the single global device to expose races.
void *thread_1_func(void *p)
{
(void)p; // unused
VdpPresentationQueueTarget pq_target;
VdpPresentationQueue pq;
VdpOutputSurface out_surface;
VdpOutputSurface out_surface_2;
VdpBitmapSurface bmp_surface;
// resource creation; `device` and `window` are shared globals set up in main()
ASSERT_OK(vdpPresentationQueueTargetCreateX11(device, window, &pq_target));
ASSERT_OK(vdpPresentationQueueCreate(device, pq_target, &pq));
ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 300, 150, &out_surface));
ASSERT_OK(vdpOutputSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 300, 150, &out_surface_2));
ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 300, 150, 1, &bmp_surface));
// fill a 300x150 buffer with an opaque gradient-ish pattern and upload it
uint32_t buf[300*150];
const void * const source_data[] = { buf };
uint32_t source_pitches[] = { 4 * 300 };
for (int k = 0; k < 300*150; k ++) { buf[k] = 0xff000000 + (k & 0xffffff); }
ASSERT_OK(vdpBitmapSurfacePutBitsNative(bmp_surface, source_data, source_pitches, NULL));
VdpTime vdpTime = 0;
ASSERT_OK(vdpPresentationQueueBlockUntilSurfaceIdle(pq, out_surface, &vdpTime));
ASSERT_OK(vdpPresentationQueueGetTime(pq, &vdpTime));
// plain copy blend: source replaces destination (ONE/ZERO factors, ADD)
VdpOutputSurfaceRenderBlendState blend_state = {
.blend_factor_source_color=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
.blend_factor_destination_color=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,
.blend_factor_source_alpha=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
.blend_factor_destination_alpha=VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO,
.blend_equation_color=VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_equation_alpha=VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
.blend_constant = { 0, 0, 0, 0}
};
VdpRect source_rect = {0, 0, 300, 150};
VdpRect destination_rect = {0, 0, 300, 150};
ASSERT_OK(vdpOutputSurfaceRenderBitmapSurface(out_surface, &destination_rect, bmp_surface,
&source_rect, NULL, &blend_state, VDP_OUTPUT_SURFACE_RENDER_ROTATE_0));
ASSERT_OK(vdpPresentationQueueDisplay(pq, out_surface, 0, 0, 0));
// teardown; note out_surface may still be queued for display when destroyed —
// the implementation is expected to handle that safely
ASSERT_OK(vdpOutputSurfaceDestroy(out_surface));
ASSERT_OK(vdpOutputSurfaceDestroy(out_surface_2));
ASSERT_OK(vdpPresentationQueueDestroy(pq));
ASSERT_OK(vdpPresentationQueueTargetDestroy(pq_target));
ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surface));
return NULL;
}
// Spawn THREAD_COUNT worker threads that all exercise the rendering pipeline
// against one shared device, then join them and destroy the device.
int main(void)
{
    Display *dpy = get_dpy();
    window = get_wnd();
    pthread_t pt[THREAD_COUNT];
    ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &device, NULL));
    for (int k = 0; k < THREAD_COUNT; k ++) {
        // pthread_create returns a non-zero error code on failure; joining a
        // thread that was never created is undefined behavior, so check it.
        int err = pthread_create(&pt[k], NULL, thread_1_func, NULL);
        assert(0 == err);
    }
    for (int k = 0; k < THREAD_COUNT; k ++)
        pthread_join(pt[k], NULL);
    ASSERT_OK(vdpDeviceDestroy(device));
    return 0;
}
libvdpau-va-gl-0.3.6/tests/test-007.c 0000664 0000000 0000000 00000011032 12665616006 0017130 0 ustar 00root root 0000000 0000000 // test-007
// TOUCHES: VdpBitmapSurfaceCreate
// TOUCHES: VdpBitmapSurfaceDestroy
// TOUCHES: VdpBitmapSurfaceGetParameters
// TOUCHES: VdpBitmapSurfaceQueryCapabilities
#include "tests-common.h"
#include
// Exercises the VdpBitmapSurface API surface: creation error paths,
// GetParameters round-trip, QueryCapabilities argument validation and
// per-format support, and Destroy error paths.
int main(void)
{
Display *dpy = get_dpy();
VdpDevice device;
VdpBitmapSurface bmp_surf1;
VdpBitmapSurface bmp_surf2;
ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &device, NULL));
// invalid device handle (device+1 is not a handle that was ever issued)
assert(VDP_STATUS_INVALID_HANDLE ==
vdpBitmapSurfaceCreate(device+1, VDP_RGBA_FORMAT_A8, 13, 13, 1, &bmp_surf1));
// invalid rgba format
assert(VDP_STATUS_INVALID_RGBA_FORMAT ==
vdpBitmapSurfaceCreate(device, -2, 13, 13, 1, &bmp_surf1));
// normal parameters
ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_B8G8R8A8, 123, 234, 1, &bmp_surf1));
ASSERT_OK(vdpBitmapSurfaceCreate(device, VDP_RGBA_FORMAT_R8G8B8A8, 345, 456, 0, &bmp_surf2));
uint32_t width, height;
VdpBool fa;
VdpRGBAFormat rgba_f;
// test that getParameters get actual supplied parameters
ASSERT_OK(vdpBitmapSurfaceGetParameters(bmp_surf1, &rgba_f, &width, &height, &fa));
assert(VDP_RGBA_FORMAT_B8G8R8A8 == rgba_f);
assert(123 == width);
assert(234 == height);
assert(1 == fa);
// test with other surface
ASSERT_OK(vdpBitmapSurfaceGetParameters(bmp_surf2, &rgba_f, &width, &height, &fa));
assert(VDP_RGBA_FORMAT_R8G8B8A8 == rgba_f);
assert(345 == width);
assert(456 == height);
assert(0 == fa);
// test getParameters with NULLs: each output pointer must be validated
assert(VDP_STATUS_INVALID_POINTER ==
vdpBitmapSurfaceGetParameters(bmp_surf1, NULL, &width, &height, &fa));
assert(VDP_STATUS_INVALID_POINTER ==
vdpBitmapSurfaceGetParameters(bmp_surf1, &rgba_f, NULL, &height, &fa));
assert(VDP_STATUS_INVALID_POINTER ==
vdpBitmapSurfaceGetParameters(bmp_surf1, &rgba_f, &width, NULL, &fa));
assert(VDP_STATUS_INVALID_POINTER ==
vdpBitmapSurfaceGetParameters(bmp_surf1, &rgba_f, &width, &height, NULL));
// test with invalid bitmap handle (a device handle is not a bitmap handle)
assert(VDP_STATUS_INVALID_HANDLE ==
vdpBitmapSurfaceGetParameters(device, &rgba_f, &width, &height, &fa));
VdpBool is_supported;
// testing query capabilities: bad handle, then each NULL output pointer
assert(VDP_STATUS_INVALID_HANDLE ==
vdpBitmapSurfaceQueryCapabilities(device+1, VDP_RGBA_FORMAT_A8, &is_supported,
&width, &height));
assert(VDP_STATUS_INVALID_POINTER ==
vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_A8, NULL,
&width, &height));
assert(VDP_STATUS_INVALID_POINTER ==
vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_A8, &is_supported,
NULL, &height));
assert(VDP_STATUS_INVALID_POINTER ==
vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_A8, &is_supported,
&width, NULL));
// querying various formats: all five must be supported with non-zero max size
ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_B8G8R8A8, &is_supported,
&width, &height));
assert(is_supported);
assert(width > 0);
assert(height > 0);
ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_R8G8B8A8, &is_supported,
&width, &height));
assert(is_supported);
assert(width > 0);
assert(height > 0);
ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_R10G10B10A2, &is_supported,
&width, &height));
assert(is_supported);
assert(width > 0);
assert(height > 0);
ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_B10G10R10A2, &is_supported,
&width, &height));
assert(is_supported);
assert(width > 0);
assert(height > 0);
ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_A8, &is_supported,
&width, &height));
assert(is_supported);
assert(width > 0);
assert(height > 0);
// query wrong format: must succeed but report "not supported"
ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, 9000, &is_supported, &width, &height));
assert (0 == is_supported);
// try to destroy wrong surface
assert (VDP_STATUS_INVALID_HANDLE == vdpBitmapSurfaceDestroy(-2));
assert (VDP_STATUS_INVALID_HANDLE == vdpBitmapSurfaceDestroy(device));
assert (VDP_STATUS_INVALID_HANDLE == vdpBitmapSurfaceDestroy(bmp_surf1 + 43000));
// really destroy surfaces
ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surf1));
ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surf2));
ASSERT_OK(vdpDeviceDestroy(device));
printf("pass\n");
return 0;
}
libvdpau-va-gl-0.3.6/tests/test-008.c 0000664 0000000 0000000 00000006331 12665616006 0017137 0 ustar 00root root 0000000 0000000 // test-008
// Progressively create larger and larger bitmap surfaces, up to maximum
// allowed size. First square ones, then stretched in both directions
// in turn
// TOUCHES: VdpBitmapSurfaceCreate
// TOUCHES: VdpBitmapSurfaceQueryCapabilities
#include "tests-common.h"
#include
// Create and immediately destroy bitmap surfaces of format `fmt` (named
// `fmt_name` for logging), walking sizes from 1 up to the driver-reported
// maxima.  The odd loop increment advances by 1 while k < step, then by
// `step`, so both tiny sizes and the large range get covered cheaply.
// `freq` toggles the "frequently accessed" hint of VdpBitmapSurfaceCreate.
//
// Fix: size/freq are uint32_t, so the printf conversions must be %u, not %d.
void
test_bitmaps_of_format(VdpDevice device, int fmt, const char *fmt_name,
                       uint32_t max_width, uint32_t max_height)
{
    VdpBitmapSurface bmp_surf1;
    const uint32_t max_square_size = MIN(max_width, max_height);
    const uint32_t step = 128;
    // trying square surface
    for (uint32_t k = 0; k < max_square_size + step; (k < step) ? (k++) : (k += step)) {
        for (uint32_t freq = 0; freq <= 1; freq ++) {
            // clamp to [1, max]: k starts at 0 and the last step overshoots
            const uint32_t size = MAX(1, MIN(k, max_square_size));
            printf("trying square %s bitmap %u x %u (%u)\n", fmt_name, size, size, freq);
            ASSERT_OK(vdpBitmapSurfaceCreate(device, fmt, size, size, freq, &bmp_surf1));
            ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surf1));
        }
    }
    // width stretched (height fixed at 128)
    for (uint32_t k = 0; k < max_width + step; (k < step) ? (k++) : (k += step)) {
        for (uint32_t freq = 0; freq <= 1; freq ++) {
            const uint32_t size = MAX(1, MIN(k, max_width));
            printf("trying width stretched %s bitmap %u x %u (%u)\n", fmt_name, size, 128u, freq);
            ASSERT_OK(vdpBitmapSurfaceCreate(device, fmt, size, 128, freq, &bmp_surf1));
            ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surf1));
        }
    }
    // height stretched (width fixed at 128)
    for (uint32_t k = 0; k < max_height + step; (k < step) ? (k++) : (k += step)) {
        for (uint32_t freq = 0; freq <= 1; freq ++) {
            const uint32_t size = MAX(1, MIN(k, max_height));
            printf("trying height stretched %s bitmap %u x %u (%u)\n", fmt_name, 128u, size, freq);
            ASSERT_OK(vdpBitmapSurfaceCreate(device, fmt, 128, size, freq, &bmp_surf1));
            ASSERT_OK(vdpBitmapSurfaceDestroy(bmp_surf1));
        }
    }
}
// Query the maximum supported bitmap size once, then exercise surface
// creation for every RGBA format the implementation advertises.
int main(void)
{
    Display *dpy = get_dpy();
    VdpDevice device;
    ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &device, NULL));
    uint32_t max_width, max_height;
    VdpBool is_supported;
    // querying max_size (assumed identical across formats)
    ASSERT_OK(vdpBitmapSurfaceQueryCapabilities(device, VDP_RGBA_FORMAT_B8G8R8A8, &is_supported,
                                                &max_width, &max_height));
    assert(is_supported);
    assert(max_width > 0);
    assert(max_height > 0);
    // table of formats to probe, in the same order as before
    static const struct {
        int fmt;
        const char *name;
    } formats[] = {
        { VDP_RGBA_FORMAT_B8G8R8A8,    "VDP_RGBA_FORMAT_B8G8R8A8" },
        { VDP_RGBA_FORMAT_R8G8B8A8,    "VDP_RGBA_FORMAT_R8G8B8A8" },
        { VDP_RGBA_FORMAT_R10G10B10A2, "VDP_RGBA_FORMAT_R10G10B10A2" },
        { VDP_RGBA_FORMAT_B10G10R10A2, "VDP_RGBA_FORMAT_B10G10R10A2" },
        { VDP_RGBA_FORMAT_A8,          "VDP_RGBA_FORMAT_A8" },
    };
    for (size_t idx = 0; idx < sizeof(formats) / sizeof(formats[0]); idx ++)
        test_bitmaps_of_format(device, formats[idx].fmt, formats[idx].name,
                               max_width, max_height);
    ASSERT_OK(vdpDeviceDestroy(device));
    printf("pass\n");
    return 0;
}
libvdpau-va-gl-0.3.6/tests/test-009.c 0000664 0000000 0000000 00000000661 12665616006 0017140 0 ustar 00root root 0000000 0000000 // test-009
// Create and destroy vdp device many times a row.
// Intended to check X resource leakage introduced by library.
#include "tests-common.h"
#include
// Create and destroy a VdpDevice 300 times on the same display; any X
// resource leaked per cycle by the library accumulates and becomes visible.
int main(void)
{
    Display *dpy = get_dpy();
    VdpDevice dev;
    for (int iter = 0; iter < 300; iter ++) {
        ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &dev, NULL));
        ASSERT_OK(vdpDeviceDestroy(dev));
    }
    printf("pass\n");
    return 0;
}
libvdpau-va-gl-0.3.6/tests/test-010.c 0000664 0000000 0000000 00000001274 12665616006 0017131 0 ustar 00root root 0000000 0000000 // test-010
// creating and destroying couple of VdpDevice from different threads
// caused deadlocks and crashes
#include
#include
#include "tests-common.h"
VdpDevice device1;
VdpDevice device2;
// Worker thread: destroy the first global device while the main thread
// still holds the second one.
void *
thread_1(void *arg)
{
    (void)arg; // unused thread argument
    ASSERT_OK(vdpDeviceDestroy(device1));
    return NULL;
}
// Create two devices; destroy one from a helper thread and the other from
// the main thread.  Historically this deadlocked or crashed the library.
int main(void)
{
    Display *dpy = get_dpy();
    ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &device1, NULL));
    ASSERT_OK(vdpDeviceCreateX11(dpy, 0, &device2, NULL));
    pthread_t thread_id_1;
    // pthread_create returns a non-zero error code on failure; joining a
    // thread that was never created is undefined behavior, so check it.
    int err = pthread_create(&thread_id_1, NULL, thread_1, NULL);
    assert(0 == err);
    pthread_join(thread_id_1, NULL);
    ASSERT_OK(vdpDeviceDestroy(device2));
    printf("pass\n");
    return 0;
}
libvdpau-va-gl-0.3.6/tests/tests-common.c 0000664 0000000 0000000 00000003056 12665616006 0020304 0 ustar 00root root 0000000 0000000 #include "tests-common.h"
#include
// Open the default X display on first use and hand out the same connection
// on every subsequent call.  If XOpenDisplay fails, NULL is returned and the
// open is retried on the next call.
Display *
get_dpy(void)
{
    static Display *dpy_singleton = NULL;
    if (!dpy_singleton)
        dpy_singleton = XOpenDisplay(NULL);
    return dpy_singleton;
}
// Create a fresh 300x300 child of the root window for presentation tests.
// XSync flushes the request so the window exists before the caller uses it.
Window
get_wnd(void)
{
    Display *display = get_dpy();
    Window wnd = XCreateSimpleWindow(display, XDefaultRootWindow(display),
                                     0, 0, 300, 300, 0, 0, 0);
    XSync(display, False);
    return wnd;
}
// Return the larger of two ints.
static inline
int
max2(int a, int b)
{
    if (a > b)
        return a;
    return b;
}
// Return the largest per-byte absolute difference between two A8 buffers
// of `count` elements.  Returns 0 for an empty buffer (count == 0).
int
calc_difference_a8(uint8_t *src1, uint8_t *src2, int count)
{
    int worst = 0;
    for (int idx = 0; idx < count; idx ++) {
        int d = src1[idx] - src2[idx];   // promoted to int, so no wraparound
        if (d < 0)
            d = -d;
        if (d > worst)
            worst = d;
    }
    return worst;
}
// Return the largest per-channel absolute difference between two packed
// 32-bit RGBA buffers of `count` pixels, comparing all four 8-bit channels
// of each pixel.  Returns 0 for an empty buffer (count == 0).
int
calc_difference_r8g8b8a8(uint32_t *src1, uint32_t *src2, int count)
{
    int max_diff = 0;
    for (int k = 0; k < count; k ++) {
        // walk the four byte lanes of the packed pixel
        for (int shift = 0; shift <= 24; shift += 8) {
            const int c1 = (src1[k] >> shift) & 0xff;
            const int c2 = (src2[k] >> shift) & 0xff;
            const int d = (c1 > c2) ? (c1 - c2) : (c2 - c1);
            if (d > max_diff)
                max_diff = d;
        }
    }
    return max_diff;
}
// force linking library constructor
// Taking the constructor's address keeps the symbol referenced so the linker
// does not discard the library's initialization code from the test binary.
void va_gl_library_constructor();
void *dummy_ptr = va_gl_library_constructor;
libvdpau-va-gl-0.3.6/tests/tests-common.h 0000664 0000000 0000000 00000001036 12665616006 0020305 0 ustar 00root root 0000000 0000000 #ifndef VA_GL_TESTS_TESTS_COMMON_H
#define VA_GL_TESTS_TESTS_COMMON_H
#undef NDEBUG
#include
#include
#include
#include "api.h"
// Evaluate a VDPAU call exactly once and abort the test via assert() unless
// it returns VDP_STATUS_OK.  Wrapped in do/while(0) so the macro behaves as
// a single statement in any context (e.g. unbraced if/else).
#define ASSERT_OK(expr) \
do { \
VdpStatus status = expr; \
assert (VDP_STATUS_OK == status); \
} while (0)
Display *get_dpy(void);
Window get_wnd(void);
int
calc_difference_a8(uint8_t *src1, uint8_t *src2, int count);
int
calc_difference_r8g8b8a8(uint32_t *src1, uint32_t *src2, int count);
#endif // VA_GL_TESTS_TESTS_COMMON_H