live/ (source distribution; top-level directories: BasicUsageEnvironment/, proxyServer/, mediaServer/, testProgs/, WindowsAudioInputDevice/, UsageEnvironment/, groupsock/, liveMedia/)

==== live/configure ====
#!/bin/sh
echo "Whoa!  This software distribution does NOT use the normal Unix \"configure\" mechanism for generating a Makefile.  For instructions on how to build this software, see ."
echo "Also, please make sure that you're using the most up-to-date version of the source code - available from ."

==== live/config.aix ====
COMPILE_OPTS = $(INCLUDES) -I. -DBSD=1 -O -DTIME_BASE=int -DSOCKLEN_T=socklen_t
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DAIX=1
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/COPYING ====
[The complete, verbatim text of the GNU Lesser General Public License, version 2.1, February 1999, Copyright (C) 1991, 1999 Free Software Foundation, Inc. - the license under which the libraries in this distribution are released. The standard license text is not reproduced here.]
==== live/genMakefiles ====
#!/bin/sh

usage() {
	echo "Usage: $0 <os-platform>"
	exit 1
}

if [ $# -ne 1 ]
then
	usage $*
fi

cd liveMedia
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile

cd ../groupsock
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile

cd ../UsageEnvironment
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile

cd ../BasicUsageEnvironment
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile

cd ../testProgs
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile

cd ../mediaServer
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile

cd ../proxyServer
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile

cd ..
/bin/rm -f Makefile
cat Makefile.head config.$1 Makefile.tail > Makefile
chmod a-w Makefile
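A usage sketch for the script above (assuming a Unix-like shell at the top of the live/ directory; "linux" names one of the config.* files included later in this distribution):

	./genMakefiles linux   # generates each subdirectory's Makefile
	make

Each Makefile is produced by concatenating Makefile.head, the chosen config.* file, and Makefile.tail, then made read-only with "chmod a-w".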
==== live/config.uClinux ====
CROSS_COMPILE= arc-linux-uclibc-
COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)gcc
CFLAGS += $(COMPILE_OPTS)
C_FLAGS = $(CFLAGS)
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILE)g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
CPLUSPLUS_FLAGS += $(CPPFLAGS) -fexceptions
OBJ = o
LINK = $(CROSS_COMPILE)g++ -o
LINK_OPTS = -L. $(LDFLAGS)
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILE)ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = $(CXXLIBS)
LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
EXE =

==== live/config.sunos ====
COMPILE_OPTS = $(INCLUDES) -I. -DBSD=1 -O
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cc
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.solaris-64bit ====
COMPILE_OPTS = $(INCLUDES) -m64 -I. -O -DSOLARIS -DSOCKLEN_T=socklen_t
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -m64 -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -64 -r -dn
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = -lsocket -lnsl
LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
EXE =

==== live/config.solaris-32bit ====
COMPILE_OPTS = $(INCLUDES) -I. -O -DSOLARIS -DSOCKLEN_T=socklen_t
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -dn
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = -lsocket -lnsl
LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
EXE =

==== live/config.qnx4 ====
#
# Requires:
#   QNX 4.25
#   Watcom 10.6
#   TCP/IP 5.0
#
COMPILE_OPTS = $(INCLUDES) -I. -D_QNX4 -DBSD -DSOCKLEN_T=uint32_t -I/usr/watcom/10.6/usr/include
C = c
C_COMPILER = cc32
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = cc32
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -WC,-xs
OBJ = o
LINK = cc32 -b -M -N30000 -o
LINK_OPTS = -l.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = wlib -n -b -c
LIBRARY_LINK_OPTS = $(LINK_OPTS)
LIB_SUFFIX = lib
LIBS_FOR_CONSOLE_APPLICATION = -lsocket
LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
EXE =

==== live/config.openbsd ====
.SUFFIXES: .cpp
COMPILE_OPTS = $(INCLUDES) -I. -DBSD=1 -O -DSOCKLEN_T=socklen_t
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DAIX=1
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.mingw ====
COMPILE_OPTS = $(INCLUDES) -I. -O -DSOCKLEN_T=int -DLOCALE_NOT_USED
C = c
C_COMPILER = $(CC)
C_FLAGS = $(COMPILE_OPTS) -DUSE_OUR_BZERO=1 -D__MINGW32__
CPP = cpp
CPLUSPLUS_COMPILER = $(CXX)
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -D__MINGW32__ -Wall -Wno-deprecated
OBJ = o
LINK = $(CXX) -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(LD) -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = -lws2_32
LIBS_FOR_GUI_APPLICATION = -lws2_32
EXE =

==== live/config.macosx-32bit ====
COMPILE_OPTS = -m32 $(INCLUDES) -I. $(EXTRA_LDFLAGS) -DBSD=1 -O -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -DTIME_BASE=int
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L. -m32
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = libtool -s -o
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =
==== live/config.macosx-before-version-10.4 ====
COMPILE_OPTS = $(INCLUDES) -I. -DBSD=1 -O -DSOCKLEN_T=int -DTIME_BASE=int
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.macosx ====
COMPILE_OPTS = $(INCLUDES) -I. $(EXTRA_LDFLAGS) -DBSD=1 -O -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -DTIME_BASE=int
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = libtool -s -o
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.linux-with-shared-libraries ====
# 'CURRENT':'REVISION':'AGE' are updated - whenever a library changes - as follows:
# The library code changes, but without any changes to the API (i.e., interfaces) => increment REVISION
# At least one interface changes, or is removed => CURRENT += 1; REVISION = 0; AGE = 0
# One or more interfaces were added, but no existing interfaces were changed or removed => CURRENT += 1; REVISION = 0; AGE += 1

libliveMedia_VERSION_CURRENT=24
libliveMedia_VERSION_REVISION=0
libliveMedia_VERSION_AGE=1
libliveMedia_LIB_SUFFIX=so.$(shell expr $(libliveMedia_VERSION_CURRENT) - $(libliveMedia_VERSION_AGE)).$(libliveMedia_VERSION_AGE).$(libliveMedia_VERSION_REVISION)

libBasicUsageEnvironment_VERSION_CURRENT=0
libBasicUsageEnvironment_VERSION_REVISION=2
libBasicUsageEnvironment_VERSION_AGE=0
libBasicUsageEnvironment_LIB_SUFFIX=so.$(shell expr $(libBasicUsageEnvironment_VERSION_CURRENT) - $(libBasicUsageEnvironment_VERSION_AGE)).$(libBasicUsageEnvironment_VERSION_AGE).$(libBasicUsageEnvironment_VERSION_REVISION)

libUsageEnvironment_VERSION_CURRENT=1
libUsageEnvironment_VERSION_REVISION=0
libUsageEnvironment_VERSION_AGE=0
libUsageEnvironment_LIB_SUFFIX=so.$(shell expr $(libUsageEnvironment_VERSION_CURRENT) - $(libUsageEnvironment_VERSION_AGE)).$(libUsageEnvironment_VERSION_AGE).$(libUsageEnvironment_VERSION_REVISION)

libgroupsock_VERSION_CURRENT=1
libgroupsock_VERSION_REVISION=4
libgroupsock_VERSION_AGE=0
libgroupsock_LIB_SUFFIX=so.$(shell expr $(libgroupsock_VERSION_CURRENT) - $(libgroupsock_VERSION_AGE)).$(libgroupsock_VERSION_AGE).$(libgroupsock_VERSION_REVISION)
#####
COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -fPIC
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 $(CPPFLAGS) $(CXXFLAGS)
OBJ = o
LINK = c++ -o
LINK_OPTS = -L. $(LDFLAGS)
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = gcc -o
SHORT_LIB_SUFFIX = so.$(shell expr $($(NAME)_VERSION_CURRENT) - $($(NAME)_VERSION_AGE))
LIB_SUFFIX = $(SHORT_LIB_SUFFIX).$($(NAME)_VERSION_AGE).$($(NAME)_VERSION_REVISION)
LIBRARY_LINK_OPTS = -shared -Wl,-soname,$(NAME).$(SHORT_LIB_SUFFIX) $(LDFLAGS)
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =
INSTALL2 = install_shared_libraries
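To make the version-numbering comments at the top of the file above concrete, here is a small worked shell sketch of the LIB_SUFFIX computation (values taken from the libliveMedia settings in that file; the first suffix component is CURRENT minus AGE, mirroring the $(shell expr ...) expansion and the libtool-style current:revision:age convention):

	CURRENT=24; REVISION=0; AGE=1
	echo "libliveMedia.so.`expr $CURRENT - $AGE`.$AGE.$REVISION"
	# prints: libliveMedia.so.23.1.0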
==== live/config.linux-gdb ====
COMPILE_OPTS = $(INCLUDES) -I. -O -DSOCKLEN_T=socklen_t -g -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.linux-64bit ====
COMPILE_OPTS = $(INCLUDES) -m64 -fPIC -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.linux ====
COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 $(CPPFLAGS) $(CXXFLAGS)
OBJ = o
LINK = c++ -o
LINK_OPTS = -L. $(LDFLAGS)
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.irix ====
COMPILE_OPTS = $(INCLUDES) -I. -O
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS) -DIRIX
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DIRIX
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -B static
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.iphoneos ====
# Change the following version number, if necessary, before running "genMakefiles iphoneos"
IOS_VERSION = 6.1

DEVELOPER_PATH = /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer
TOOL_PATH = $(DEVELOPER_PATH)/usr/bin
SDK_PATH = $(DEVELOPER_PATH)/SDKs
SDK = $(SDK_PATH)/iPhoneOS$(IOS_VERSION).sdk

COMPILE_OPTS = $(INCLUDES) -I. $(EXTRA_LDFLAGS) -DBSD=1 -O2 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -fPIC -arch armv7 --sysroot=$(SDK)
C = c
C_COMPILER = $(TOOL_PATH)/gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(TOOL_PATH)/g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = $(TOOL_PATH)/g++ -o
LINK_OPTS = -L. -arch armv7 --sysroot=$(SDK) -L$(SDK)/usr/lib/system
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = libtool -s -o
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =
==== live/config.iphone-simulator ====
# Change the following version number, if necessary, before running "genMakefiles iphoneos"
IOS_VERSION = 6.1

DEVELOPER_PATH = /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer
TOOL_PATH = $(DEVELOPER_PATH)/usr/bin
SDK_PATH = $(DEVELOPER_PATH)/SDKs
SDK = $(SDK_PATH)/iPhoneSimulator$(IOS_VERSION).sdk

COMPILE_OPTS = $(INCLUDES) -I. $(EXTRA_LDFLAGS) -DBSD=1 -O2 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -fPIC -arch i386 --sysroot=$(SDK)
C = c
C_COMPILER = $(TOOL_PATH)/gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(TOOL_PATH)/g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = $(TOOL_PATH)/g++ -o
LINK_OPTS = -L. -arch i386 --sysroot=$(SDK) -L$(SDK)/usr/lib/system
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = libtool -s -o
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.freebsd ====
COMPILE_OPTS = $(INCLUDES) -I. -O -DBSD=1 -DXLOCALE_NOT_USED=1 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.cygwin-for-vlc ====
COMPILE_OPTS = $(INCLUDES) -I. -O -DSOCKLEN_T=socklen_t -DXLOCALE_NOT_USED=1
C = c
C_COMPILER = gcc
C_FLAGS = $(COMPILE_OPTS) -DUSE_OUR_BZERO=1 -D_WIN32 -mno-cygwin
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 -D_WIN32 -Wno-deprecated -mno-cygwin
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.cygwin ====
COMPILE_OPTS = $(INCLUDES) -I. -O -DSOCKLEN_T=socklen_t -DXLOCALE_NOT_USED=1
C = c
C_COMPILER = gcc
C_FLAGS = $(COMPILE_OPTS) -DUSE_OUR_BZERO=1 -D__CYGWIN__
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.cris-axis-linux-gnu ====
# Note: AXIS_TOP_DIR is assumed to already be set in your environment.
# You can set this using the "init_env" script.
# See http://developer.axis.com/doc/software/apps/apps-howto.html
# for more information.

AXIS_DIR = $(AXIS_TOP_DIR)/target/cris-axis-linux-gnu
COMPILE_OPTS = $(INCLUDES) -I. -mlinux -isystem $(AXIS_DIR)/include -Wall -O2 -DSOCKLEN_T=socklen_t -DCRIS -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = gcc-cris
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++-cris
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wno-ctor-dtor-privacy -ansi -pipe
OBJ = o
LINK = c++-cris -static -o
AXIS_LINK_OPTS = -L$(AXIS_DIR)/lib
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS) -L$(AXIS_DIR)/lib -mlinux
LIBRARY_LINK = ld-cris -mcrislinux -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =
==== live/config.bsplinux ====
CROSS_COMPILE=
COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)ecc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILE)e++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = $(CROSS_COMPILE)e++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILE)eld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = -lm
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.bfin-uclinux ====
CROSS_COMPILER= bfin-uclinux-
COMPILE_OPTS = $(INCLUDES) -I. -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -DUCLINUX -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILER)gcc
C_FLAGS = $(COMPILE_OPTS) -Wall
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILER)g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = $(CROSS_COMPILER)g++ -Wl,-elf2flt -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILER)ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.bfin-linux-uclibc ====
CROSS_COMPILER = bfin-linux-uclibc-
COMPILE_OPTS = $(INCLUDES) -I. -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -DUCLINUX -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILER)gcc
C_FLAGS = $(COMPILE_OPTS) -Wall
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILER)g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = $(CROSS_COMPILER)g++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILER)ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.avr32-linux ====
CROSS_COMPILE= avr32-linux-uclibc-
COMPILE_OPTS = -Os $(INCLUDES) -msoft-float -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1
C = c
C_COMPILER = $(CROSS_COMPILE)gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILE)c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -fuse-cxa-atexit -DBSD=1
OBJ = o
LINK = $(CROSS_COMPILE)c++ -o
LINK_OPTS =
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILE)ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.armlinux ====
CROSS_COMPILE?= arm-elf-
COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILE)g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = $(CROSS_COMPILE)g++ -o
LINK_OPTS =
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILE)ar cr
LIBRARY_LINK_OPTS = $(LINK_OPTS)
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =
==== live/config.armeb-uclibc ====
CROSS_COMPILE= armeb-linux-uclibc-
COMPILE_OPTS = $(INCLUDES) -I. -Os -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILE)g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = $(CROSS_COMPILE)gcc -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILE)ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/config.alpha ====
COMPILE_OPTS = $(INCLUDES) -I. -O -DTIME_BASE=int
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS) -DALPHA
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 -DALPHA
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -B static
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==== live/README ====
For documentation and instructions for building this software, see .

==== live/Makefile.head ====
##### Change the following for your environment:

==== live/Makefile.tail ====
##### End of variables to change

LIVEMEDIA_DIR = liveMedia
GROUPSOCK_DIR = groupsock
USAGE_ENVIRONMENT_DIR = UsageEnvironment
BASIC_USAGE_ENVIRONMENT_DIR = BasicUsageEnvironment
TESTPROGS_DIR = testProgs
MEDIA_SERVER_DIR = mediaServer
PROXY_SERVER_DIR = proxyServer

all:
	cd $(LIVEMEDIA_DIR) ; $(MAKE)
	cd $(GROUPSOCK_DIR) ; $(MAKE)
	cd $(USAGE_ENVIRONMENT_DIR) ; $(MAKE)
	cd $(BASIC_USAGE_ENVIRONMENT_DIR) ; $(MAKE)
	cd $(TESTPROGS_DIR) ; $(MAKE)
	cd $(MEDIA_SERVER_DIR) ; $(MAKE)
	cd $(PROXY_SERVER_DIR) ; $(MAKE)

install:
	cd $(LIVEMEDIA_DIR) ; $(MAKE) install
	cd $(GROUPSOCK_DIR) ; $(MAKE) install
	cd $(USAGE_ENVIRONMENT_DIR) ; $(MAKE) install
	cd $(BASIC_USAGE_ENVIRONMENT_DIR) ; $(MAKE) install
	cd $(TESTPROGS_DIR) ; $(MAKE) install
	cd $(MEDIA_SERVER_DIR) ; $(MAKE) install
	cd $(PROXY_SERVER_DIR) ; $(MAKE) install

clean:
	cd $(LIVEMEDIA_DIR) ; $(MAKE) clean
	cd $(GROUPSOCK_DIR) ; $(MAKE) clean
	cd $(USAGE_ENVIRONMENT_DIR) ; $(MAKE) clean
	cd $(BASIC_USAGE_ENVIRONMENT_DIR) ; $(MAKE) clean
	cd $(TESTPROGS_DIR) ; $(MAKE) clean
	cd $(MEDIA_SERVER_DIR) ; $(MAKE) clean
	cd $(PROXY_SERVER_DIR) ; $(MAKE) clean

distclean: clean
	-rm -f $(LIVEMEDIA_DIR)/Makefile $(GROUPSOCK_DIR)/Makefile \
	  $(USAGE_ENVIRONMENT_DIR)/Makefile $(BASIC_USAGE_ENVIRONMENT_DIR)/Makefile \
	  $(TESTPROGS_DIR)/Makefile $(MEDIA_SERVER_DIR)/Makefile \
	  $(PROXY_SERVER_DIR)/Makefile Makefile

==== live/fix-makefile ====
#!/bin/sh
# the next line restarts using tclsh \
exec tclsh8.4 "$0" "$@"

set makefileName [lindex $argv 0]
set tmpfileName /tmp/rsftmp

set inFid [open $makefileName r]
set outFid [open $tmpfileName w]

while {![eof $inFid]} {
	set line [gets $inFid]
	if {[string match *\)\$* $line]} {
		set pos [string first \)\$ $line]
		set prefix [string range $line 0 $pos]
		incr pos
		set suffix [string range $line $pos end]
		set line $prefix\ $suffix
	}
	puts $outFid $line
}

close $inFid
close $outFid

file rename -force $tmpfileName $makefileName
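A note on the fix-makefile script above, read directly from its Tcl code: for each line of the named Makefile that contains the character pair ")$", it inserts a space after the ")", so that a fragment such as $(FOO)$(BAR) becomes $(FOO) $(BAR) (hypothetical variable names, for illustration only).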
==== live/genWindowsMakefiles.cmd ====
@Echo OFF
SETLOCAL
for %%I in (%0) do %%~dI
for %%I in (%0) do cd "%%~pI"
cd liveMedia
del /Q liveMedia.mak
type Makefile.head ..\win32config Makefile.tail > liveMedia.mak
cd ../groupsock
del /Q groupsock.mak
type Makefile.head ..\win32config Makefile.tail > groupsock.mak
cd ../UsageEnvironment
del /Q UsageEnvironment.mak
type Makefile.head ..\win32config Makefile.tail > UsageEnvironment.mak
cd ../BasicUsageEnvironment
del /Q BasicUsageEnvironment.mak
type Makefile.head ..\win32config Makefile.tail > BasicUsageEnvironment.mak
cd ../testProgs
del /Q testProgs.mak
type Makefile.head ..\win32config Makefile.tail > testProgs.mak
cd ../mediaServer
del /Q mediaServer.mak
type Makefile.head ..\win32config Makefile.tail > mediaServer.mak
cd ../proxyServer
del /Q proxyServer.mak
type Makefile.head ..\win32config Makefile.tail > proxyServer.mak
ENDLOCAL

==== live/genWindowsMakefiles ====
#!/bin/sh

cd liveMedia
/bin/rm -f liveMedia.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > liveMedia.mak

cd ../groupsock
/bin/rm -f groupsock.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > groupsock.mak

cd ../UsageEnvironment
/bin/rm -f UsageEnvironment.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > UsageEnvironment.mak

cd ../BasicUsageEnvironment
/bin/rm -f BasicUsageEnvironment.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > BasicUsageEnvironment.mak

cd ../testProgs
/bin/rm -f testProgs.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > testProgs.mak

cd ../mediaServer
/bin/rm -f mediaServer.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > mediaServer.mak

cd ../proxyServer
/bin/rm -f proxyServer.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > proxyServer.mak

==== live/win32config.Borland ====
# Comment out the following line to produce Makefiles that generate debuggable code:
NODEBUG=1

# The following definition ensures that we are properly matching
# the WinSock2 library file with the correct header files.
# (will link with "ws2_32.lib" and include "winsock2.h" & "Ws2tcpip.h")
TARGETOS = WINNT

# If for some reason you wish to use WinSock1 instead, uncomment the
# following two definitions.
# (will link with "wsock32.lib" and include "winsock.h")
#TARGETOS = WIN95
#APPVER = 4.0

#!include

UI_OPTS = $(guilflags) $(guilibsdll)
# Use the following to get a console (e.g., for debugging):
CONSOLE_UI_OPTS = $(conlflags) $(conlibsdll)
CPU=i386

TOOLS32 = C:\Progra~1\Borland\CBuilder5
COMPILE_OPTS = $(INCLUDES) $(cdebug) $(cflags) $(cvarsdll) -I. -I$(TOOLS32)\include
C = c
C_COMPILER = $(TOOLS32)\bin\bcc32
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(C_COMPILER)
CPLUSPLUS_FLAGS = $(COMPILE_OPTS)
OBJ = obj
LINK = $(TOOLS32)\bin\ilink32
LIBRARY_LINK = $(TOOLS32)\bin\tlib
LINK_OPTS_0 = $(linkdebug) msvcirt.lib
LIBRARY_LINK_OPTS = /u
LINK_OPTS = $(LINK_OPTS_0) $(UI_OPTS)
CONSOLE_LINK_OPTS = c0x32
SERVICE_LINK_OPTS = kernel32.lib advapi32.lib shell32.lib -subsystem:console,$(APPVER)
LIB_SUFFIX = lib
LIBS_FOR_CONSOLE_APPLICATION = cw32.lib import32.lib
LIBS_FOR_GUI_APPLICATION = ,,cw32
EXE =

rc32 = "$(TOOLS32)\bin\brc32"
.rc.res:
	$(rc32) $<
==== live/win32config ====
# Comment out the following line to produce Makefiles that generate debuggable code:
NODEBUG=1

# The following definition ensures that we are properly matching
# the WinSock2 library file with the correct header files.
# (will link with "ws2_32.lib" and include "winsock2.h" & "Ws2tcpip.h")
TARGETOS = WINNT

# If for some reason you wish to use WinSock1 instead, uncomment the
# following two definitions.
# (will link with "wsock32.lib" and include "winsock.h")
#TARGETOS = WIN95
#APPVER = 4.0

!include

UI_OPTS = $(guilflags) $(guilibsdll)
# Use the following to get a console (e.g., for debugging):
CONSOLE_UI_OPTS = $(conlflags) $(conlibsdll)
CPU=i386

TOOLS32 = c:\Program Files\DevStudio\Vc
COMPILE_OPTS = $(INCLUDES) $(cdebug) $(cflags) $(cvarsdll) -I. -I"$(TOOLS32)\include"
C = c
C_COMPILER = "$(TOOLS32)\bin\cl"
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(C_COMPILER)
CPLUSPLUS_FLAGS = $(COMPILE_OPTS)
OBJ = obj
LINK = $(link) -out:
LIBRARY_LINK = lib -out:
LINK_OPTS_0 = $(linkdebug) msvcirt.lib
LIBRARY_LINK_OPTS =
LINK_OPTS = $(LINK_OPTS_0) $(UI_OPTS)
CONSOLE_LINK_OPTS = $(LINK_OPTS_0) $(CONSOLE_UI_OPTS)
SERVICE_LINK_OPTS = kernel32.lib advapi32.lib shell32.lib -subsystem:console,$(APPVER)
LIB_SUFFIX = lib
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
MULTIMEDIA_LIBS = winmm.lib
EXE = .exe
PLATFORM = Windows

rc32 = "$(TOOLS32)\bin\rc"
.rc.res:
	$(rc32) $<

==== live/liveMedia/include/ ==== (directory)

==== live/liveMedia/RTPSource.cpp ====
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See .)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// RTP Sources
// Implementation

#include "RTPSource.hh"
#include "GroupsockHelper.hh"

////////// RTPSource //////////

Boolean RTPSource::lookupByName(UsageEnvironment& env,
				char const* sourceName,
				RTPSource*& resultSource) {
  resultSource = NULL; // unless we succeed

  MediaSource* source;
  if (!MediaSource::lookupByName(env, sourceName, source)) return False;

  if (!source->isRTPSource()) {
    env.setResultMsg(sourceName, " is not a RTP source");
    return False;
  }

  resultSource = (RTPSource*)source;
  return True;
}

Boolean RTPSource::hasBeenSynchronizedUsingRTCP() {
  return fCurPacketHasBeenSynchronizedUsingRTCP;
}

Boolean RTPSource::isRTPSource() const {
  return True;
}

RTPSource::RTPSource(UsageEnvironment& env, Groupsock* RTPgs,
		     unsigned char rtpPayloadFormat,
		     u_int32_t rtpTimestampFrequency)
  : FramedSource(env),
    fRTPInterface(this, RTPgs),
    fCurPacketHasBeenSynchronizedUsingRTCP(False), fLastReceivedSSRC(0),
    fRTPPayloadFormat(rtpPayloadFormat),
    fTimestampFrequency(rtpTimestampFrequency),
    fSSRC(our_random32()), fEnableRTCPReports(True) {
  fReceptionStatsDB = new RTPReceptionStatsDB();
}

RTPSource::~RTPSource() {
  delete fReceptionStatsDB;
}

void RTPSource::getAttributes() const {
  envir().setResultMsg(""); // Fix later to get attributes from header #####
}


////////// RTPReceptionStatsDB //////////

RTPReceptionStatsDB::RTPReceptionStatsDB()
  : fTable(HashTable::create(ONE_WORD_HASH_KEYS)), fTotNumPacketsReceived(0) {
  reset();
}

void RTPReceptionStatsDB::reset() {
  fNumActiveSourcesSinceLastReset = 0;

  Iterator iter(*this);
  RTPReceptionStats* stats;
  while ((stats = iter.next()) != NULL) {
    stats->reset();
  }
}

RTPReceptionStatsDB::~RTPReceptionStatsDB() {
  // First, remove and delete all stats records from the table:
  RTPReceptionStats* stats;
  while ((stats = (RTPReceptionStats*)fTable->RemoveNext()) != NULL) {
    delete stats;
  }

  // Then, delete the table itself:
  delete fTable;
}

void RTPReceptionStatsDB
::noteIncomingPacket(u_int32_t SSRC, u_int16_t seqNum,
		     u_int32_t rtpTimestamp, unsigned timestampFrequency,
		     Boolean useForJitterCalculation,
		     struct timeval& resultPresentationTime,
		     Boolean& resultHasBeenSyncedUsingRTCP,
		     unsigned packetSize) {
  ++fTotNumPacketsReceived;
  RTPReceptionStats* stats = lookup(SSRC);
  if (stats == NULL) {
    // This is the first time we've heard from this SSRC.
    // Create a new record for it:
    stats = new RTPReceptionStats(SSRC, seqNum);
    if (stats == NULL) return;
    add(SSRC, stats);
  }

  if (stats->numPacketsReceivedSinceLastReset() == 0) {
    ++fNumActiveSourcesSinceLastReset;
  }

  stats->noteIncomingPacket(seqNum, rtpTimestamp, timestampFrequency,
			    useForJitterCalculation,
			    resultPresentationTime,
			    resultHasBeenSyncedUsingRTCP, packetSize);
}

void RTPReceptionStatsDB
::noteIncomingSR(u_int32_t SSRC,
		 u_int32_t ntpTimestampMSW, u_int32_t ntpTimestampLSW,
		 u_int32_t rtpTimestamp) {
  RTPReceptionStats* stats = lookup(SSRC);
  if (stats == NULL) {
    // This is the first time we've heard of this SSRC.
    // Create a new record for it:
    stats = new RTPReceptionStats(SSRC);
    if (stats == NULL) return;
    add(SSRC, stats);
  }

  stats->noteIncomingSR(ntpTimestampMSW, ntpTimestampLSW, rtpTimestamp);
}

void RTPReceptionStatsDB::removeRecord(u_int32_t SSRC) {
  RTPReceptionStats* stats = lookup(SSRC);
  if (stats != NULL) {
    long SSRC_long = (long)SSRC;
    fTable->Remove((char const*)SSRC_long);
    delete stats;
  }
}

RTPReceptionStatsDB::Iterator
::Iterator(RTPReceptionStatsDB& receptionStatsDB)
  : fIter(HashTable::Iterator::create(*(receptionStatsDB.fTable))) {
}

RTPReceptionStatsDB::Iterator::~Iterator() {
  delete fIter;
}

RTPReceptionStats*
RTPReceptionStatsDB::Iterator::next(Boolean includeInactiveSources) {
  char const* key; // dummy

  // If asked, skip over any sources that haven't been active
  // since the last reset:
  RTPReceptionStats* stats;
  do {
    stats = (RTPReceptionStats*)(fIter->next(key));
  } while (stats != NULL && !includeInactiveSources
	   && stats->numPacketsReceivedSinceLastReset() == 0);

  return stats;
}

RTPReceptionStats* RTPReceptionStatsDB::lookup(u_int32_t SSRC) const {
  long SSRC_long = (long)SSRC;
  return (RTPReceptionStats*)(fTable->Lookup((char const*)SSRC_long));
}

void RTPReceptionStatsDB::add(u_int32_t SSRC, RTPReceptionStats* stats) {
  long SSRC_long = (long)SSRC;
  fTable->Add((char const*)SSRC_long, stats);
}


////////// RTPReceptionStats //////////

RTPReceptionStats::RTPReceptionStats(u_int32_t SSRC, u_int16_t initialSeqNum) {
  initSeqNum(initialSeqNum);
  init(SSRC);
}

RTPReceptionStats::RTPReceptionStats(u_int32_t SSRC) {
  init(SSRC);
}

RTPReceptionStats::~RTPReceptionStats() {
}

void RTPReceptionStats::init(u_int32_t SSRC) {
  fSSRC = SSRC;
  fTotNumPacketsReceived = 0;
  fTotBytesReceived_hi = fTotBytesReceived_lo = 0;
  fBaseExtSeqNumReceived = 0;
  fHighestExtSeqNumReceived = 0;
  fHaveSeenInitialSequenceNumber = False;
  fLastTransit = ~0;
  fPreviousPacketRTPTimestamp = 0;
  fJitter = 0.0;
  fLastReceivedSR_NTPmsw = fLastReceivedSR_NTPlsw = 0;
  fLastReceivedSR_time.tv_sec = fLastReceivedSR_time.tv_usec = 0;
  fLastPacketReceptionTime.tv_sec = fLastPacketReceptionTime.tv_usec = 0;
  fMinInterPacketGapUS = 0x7FFFFFFF;
  fMaxInterPacketGapUS = 0;
  fTotalInterPacketGaps.tv_sec = fTotalInterPacketGaps.tv_usec = 0;
  fHasBeenSynchronized = False;
  fSyncTime.tv_sec = fSyncTime.tv_usec = 0;
  reset();
}

void RTPReceptionStats::initSeqNum(u_int16_t initialSeqNum) {
  fBaseExtSeqNumReceived = 0x10000 | initialSeqNum;
  fHighestExtSeqNumReceived = 0x10000 | initialSeqNum;
  fHaveSeenInitialSequenceNumber = True;
}

#ifndef MILLION
#define MILLION 1000000
#endif

void RTPReceptionStats
::noteIncomingPacket(u_int16_t seqNum, u_int32_t rtpTimestamp,
		     unsigned timestampFrequency,
		     Boolean useForJitterCalculation,
		     struct timeval& resultPresentationTime,
		     Boolean& resultHasBeenSyncedUsingRTCP,
		     unsigned packetSize) {
  if (!fHaveSeenInitialSequenceNumber) initSeqNum(seqNum);

  ++fNumPacketsReceivedSinceLastReset;
  ++fTotNumPacketsReceived;
  u_int32_t prevTotBytesReceived_lo = fTotBytesReceived_lo;
  fTotBytesReceived_lo += packetSize;
  if (fTotBytesReceived_lo < prevTotBytesReceived_lo) { // wrap-around
    ++fTotBytesReceived_hi;
  }
sequence number wrapped around, so start a new cycle: seqNumCycle += 0x10000; } newSeqNum = seqNumCycle|seqNum; if (newSeqNum > fHighestExtSeqNumReceived) { fHighestExtSeqNumReceived = newSeqNum; } } else if (fTotNumPacketsReceived > 1) { // This packet was an old packet received out of order if ((int)seqNumDifference >= 0x8000) { // The sequence number wrapped around, so switch to an old cycle: seqNumCycle -= 0x10000; } newSeqNum = seqNumCycle|seqNum; if (newSeqNum < fBaseExtSeqNumReceived) { fBaseExtSeqNumReceived = newSeqNum; } } // Record the inter-packet delay struct timeval timeNow; gettimeofday(&timeNow, NULL); if (fLastPacketReceptionTime.tv_sec != 0 || fLastPacketReceptionTime.tv_usec != 0) { unsigned gap = (timeNow.tv_sec - fLastPacketReceptionTime.tv_sec)*MILLION + timeNow.tv_usec - fLastPacketReceptionTime.tv_usec; if (gap > fMaxInterPacketGapUS) { fMaxInterPacketGapUS = gap; } if (gap < fMinInterPacketGapUS) { fMinInterPacketGapUS = gap; } fTotalInterPacketGaps.tv_usec += gap; if (fTotalInterPacketGaps.tv_usec >= MILLION) { ++fTotalInterPacketGaps.tv_sec; fTotalInterPacketGaps.tv_usec -= MILLION; } } fLastPacketReceptionTime = timeNow; // Compute the current 'jitter' using the received packet's RTP timestamp, // and the RTP timestamp that would correspond to the current time. // (Use the code from appendix A.8 in the RTP spec.) // Note, however, that we don't use this packet if its timestamp is // the same as that of the previous packet (this indicates a multi-packet // fragment), or if we've been explicitly told not to use this packet. if (useForJitterCalculation && rtpTimestamp != fPreviousPacketRTPTimestamp) { unsigned arrival = (timestampFrequency*timeNow.tv_sec); arrival += (unsigned) ((2.0*timestampFrequency*timeNow.tv_usec + 1000000.0)/2000000); // note: rounding int transit = arrival - rtpTimestamp; if (fLastTransit == (~0)) fLastTransit = transit; // hack for first time int d = transit - fLastTransit; fLastTransit = transit; if (d < 0) d = -d; fJitter += (1.0/16.0) * ((double)d - fJitter); } // Return the 'presentation time' that corresponds to "rtpTimestamp": if (fSyncTime.tv_sec == 0 && fSyncTime.tv_usec == 0) { // This is the first timestamp that we've seen, so use the current // 'wall clock' time as the synchronization time. (This will be // corrected later when we receive RTCP SRs.) 
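// (An illustrative example, with made-up numbers: if this first packet carries rtpTimestamp 90000 and arrives at wall-clock time 10.000000, we record that pair below. A later packet with rtpTimestamp 93000 on a 90000 Hz video clock then maps to 10.000000 + 3000/90000 == 10.033333 seconds; i.e., presentation times advance at the media clock's rate until an RTCP SR re-anchors the mapping.)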
fSyncTimestamp = rtpTimestamp; fSyncTime = timeNow; } int timestampDiff = rtpTimestamp - fSyncTimestamp; // Note: This works even if the timestamp wraps around // (as long as "int" is 32 bits) // Divide this by the timestamp frequency to get real time: double timeDiff = timestampDiff/(double)timestampFrequency; // Add this to the 'sync time' to get our result: unsigned const million = 1000000; unsigned seconds, uSeconds; if (timeDiff >= 0.0) { seconds = fSyncTime.tv_sec + (unsigned)(timeDiff); uSeconds = fSyncTime.tv_usec + (unsigned)((timeDiff - (unsigned)timeDiff)*million); if (uSeconds >= million) { uSeconds -= million; ++seconds; } } else { timeDiff = -timeDiff; seconds = fSyncTime.tv_sec - (unsigned)(timeDiff); uSeconds = fSyncTime.tv_usec - (unsigned)((timeDiff - (unsigned)timeDiff)*million); if ((int)uSeconds < 0) { uSeconds += million; --seconds; } } resultPresentationTime.tv_sec = seconds; resultPresentationTime.tv_usec = uSeconds; resultHasBeenSyncedUsingRTCP = fHasBeenSynchronized; // Save these as the new synchronization timestamp & time: fSyncTimestamp = rtpTimestamp; fSyncTime = resultPresentationTime; fPreviousPacketRTPTimestamp = rtpTimestamp; } void RTPReceptionStats::noteIncomingSR(u_int32_t ntpTimestampMSW, u_int32_t ntpTimestampLSW, u_int32_t rtpTimestamp) { fLastReceivedSR_NTPmsw = ntpTimestampMSW; fLastReceivedSR_NTPlsw = ntpTimestampLSW; gettimeofday(&fLastReceivedSR_time, NULL); // Use this SR to update time synchronization information: fSyncTimestamp = rtpTimestamp; fSyncTime.tv_sec = ntpTimestampMSW - 0x83AA7E80; // 1/1/1900 -> 1/1/1970 double microseconds = (ntpTimestampLSW*15625.0)/0x04000000; // 10^6/2^32 fSyncTime.tv_usec = (unsigned)(microseconds+0.5); fHasBeenSynchronized = True; } double RTPReceptionStats::totNumKBytesReceived() const { double const hiMultiplier = 0x20000000/125.0; // == (2^32)/(10^3) return fTotBytesReceived_hi*hiMultiplier + fTotBytesReceived_lo/1000.0; } unsigned RTPReceptionStats::jitter() const { return (unsigned)fJitter; } void RTPReceptionStats::reset() { fNumPacketsReceivedSinceLastReset = 0; fLastResetExtSeqNumReceived = fHighestExtSeqNumReceived; } Boolean seqNumLT(u_int16_t s1, u_int16_t s2) { // a 'less-than' on 16-bit sequence numbers int diff = s2-s1; if (diff > 0) { return (diff < 0x8000); } else if (diff < 0) { return (diff < -0x8000); } else { // diff == 0 return False; } } live/liveMedia/SimpleRTPSource.cpp000444 001751 000000 00000004643 12265042432 017351 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A RTP source for a simple RTP payload format that // - doesn't have any special headers following the RTP header // - doesn't have any special framing apart from the packet data itself // Implementation #include "SimpleRTPSource.hh" #include <string.h> SimpleRTPSource* SimpleRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mimeTypeString, unsigned offset, Boolean doNormalMBitRule) { return new SimpleRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, mimeTypeString, offset, doNormalMBitRule); } SimpleRTPSource ::SimpleRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mimeTypeString, unsigned offset, Boolean doNormalMBitRule) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency), fMIMEtypeString(strDup(mimeTypeString)), fOffset(offset) { fUseMBitForFrameEnd = doNormalMBitRule && strncmp(mimeTypeString, "audio/", 6) != 0; } SimpleRTPSource::~SimpleRTPSource() { delete[] (char*)fMIMEtypeString; } Boolean SimpleRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { fCurrentPacketCompletesFrame = !fUseMBitForFrameEnd || packet->rtpMarkerBit(); resultSpecialHeaderSize = fOffset; return True; } char const* SimpleRTPSource::MIMEtype() const { if (fMIMEtypeString == NULL) return MultiFramedRTPSource::MIMEtype(); return fMIMEtypeString; } live/liveMedia/MP3Transcoder.cpp000444 001751 000000 00000003455 12265042432 016775 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// MP3 Transcoder // Implementation #include "MP3Transcoder.hh" MP3Transcoder::MP3Transcoder(UsageEnvironment& env, MP3ADUTranscoder* aduTranscoder) : MP3FromADUSource(env, aduTranscoder, False) { } MP3Transcoder::~MP3Transcoder() { } MP3Transcoder* MP3Transcoder::createNew(UsageEnvironment& env, unsigned outBitrate /* in kbps */, FramedSource* inputSource) { MP3Transcoder* newSource = NULL; do { // Create the intermediate filters that help implement the transcoder: ADUFromMP3Source* aduFromMP3 = ADUFromMP3Source::createNew(env, inputSource, False); // Note: This also checks that "inputSource" is an MP3 source if (aduFromMP3 == NULL) break; MP3ADUTranscoder* aduTranscoder = MP3ADUTranscoder::createNew(env, outBitrate, aduFromMP3); if (aduTranscoder == NULL) break; // Then create the transcoder itself: newSource = new MP3Transcoder(env, aduTranscoder); } while (0); return newSource; } live/liveMedia/SimpleRTPSink.cpp000444 001751 000000 00000006733 12265042432 017017 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simple RTP sink that packs frames into each outgoing // packet, without any fragmentation or special headers. // Implementation #include "SimpleRTPSink.hh" SimpleRTPSink::SimpleRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* sdpMediaTypeString, char const* rtpPayloadFormatName, unsigned numChannels, Boolean allowMultipleFramesPerPacket, Boolean doNormalMBitRule) : MultiFramedRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, rtpPayloadFormatName, numChannels), fAllowMultipleFramesPerPacket(allowMultipleFramesPerPacket), fSetMBitOnNextPacket(False) { fSDPMediaTypeString = strDup(sdpMediaTypeString == NULL ? "unknown" : sdpMediaTypeString); fSetMBitOnLastFrames = doNormalMBitRule && strcmp(fSDPMediaTypeString, "audio") != 0; } SimpleRTPSink::~SimpleRTPSink() { delete[] (char*)fSDPMediaTypeString; } SimpleRTPSink* SimpleRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* sdpMediaTypeString, char const* rtpPayloadFormatName, unsigned numChannels, Boolean allowMultipleFramesPerPacket, Boolean doNormalMBitRule) { return new SimpleRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, sdpMediaTypeString, rtpPayloadFormatName, numChannels, allowMultipleFramesPerPacket, doNormalMBitRule); } void SimpleRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { if (numRemainingBytes == 0) { // This packet contains the last (or only) fragment of the frame.
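// (Whether anything happens next depends on how this sink was created: our constructor set "fSetMBitOnLastFrames" only if "doNormalMBitRule" was requested and the SDP media type isn't "audio", so under the normal M-bit rule the marker gets set on frame ends for video and other non-audio media only.)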
// Set the RTP 'M' ('marker') bit, if appropriate: if (fSetMBitOnLastFrames) setMarkerBit(); } if (fSetMBitOnNextPacket) { // An external object has asked for the 'M' bit to be set on the next packet: setMarkerBit(); fSetMBitOnNextPacket = False; } // Important: Also call our base class's doSpecialFrameHandling(), // to set the packet's timestamp: MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset, frameStart, numBytesInFrame, framePresentationTime, numRemainingBytes); } Boolean SimpleRTPSink:: frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { return fAllowMultipleFramesPerPacket; } char const* SimpleRTPSink::sdpMediaType() const { return fSDPMediaTypeString; } live/liveMedia/rtcp_from_spec.c000444 001751 000000 00000024452 12265042432 017016 0ustar00rsfwheel000000 000000 /* RTCP code taken directly from the most recent RTP specification: * RFC 3550 * Implementation */ #include "rtcp_from_spec.h" /***** A.7 Computing the RTCP Transmission Interval The following functions implement the RTCP transmission and reception rules described in Section 6.2. These rules are coded in several functions: o rtcp_interval() computes the deterministic calculated interval, measured in seconds. The parameters are defined in Section 6.3. o OnExpire() is called when the RTCP transmission timer expires. o OnReceive() is called whenever an RTCP packet is received. Both OnExpire() and OnReceive() have event e as an argument. This is the next scheduled event for that participant, either an RTCP report or a BYE packet. It is assumed that the following functions are available: o Schedule(time t, event e) schedules an event e to occur at time t. When time t arrives, the function OnExpire is called with e as an argument. o Reschedule(time t, event e) reschedules a previously scheduled event e for time t. o SendRTCPReport(event e) sends an RTCP report. o SendBYEPacket(event e) sends a BYE packet. o TypeOfEvent(event e) returns EVENT_BYE if the event being processed is for a BYE packet to be sent, else it returns EVENT_REPORT. o PacketType(p) returns PACKET_RTCP_REPORT if packet p is an RTCP report (not BYE), PACKET_BYE if its a BYE RTCP packet, and PACKET_RTP if its a regular RTP data packet. o ReceivedPacketSize() and SentPacketSize() return the size of the referenced packet in octets. o NewMember(p) returns a 1 if the participant who sent packet p is not currently in the member list, 0 otherwise. Note this function is not sufficient for a complete implementation because each CSRC identifier in an RTP packet and each SSRC in a BYE packet should be processed. o NewSender(p) returns a 1 if the participant who sent packet p is not currently in the sender sublist of the member list, 0 otherwise. o AddMember() and RemoveMember() to add and remove participants from the member list. o AddSender() and RemoveSender() to add and remove participants from the sender sublist of the member list. *****/ double rtcp_interval(int members, int senders, double rtcp_bw, int we_sent, double avg_rtcp_size, int initial) { /* * Minimum average time between RTCP packets from this site (in * seconds). This time prevents the reports from `clumping' when * sessions are small and the law of large numbers isn't helping * to smooth out the traffic. It also keeps the report interval * from becoming ridiculously small during transient outages like * a network partition. */ double const RTCP_MIN_TIME = 5.; /* * Fraction of the RTCP bandwidth to be shared among active * senders. 
(This fraction was chosen so that in a typical * session with one or two active senders, the computed report * time would be roughly equal to the minimum report time so that * we don't unnecessarily slow down receiver reports.) The * receiver fraction must be 1 - the sender fraction. */ double const RTCP_SENDER_BW_FRACTION = 0.25; double const RTCP_RCVR_BW_FRACTION = (1-RTCP_SENDER_BW_FRACTION); /* * To compensate for "unconditional reconsideration" converging to a * value below the intended average. */ double const COMPENSATION = 2.71828 - 1.5; double t; /* interval */ double rtcp_min_time = RTCP_MIN_TIME; int n; /* no. of members for computation */ /* * Very first call at application start-up uses half the min * delay for quicker notification while still allowing some time * before reporting for randomization and to learn about other * sources so the report interval will converge to the correct * interval more quickly. */ if (initial) { rtcp_min_time /= 2; } /* * If there were active senders, give them at least a minimum * share of the RTCP bandwidth. Otherwise all participants share * the RTCP bandwidth equally. */ n = members; if (senders > 0 && senders < members * RTCP_SENDER_BW_FRACTION) { if (we_sent) { rtcp_bw *= RTCP_SENDER_BW_FRACTION; n = senders; } else { rtcp_bw *= RTCP_RCVR_BW_FRACTION; n -= senders; } } /* * The effective number of sites times the average packet size is * the total number of octets sent when each site sends a report. * Dividing this by the effective bandwidth gives the time * interval over which those packets must be sent in order to * meet the bandwidth target, with a minimum enforced. In that * time interval we send one report so this time is also our * average time between reports. */ t = avg_rtcp_size * n / rtcp_bw; if (t < rtcp_min_time) t = rtcp_min_time; /* * To avoid traffic bursts from unintended synchronization with * other sites, we then pick our actual next report interval as a * random number uniformly distributed between 0.5*t and 1.5*t. */ t = t * (drand48() + 0.5); t = t / COMPENSATION; return t; } void OnExpire(event e, int members, int senders, double rtcp_bw, int we_sent, double *avg_rtcp_size, int *initial, time_tp tc, time_tp *tp, int *pmembers) { /* This function is responsible for deciding whether to send * an RTCP report or BYE packet now, or to reschedule transmission. * It is also responsible for updating the pmembers, initial, tp, * and avg_rtcp_size state variables. This function should be called * upon expiration of the event timer used by Schedule(). */ double t; /* Interval */ double tn; /* Next transmit time */ /* In the case of a BYE, we use "unconditional reconsideration" to * reschedule the transmission of the BYE if necessary */ if (TypeOfEvent(e) == EVENT_BYE) { t = rtcp_interval(members, senders, rtcp_bw, we_sent, *avg_rtcp_size, *initial); tn = *tp + t; if (tn <= tc) { SendBYEPacket(e); exit(1); } else { Schedule(tn, e); } } else if (TypeOfEvent(e) == EVENT_REPORT) { t = rtcp_interval(members, senders, rtcp_bw, we_sent, *avg_rtcp_size, *initial); tn = *tp + t; if (tn <= tc) { SendRTCPReport(e); *avg_rtcp_size = (1./16.)*SentPacketSize(e) + (15./16.)*(*avg_rtcp_size); *tp = tc; /* We must redraw the interval. 
Don't reuse the one computed above, since its not actually distributed the same, as we are conditioned on it being small enough to cause a packet to be sent */ t = rtcp_interval(members, senders, rtcp_bw, we_sent, *avg_rtcp_size, *initial); Schedule(t+tc,e); *initial = 0; } else { Schedule(tn, e); } *pmembers = members; } } void OnReceive(packet p, event e, int *members, int *pmembers, int *senders, double *avg_rtcp_size, double *tp, double tc, double tn) { /* What we do depends on whether we have left the group, and * are waiting to send a BYE (TypeOfEvent(e) == EVENT_BYE) or * an RTCP report. p represents the packet that was just received. */ if (PacketType(p) == PACKET_RTCP_REPORT) { if (NewMember(p) && (TypeOfEvent(e) == EVENT_REPORT)) { AddMember(p); *members += 1; } *avg_rtcp_size = (1./16.)*ReceivedPacketSize(p) + (15./16.)*(*avg_rtcp_size); } else if (PacketType(p) == PACKET_RTP) { if (NewMember(p) && (TypeOfEvent(e) == EVENT_REPORT)) { AddMember(p); *members += 1; } if (NewSender(p) && (TypeOfEvent(e) == EVENT_REPORT)) { AddSender(p); *senders += 1; } } else if (PacketType(p) == PACKET_BYE) { *avg_rtcp_size = (1./16.)*ReceivedPacketSize(p) + (15./16.)*(*avg_rtcp_size); if (TypeOfEvent(e) == EVENT_REPORT) { if (NewSender(p) == FALSE) { RemoveSender(p); *senders -= 1; } if (NewMember(p) == FALSE) { RemoveMember(p); *members -= 1; } if(*members < *pmembers) { tn = tc + (((double) *members)/(*pmembers))*(tn - tc); *tp = tc - (((double) *members)/(*pmembers))*(tc - *tp); /* Reschedule the next report for time tn */ Reschedule(tn, e); *pmembers = *members; } } else if (TypeOfEvent(e) == EVENT_BYE) { *members += 1; } } } live/liveMedia/MP3ADU.cpp000444 001751 000000 00000050244 12265042432 015300 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// 'ADU' MP3 streams (for improved loss-tolerance) // Implementation #include "MP3ADU.hh" #include "MP3ADUdescriptor.hh" #include "MP3Internals.hh" #include <string.h> #ifdef TEST_LOSS #include "GroupsockHelper.hh" #endif // Segment data structures, used in the implementation below: #define SegmentBufSize 2000 /* conservatively high */ class Segment { public: unsigned char buf[SegmentBufSize]; unsigned char* dataStart() { return &buf[descriptorSize]; } unsigned frameSize; // if it's a non-ADU frame unsigned dataHere(); // if it's a non-ADU frame unsigned descriptorSize; static unsigned const headerSize; unsigned sideInfoSize, aduSize; unsigned backpointer; struct timeval presentationTime; unsigned durationInMicroseconds; }; unsigned const Segment::headerSize = 4; #define SegmentQueueSize 20 class SegmentQueue { public: SegmentQueue(Boolean directionIsToADU, Boolean includeADUdescriptors) : fDirectionIsToADU(directionIsToADU), fIncludeADUdescriptors(includeADUdescriptors) { reset(); } Segment s[SegmentQueueSize]; unsigned headIndex() {return fHeadIndex;} Segment& headSegment() {return s[fHeadIndex];} unsigned nextFreeIndex() {return fNextFreeIndex;} Segment& nextFreeSegment() {return s[fNextFreeIndex];} Boolean isEmpty() {return isEmptyOrFull() && totalDataSize() == 0;} Boolean isFull() {return isEmptyOrFull() && totalDataSize() > 0;} static unsigned nextIndex(unsigned ix) {return (ix+1)%SegmentQueueSize;} static unsigned prevIndex(unsigned ix) {return (ix+SegmentQueueSize-1)%SegmentQueueSize;} unsigned totalDataSize() {return fTotalDataSize;} void enqueueNewSegment(FramedSource* inputSource, FramedSource* usingSource); Boolean dequeue(); Boolean insertDummyBeforeTail(unsigned backpointer); void reset() { fHeadIndex = fNextFreeIndex = fTotalDataSize = 0; } private: static void sqAfterGettingSegment(void* clientData, unsigned numBytesRead, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); Boolean sqAfterGettingCommon(Segment& seg, unsigned numBytesRead); Boolean isEmptyOrFull() {return headIndex() == nextFreeIndex();} unsigned fHeadIndex, fNextFreeIndex, fTotalDataSize; // The following is used for asynchronous reads: FramedSource* fUsingSource; // This tells us whether the direction in which we're being used // is MP3->ADU, or vice-versa. (This flag is used for debugging output.)
Boolean fDirectionIsToADU; // The following is true iff we're used to enqueue incoming // ADU frames, and these have an ADU descriptor in front Boolean fIncludeADUdescriptors; }; ////////// ADUFromMP3Source ////////// ADUFromMP3Source::ADUFromMP3Source(UsageEnvironment& env, FramedSource* inputSource, Boolean includeADUdescriptors) : FramedFilter(env, inputSource), fAreEnqueueingMP3Frame(False), fSegments(new SegmentQueue(True /* because we're MP3->ADU */, False /*no descriptors in incoming frames*/)), fIncludeADUdescriptors(includeADUdescriptors), fTotalDataSizeBeforePreviousRead(0), fScale(1), fFrameCounter(0) { } ADUFromMP3Source::~ADUFromMP3Source() { delete fSegments; } char const* ADUFromMP3Source::MIMEtype() const { return "audio/MPA-ROBUST"; } ADUFromMP3Source* ADUFromMP3Source::createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean includeADUdescriptors) { // The source must be a MPEG audio source: if (strcmp(inputSource->MIMEtype(), "audio/MPEG") != 0) { env.setResultMsg(inputSource->name(), " is not an MPEG audio source"); return NULL; } return new ADUFromMP3Source(env, inputSource, includeADUdescriptors); } void ADUFromMP3Source::resetInput() { fSegments->reset(); } Boolean ADUFromMP3Source::setScaleFactor(int scale) { if (scale < 1) return False; fScale = scale; return True; } void ADUFromMP3Source::doGetNextFrame() { if (!fAreEnqueueingMP3Frame) { // Arrange to enqueue a new MP3 frame: fTotalDataSizeBeforePreviousRead = fSegments->totalDataSize(); fAreEnqueueingMP3Frame = True; fSegments->enqueueNewSegment(fInputSource, this); } else { // Deliver an ADU from a previously-read MP3 frame: fAreEnqueueingMP3Frame = False; if (!doGetNextFrame1()) { // An internal error occurred; act as if our source went away: FramedSource::handleClosure(this); } } } Boolean ADUFromMP3Source::doGetNextFrame1() { // First, check whether we have enough previously-read data to output an // ADU for the last-read MP3 frame: unsigned tailIndex; Segment* tailSeg; Boolean needMoreData; if (fSegments->isEmpty()) { needMoreData = True; tailSeg = NULL; tailIndex = 0; // unneeded, but stops compiler warnings } else { tailIndex = SegmentQueue::prevIndex(fSegments->nextFreeIndex()); tailSeg = &(fSegments->s[tailIndex]); needMoreData = fTotalDataSizeBeforePreviousRead < tailSeg->backpointer // bp points back too far || tailSeg->backpointer + tailSeg->dataHere() < tailSeg->aduSize; // not enough data } if (needMoreData) { // We don't have enough data to output an ADU from the last-read MP3 // frame, so need to read another one and try again: doGetNextFrame(); return True; } // Output an ADU from the tail segment: fFrameSize = tailSeg->headerSize+tailSeg->sideInfoSize+tailSeg->aduSize; fPresentationTime = tailSeg->presentationTime; fDurationInMicroseconds = tailSeg->durationInMicroseconds; unsigned descriptorSize = fIncludeADUdescriptors ? 
ADUdescriptor::computeSize(fFrameSize) : 0; #ifdef DEBUG fprintf(stderr, "m->a:outputting ADU %d<-%d, nbr:%d, sis:%d, dh:%d, (descriptor size: %d)\n", tailSeg->aduSize, tailSeg->backpointer, fFrameSize, tailSeg->sideInfoSize, tailSeg->dataHere(), descriptorSize); #endif if (descriptorSize + fFrameSize > fMaxSize) { envir() << "ADUFromMP3Source::doGetNextFrame1(): not enough room (" << descriptorSize + fFrameSize << ">" << fMaxSize << ")\n"; fFrameSize = 0; return False; } unsigned char* toPtr = fTo; // output the ADU descriptor: if (fIncludeADUdescriptors) { fFrameSize += ADUdescriptor::generateDescriptor(toPtr, fFrameSize); } // output header and side info: memmove(toPtr, tailSeg->dataStart(), tailSeg->headerSize + tailSeg->sideInfoSize); toPtr += tailSeg->headerSize + tailSeg->sideInfoSize; // go back to the frame that contains the start of our data: unsigned offset = 0; unsigned i = tailIndex; unsigned prevBytes = tailSeg->backpointer; while (prevBytes > 0) { i = SegmentQueue::prevIndex(i); unsigned dataHere = fSegments->s[i].dataHere(); if (dataHere < prevBytes) { prevBytes -= dataHere; } else { offset = dataHere - prevBytes; break; } } // dequeue any segments that we no longer need: while (fSegments->headIndex() != i) { fSegments->dequeue(); // we're done with it } unsigned bytesToUse = tailSeg->aduSize; while (bytesToUse > 0) { Segment& seg = fSegments->s[i]; unsigned char* fromPtr = &seg.dataStart()[seg.headerSize + seg.sideInfoSize + offset]; unsigned dataHere = seg.dataHere() - offset; unsigned bytesUsedHere = dataHere < bytesToUse ? dataHere : bytesToUse; memmove(toPtr, fromPtr, bytesUsedHere); bytesToUse -= bytesUsedHere; toPtr += bytesUsedHere; offset = 0; i = SegmentQueue::nextIndex(i); } if (fFrameCounter++%fScale == 0) { // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. afterGetting(this); } else { // Don't use this frame; get another one: doGetNextFrame(); } return True; } ////////// MP3FromADUSource ////////// MP3FromADUSource::MP3FromADUSource(UsageEnvironment& env, FramedSource* inputSource, Boolean includeADUdescriptors) : FramedFilter(env, inputSource), fAreEnqueueingADU(False), fSegments(new SegmentQueue(False /* because we're ADU->MP3 */, includeADUdescriptors)) { } MP3FromADUSource::~MP3FromADUSource() { delete fSegments; } char const* MP3FromADUSource::MIMEtype() const { return "audio/MPEG"; } MP3FromADUSource* MP3FromADUSource::createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean includeADUdescriptors) { // The source must be an MP3 ADU source: if (strcmp(inputSource->MIMEtype(), "audio/MPA-ROBUST") != 0) { env.setResultMsg(inputSource->name(), " is not an MP3 ADU source"); return NULL; } return new MP3FromADUSource(env, inputSource, includeADUdescriptors); } void MP3FromADUSource::doGetNextFrame() { if (fAreEnqueueingADU) insertDummyADUsIfNecessary(); fAreEnqueueingADU = False; if (needToGetAnADU()) { // Before returning a frame, we must enqueue at least one ADU: #ifdef TEST_LOSS NOTE: This code no longer works, because it uses synchronous reads, which are no longer supported. 
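// (The block below is retained for reference only. It simulated ~10% packet loss - discarding the input frames belonging to every lost 10-frame 'packet' - but reviving it would require restructuring the synchronous "syncGetNextFrame()" call around the asynchronous "getNextFrame()" callback interface.)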
static unsigned const framesPerPacket = 10; static unsigned const frameCount = 0; static Boolean packetIsLost; while (1) { if ((frameCount++)%framesPerPacket == 0) { packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss ##### } if (packetIsLost) { // Read and discard the next input frame (that would be part of // a lost packet): Segment dummySegment; unsigned numBytesRead; struct timeval presentationTime; // (this works only if the source can be read synchronously) fInputSource->syncGetNextFrame(dummySegment.buf, sizeof dummySegment.buf, numBytesRead, presentationTime); } else { break; // from while (1) } } #endif fAreEnqueueingADU = True; fSegments->enqueueNewSegment(fInputSource, this); } else { // Return a frame now: generateFrameFromHeadADU(); // sets fFrameSize, fPresentationTime, and fDurationInMicroseconds // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. afterGetting(this); } } Boolean MP3FromADUSource::needToGetAnADU() { // Check whether we need to first enqueue a new ADU before we // can generate a frame for our head ADU. Boolean needToEnqueue = True; if (!fSegments->isEmpty()) { unsigned index = fSegments->headIndex(); Segment* seg = &(fSegments->headSegment()); int const endOfHeadFrame = (int) seg->dataHere(); unsigned frameOffset = 0; while (1) { int endOfData = frameOffset - seg->backpointer + seg->aduSize; if (endOfData >= endOfHeadFrame) { // We already have enough data to generate a frame needToEnqueue = False; break; } frameOffset += seg->dataHere(); index = SegmentQueue::nextIndex(index); if (index == fSegments->nextFreeIndex()) break; seg = &(fSegments->s[index]); } } return needToEnqueue; } void MP3FromADUSource::insertDummyADUsIfNecessary() { if (fSegments->isEmpty()) return; // shouldn't happen // The tail segment (ADU) is assumed to have been recently // enqueued. If its backpointer would overlap the data // of the previous ADU, then we need to insert one or more // empty, 'dummy' ADUs ahead of it. (This situation should occur // only if an intermediate ADU was lost.) 
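// (A sketch of the idea, with invented sizes: if the previous ADU's data ends 100 bytes before the start of the new ADU's frame ("prevADUend" == 100, computed below), but the new ADU's backpointer reaches 300 bytes back, then the missing 200 bytes must have belonged to one or more ADUs that were lost; we therefore synthesize zero-size 'dummy' ADUs to cover that gap before the tail ADU gets delivered.)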
unsigned tailIndex = SegmentQueue::prevIndex(fSegments->nextFreeIndex()); Segment* tailSeg = &(fSegments->s[tailIndex]); while (1) { unsigned prevADUend; // relative to the start of the new ADU if (fSegments->headIndex() != tailIndex) { // there is a previous segment unsigned prevIndex = SegmentQueue::prevIndex(tailIndex); Segment& prevSegment = fSegments->s[prevIndex]; prevADUend = prevSegment.dataHere() + prevSegment.backpointer; if (prevSegment.aduSize > prevADUend) { // shouldn't happen if the previous ADU was well-formed prevADUend = 0; } else { prevADUend -= prevSegment.aduSize; } } else { prevADUend = 0; } if (tailSeg->backpointer > prevADUend) { // We need to insert a dummy ADU in front of the tail #ifdef DEBUG fprintf(stderr, "a->m:need to insert a dummy ADU (%d, %d, %d) [%d, %d]\n", tailSeg->backpointer, prevADUend, tailSeg->dataHere(), fSegments->headIndex(), fSegments->nextFreeIndex()); #endif tailIndex = fSegments->nextFreeIndex(); if (!fSegments->insertDummyBeforeTail(prevADUend)) return; tailSeg = &(fSegments->s[tailIndex]); } else { break; // no more dummy ADUs need to be inserted } } } Boolean MP3FromADUSource::generateFrameFromHeadADU() { // Output a frame for the head ADU: if (fSegments->isEmpty()) return False; unsigned index = fSegments->headIndex(); Segment* seg = &(fSegments->headSegment()); #ifdef DEBUG fprintf(stderr, "a->m:outputting frame for %d<-%d (fs %d, dh %d), (descriptorSize: %d)\n", seg->aduSize, seg->backpointer, seg->frameSize, seg->dataHere(), seg->descriptorSize); #endif unsigned char* toPtr = fTo; // output header and side info: fFrameSize = seg->frameSize; fPresentationTime = seg->presentationTime; fDurationInMicroseconds = seg->durationInMicroseconds; memmove(toPtr, seg->dataStart(), seg->headerSize + seg->sideInfoSize); toPtr += seg->headerSize + seg->sideInfoSize; // zero out the rest of the frame, in case ADU data doesn't fill it all in unsigned bytesToZero = seg->dataHere(); for (unsigned i = 0; i < bytesToZero; ++i) { toPtr[i] = '\0'; } // Fill in the frame with appropriate ADU data from this and // subsequent ADUs: unsigned frameOffset = 0; unsigned toOffset = 0; unsigned const endOfHeadFrame = seg->dataHere(); while (toOffset < endOfHeadFrame) { int startOfData = frameOffset - seg->backpointer; if (startOfData > (int)endOfHeadFrame) break; // no more ADUs needed int endOfData = startOfData + seg->aduSize; if (endOfData > (int)endOfHeadFrame) { endOfData = endOfHeadFrame; } unsigned fromOffset; if (startOfData <= (int)toOffset) { fromOffset = toOffset - startOfData; startOfData = toOffset; if (endOfData < startOfData) endOfData = startOfData; } else { fromOffset = 0; // we may need some padding bytes beforehand unsigned bytesToZero = startOfData - toOffset; #ifdef DEBUG if (bytesToZero > 0) fprintf(stderr, "a->m:outputting %d zero bytes (%d, %d, %d, %d)\n", bytesToZero, startOfData, toOffset, frameOffset, seg->backpointer); #endif toOffset += bytesToZero; } unsigned char* fromPtr = &seg->dataStart()[seg->headerSize + seg->sideInfoSize + fromOffset]; unsigned bytesUsedHere = endOfData - startOfData; #ifdef DEBUG if (bytesUsedHere > 0) fprintf(stderr, "a->m:outputting %d bytes from %d<-%d\n", bytesUsedHere, seg->aduSize, seg->backpointer); #endif memmove(toPtr + toOffset, fromPtr, bytesUsedHere); toOffset += bytesUsedHere; frameOffset += seg->dataHere(); index = SegmentQueue::nextIndex(index); if (index == fSegments->nextFreeIndex()) break; seg = &(fSegments->s[index]); } fSegments->dequeue(); return True; } ////////// Segment ////////// 
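// Returns the number of payload bytes in this frame - i.e., everything after the 4-byte MPEG audio header and the side info - returning 0 if the frame is too small to be well-formed. (Illustrative arithmetic only: a 417-byte frame with 32 bytes of side info holds 417 - (4 + 32) == 381 bytes of payload.)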
unsigned Segment::dataHere() { int result = frameSize - (headerSize + sideInfoSize); if (result < 0) { return 0; } return (unsigned)result; } ////////// SegmentQueue ////////// void SegmentQueue::enqueueNewSegment(FramedSource* inputSource, FramedSource* usingSource) { if (isFull()) { usingSource->envir() << "SegmentQueue::enqueueNewSegment() overflow\n"; FramedSource::handleClosure(usingSource); return; } fUsingSource = usingSource; Segment& seg = nextFreeSegment(); inputSource->getNextFrame(seg.buf, sizeof seg.buf, sqAfterGettingSegment, this, FramedSource::handleClosure, usingSource); } void SegmentQueue::sqAfterGettingSegment(void* clientData, unsigned numBytesRead, unsigned /*numTruncatedBytes*/, struct timeval presentationTime, unsigned durationInMicroseconds) { SegmentQueue* segQueue = (SegmentQueue*)clientData; Segment& seg = segQueue->nextFreeSegment(); seg.presentationTime = presentationTime; seg.durationInMicroseconds = durationInMicroseconds; if (segQueue->sqAfterGettingCommon(seg, numBytesRead)) { #ifdef DEBUG char const* direction = segQueue->fDirectionIsToADU ? "m->a" : "a->m"; fprintf(stderr, "%s:read frame %d<-%d, fs:%d, sis:%d, dh:%d, (descriptor size: %d)\n", direction, seg.aduSize, seg.backpointer, seg.frameSize, seg.sideInfoSize, seg.dataHere(), seg.descriptorSize); #endif } // Continue our original calling source where it left off: segQueue->fUsingSource->doGetNextFrame(); } // Common code called after a new segment is enqueued Boolean SegmentQueue::sqAfterGettingCommon(Segment& seg, unsigned numBytesRead) { unsigned char* fromPtr = seg.buf; if (fIncludeADUdescriptors) { // The newly-read data is assumed to be an ADU with a descriptor // in front (void)ADUdescriptor::getRemainingFrameSize(fromPtr); seg.descriptorSize = (unsigned)(fromPtr-seg.buf); } else { seg.descriptorSize = 0; } // parse the MP3-specific info in the frame to get the ADU params unsigned hdr; MP3SideInfo sideInfo; if (!GetADUInfoFromMP3Frame(fromPtr, numBytesRead, hdr, seg.frameSize, sideInfo, seg.sideInfoSize, seg.backpointer, seg.aduSize)) { return False; } // If we've just read an ADU (rather than a regular MP3 frame), then use the // entire "numBytesRead" data for the 'aduSize', so that we include any // 'ancillary data' that may be present at the end of the ADU: if (!fDirectionIsToADU) { unsigned newADUSize = numBytesRead - seg.descriptorSize - 4/*header size*/ - seg.sideInfoSize; if (newADUSize > seg.aduSize) seg.aduSize = newADUSize; } fTotalDataSize += seg.dataHere(); fNextFreeIndex = nextIndex(fNextFreeIndex); return True; } Boolean SegmentQueue::dequeue() { if (isEmpty()) { fUsingSource->envir() << "SegmentQueue::dequeue(): underflow!\n"; return False; } Segment& seg = s[headIndex()]; fTotalDataSize -= seg.dataHere(); fHeadIndex = nextIndex(fHeadIndex); return True; } Boolean SegmentQueue::insertDummyBeforeTail(unsigned backpointer) { if (isEmptyOrFull()) return False; // Copy the current tail segment to its new position, then modify the // old tail segment to be a 'dummy' ADU unsigned newTailIndex = nextFreeIndex(); Segment& newTailSeg = s[newTailIndex]; unsigned oldTailIndex = prevIndex(newTailIndex); Segment& oldTailSeg = s[oldTailIndex]; newTailSeg = oldTailSeg; // structure copy // Begin by setting (replacing) the ADU descriptor of the dummy ADU: unsigned char* ptr = oldTailSeg.buf; if (fIncludeADUdescriptors) { unsigned remainingFrameSize = oldTailSeg.headerSize + oldTailSeg.sideInfoSize + 0 /* 0-size ADU */; unsigned currentDescriptorSize = oldTailSeg.descriptorSize; if 
(currentDescriptorSize == 2) { ADUdescriptor::generateTwoByteDescriptor(ptr, remainingFrameSize); } else { (void)ADUdescriptor::generateDescriptor(ptr, remainingFrameSize); } } // Then zero out the side info of the dummy frame: if (!ZeroOutMP3SideInfo(ptr, oldTailSeg.frameSize, backpointer)) return False; unsigned dummyNumBytesRead = oldTailSeg.descriptorSize + 4/*header size*/ + oldTailSeg.sideInfoSize; return sqAfterGettingCommon(oldTailSeg, dummyNumBytesRead); } live/liveMedia/rtcp_from_spec.h000444 001751 000000 00000004352 12265042432 017020 0ustar00rsfwheel000000 000000 /* RTCP code taken directly from the most recent RTP specification: * draft-ietf-avt-rtp-new-11.txt * C header */ #ifndef _RTCP_FROM_SPEC_H #define _RTCP_FROM_SPEC_H #include <stdlib.h> /* Definitions of _ANSI_ARGS and EXTERN that will work in either C or C++ code: */ #undef _ANSI_ARGS_ #if ((defined(__STDC__) || defined(SABER)) && !defined(NO_PROTOTYPE)) || defined(__cplusplus) || defined(USE_PROTOTYPE) # define _ANSI_ARGS_(x) x #else # define _ANSI_ARGS_(x) () #endif #ifdef __cplusplus # define EXTERN extern "C" #else # define EXTERN extern #endif /* The code from the spec assumes a type "event"; make this a void*: */ typedef void* event; #define EVENT_UNKNOWN 0 #define EVENT_REPORT 1 #define EVENT_BYE 2 /* The code from the spec assumes a type "time_tp"; make this a double: */ typedef double time_tp; /* The code from the spec assumes a type "packet"; make this a void*: */ typedef void* packet; #define PACKET_UNKNOWN_TYPE 0 #define PACKET_RTP 1 #define PACKET_RTCP_REPORT 2 #define PACKET_BYE 3 /* The code from the spec calls drand48(), but we have drand30() instead */ #define drand48 drand30 /* The code calls "exit()", but we don't want to exit, so make it a noop: */ #define exit(n) do {} while (0) #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif /* EXPORTS: */ EXTERN void OnExpire _ANSI_ARGS_((event, int, int, double, int, double*, int*, time_tp, time_tp*, int*)); EXTERN void OnReceive _ANSI_ARGS_((packet, event, int*, int*, int*, double*, double*, double, double)); /* IMPORTS: */ EXTERN void Schedule _ANSI_ARGS_((double,event)); EXTERN void Reschedule _ANSI_ARGS_((double,event)); EXTERN void SendRTCPReport _ANSI_ARGS_((event)); EXTERN void SendBYEPacket _ANSI_ARGS_((event)); EXTERN int TypeOfEvent _ANSI_ARGS_((event)); EXTERN int SentPacketSize _ANSI_ARGS_((event)); EXTERN int PacketType _ANSI_ARGS_((packet)); EXTERN int ReceivedPacketSize _ANSI_ARGS_((packet)); EXTERN int NewMember _ANSI_ARGS_((packet)); EXTERN int NewSender _ANSI_ARGS_((packet)); EXTERN void AddMember _ANSI_ARGS_((packet)); EXTERN void AddSender _ANSI_ARGS_((packet)); EXTERN void RemoveMember _ANSI_ARGS_((packet)); EXTERN void RemoveSender _ANSI_ARGS_((packet)); EXTERN double drand30 _ANSI_ARGS_((void)); #endif live/liveMedia/MatroskaFileParser.cpp000444 001751 000000 00000137531 12265042432 020112 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A parser for a Matroska file. // Implementation #include "MatroskaFileParser.hh" #include "MatroskaDemuxedTrack.hh" #include <string.h> #include <sys/time.h> // for "gettimeofday()" MatroskaFileParser::MatroskaFileParser(MatroskaFile& ourFile, FramedSource* inputSource, FramedSource::onCloseFunc* onEndFunc, void* onEndClientData, MatroskaDemux* ourDemux) : StreamParser(inputSource, onEndFunc, onEndClientData, continueParsing, this), fOurFile(ourFile), fInputSource(inputSource), fOnEndFunc(onEndFunc), fOnEndClientData(onEndClientData), fOurDemux(ourDemux), fCurOffsetInFile(0), fSavedCurOffsetInFile(0), fLimitOffsetInFile(0), fNumHeaderBytesToSkip(0), fClusterTimecode(0), fBlockTimecode(0), fFrameSizesWithinBlock(NULL), fPresentationTimeOffset(0.0) { if (ourDemux == NULL) { // Initialization fCurrentParseState = PARSING_START_OF_FILE; continueParsing(); } else { fCurrentParseState = LOOKING_FOR_CLUSTER; // In this case, parsing (of track data) doesn't start until a client starts reading from a track. } } MatroskaFileParser::~MatroskaFileParser() { delete[] fFrameSizesWithinBlock; Medium::close(fInputSource); } void MatroskaFileParser::seekToTime(double& seekNPT) { #ifdef DEBUG fprintf(stderr, "seekToTime(%f)\n", seekNPT); #endif if (seekNPT <= 0.0) { #ifdef DEBUG fprintf(stderr, "\t=> start of file\n"); #endif seekNPT = 0.0; seekToFilePosition(0); } else if (seekNPT >= fOurFile.fileDuration()) { #ifdef DEBUG fprintf(stderr, "\t=> end of file\n"); #endif seekNPT = fOurFile.fileDuration(); seekToEndOfFile(); } else { u_int64_t clusterOffsetInFile; unsigned blockNumWithinCluster; if (!fOurFile.lookupCuePoint(seekNPT, clusterOffsetInFile, blockNumWithinCluster)) { #ifdef DEBUG fprintf(stderr, "\t=> not supported\n"); #endif return; // seeking not supported } #ifdef DEBUG fprintf(stderr, "\t=> seek time %f, file position %llu, block number within cluster %d\n", seekNPT, clusterOffsetInFile, blockNumWithinCluster); #endif seekToFilePosition(clusterOffsetInFile); fCurrentParseState = LOOKING_FOR_BLOCK; // LATER handle "blockNumWithinCluster"; for now, we assume that it's 0 ##### } } void MatroskaFileParser ::continueParsing(void* clientData, unsigned char* /*ptr*/, unsigned /*size*/, struct timeval /*presentationTime*/) { ((MatroskaFileParser*)clientData)->continueParsing(); } void MatroskaFileParser::continueParsing() { if (fInputSource != NULL) { if (fInputSource->isCurrentlyAwaitingData()) return; // Our input source is currently being read. Wait until that read completes if (!parse()) { // We didn't complete the parsing, because we had to read more data from the source, or because we're waiting for // another read from downstream. Once that happens, we'll get called again. return; } } // We successfully parsed the file's 'Track' headers.
Call our 'done' function now: if (fOnEndFunc != NULL) (*fOnEndFunc)(fOnEndClientData); } Boolean MatroskaFileParser::parse() { Boolean areDone = False; try { skipRemainingHeaderBytes(True); // if any do { switch (fCurrentParseState) { case PARSING_START_OF_FILE: { areDone = parseStartOfFile(); break; } case LOOKING_FOR_TRACKS: { lookForNextTrack(); break; } case PARSING_TRACK: { areDone = parseTrack(); if (areDone && fOurFile.fCuesOffset > 0) { // We've finished parsing the 'Track' information. There are also 'Cues' in the file, so parse those before finishing: // Seek to the specified position in the file. We were already told that the 'Cues' begins there: #ifdef DEBUG fprintf(stderr, "Seeking to file position %llu (the previously-reported location of 'Cues')\n", fOurFile.fCuesOffset); #endif seekToFilePosition(fOurFile.fCuesOffset); fCurrentParseState = PARSING_CUES; areDone = False; } break; } case PARSING_CUES: { areDone = parseCues(); break; } case LOOKING_FOR_CLUSTER: { if (fOurFile.fClusterOffset > 0) { // Optimization: Seek to the specified position in the file. We were already told that the 'Cluster' begins there: #ifdef DEBUG fprintf(stderr, "Optimization: Seeking to file position %llu (the previously-reported location of a 'Cluster')\n", fOurFile.fClusterOffset); #endif seekToFilePosition(fOurFile.fClusterOffset); } fCurrentParseState = LOOKING_FOR_BLOCK; break; } case LOOKING_FOR_BLOCK: { lookForNextBlock(); break; } case PARSING_BLOCK: { parseBlock(); break; } case DELIVERING_FRAME_WITHIN_BLOCK: { if (!deliverFrameWithinBlock()) return False; break; } case DELIVERING_FRAME_BYTES: { deliverFrameBytes(); return False; // Halt parsing for now. A new 'read' from downstream will cause parsing to resume. break; } } } while (!areDone); return True; } catch (int /*e*/) { #ifdef DEBUG fprintf(stderr, "MatroskaFileParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n"); #endif return False; // the parsing got interrupted } } Boolean MatroskaFileParser::parseStartOfFile() { #ifdef DEBUG fprintf(stderr, "parsing start of file\n"); #endif EBMLId id; EBMLDataSize size; // The file must begin with the standard EBML header (which we skip): if (!parseEBMLIdAndSize(id, size) || id != MATROSKA_ID_EBML) { fOurFile.envir() << "ERROR: File does not begin with an EBML header\n"; return True; // We're done with the file, because it's not valid } #ifdef DEBUG fprintf(stderr, "MatroskaFileParser::parseStartOfFile(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val()); #endif fCurrentParseState = LOOKING_FOR_TRACKS; skipHeader(size); return False; // because we have more parsing to do - inside the 'Track' header } void MatroskaFileParser::lookForNextTrack() { #ifdef DEBUG fprintf(stderr, "looking for Track\n"); #endif EBMLId id; EBMLDataSize size; // Read and skip over (or enter) each Matroska header, until we get to a 'Track'. 
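// (For orientation - this is background, not parser code: EBML IDs and sizes are variable-length integers whose count of leading zero bits determines their byte length. For example, the byte 0x82 as a data-size field means "1-byte field, value 2", while the 4-byte sequence 0x1A 0x45 0xDF 0xA3 is the EBML header ID that parseStartOfFile() checked for above.)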
while (fCurrentParseState == LOOKING_FOR_TRACKS) { while (!parseEBMLIdAndSize(id, size)) {} #ifdef DEBUG fprintf(stderr, "MatroskaFileParser::lookForNextTrack(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val()); #endif switch (id.val()) { case MATROSKA_ID_SEGMENT: { // 'Segment' header: enter this // Remember the position, within the file, of the start of Segment data, because Seek Positions are relative to this: fOurFile.fSegmentDataOffset = fCurOffsetInFile; break; } case MATROSKA_ID_SEEK_HEAD: { // 'Seek Head' header: enter this break; } case MATROSKA_ID_SEEK: { // 'Seek' header: enter this break; } case MATROSKA_ID_SEEK_ID: { // 'Seek ID' header: get this value if (parseEBMLNumber(fLastSeekId)) { #ifdef DEBUG fprintf(stderr, "\tSeek ID 0x%s:\t%s\n", fLastSeekId.hexString(), fLastSeekId.stringName()); #endif } break; } case MATROSKA_ID_SEEK_POSITION: { // 'Seek Position' header: get this value u_int64_t seekPosition; if (parseEBMLVal_unsigned64(size, seekPosition)) { u_int64_t offsetInFile = fOurFile.fSegmentDataOffset + seekPosition; #ifdef DEBUG fprintf(stderr, "\tSeek Position %llu (=> offset within the file: %llu (0x%llx))\n", seekPosition, offsetInFile, offsetInFile); #endif // The only 'Seek Position's that we care about are for 'Cluster' and 'Cues': if (fLastSeekId == MATROSKA_ID_CLUSTER) { fOurFile.fClusterOffset = offsetInFile; } else if (fLastSeekId == MATROSKA_ID_CUES) { fOurFile.fCuesOffset = offsetInFile; } } break; } case MATROSKA_ID_INFO: { // 'Segment Info' header: enter this break; } case MATROSKA_ID_TIMECODE_SCALE: { // 'Timecode Scale' header: get this value unsigned timecodeScale; if (parseEBMLVal_unsigned(size, timecodeScale) && timecodeScale > 0) { fOurFile.fTimecodeScale = timecodeScale; #ifdef DEBUG fprintf(stderr, "\tTimecode Scale %u ns (=> Segment Duration == %f seconds)\n", fOurFile.timecodeScale(), fOurFile.segmentDuration()*(fOurFile.fTimecodeScale/1000000000.0f)); #endif } break; } case MATROSKA_ID_DURATION: { // 'Segment Duration' header: get this value if (parseEBMLVal_float(size, fOurFile.fSegmentDuration)) { #ifdef DEBUG fprintf(stderr, "\tSegment Duration %f (== %f seconds)\n", fOurFile.segmentDuration(), fOurFile.segmentDuration()*(fOurFile.fTimecodeScale/1000000000.0f)); #endif } break; } #ifdef DEBUG case MATROSKA_ID_TITLE: { // 'Segment Title': display this value char* title; if (parseEBMLVal_string(size, title)) { #ifdef DEBUG fprintf(stderr, "\tTitle: %s\n", title); #endif delete[] title; } break; } #endif case MATROSKA_ID_TRACKS: { // enter this, and move on to parsing 'Tracks' fLimitOffsetInFile = fCurOffsetInFile + size.val(); // Make sure we don't read past the end of this header fCurrentParseState = PARSING_TRACK; break; } default: { // skip over this header skipHeader(size); break; } } setParseState(); } } Boolean MatroskaFileParser::parseTrack() { #ifdef DEBUG fprintf(stderr, "parsing Track\n"); #endif // Read and process each Matroska header, until we get to the end of the Track: MatroskaTrack* track = NULL; EBMLId id; EBMLDataSize size; while (fCurOffsetInFile < fLimitOffsetInFile) { while (!parseEBMLIdAndSize(id, size)) {} #ifdef DEBUG if (id == MATROSKA_ID_TRACK_ENTRY) fprintf(stderr, "\n"); // makes debugging output easier to read fprintf(stderr, "MatroskaFileParser::parseTrack(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val()); #endif switch (id.val()) { case MATROSKA_ID_TRACK_ENTRY: { // 'Track Entry' header: enter this // Create a new "MatroskaTrack" object for this 
entry: if (track != NULL && track->trackNumber == 0) delete track; // We had a previous "MatroskaTrack" object that was never used track = new MatroskaTrack; break; } case MATROSKA_ID_TRACK_NUMBER: { unsigned trackNumber; if (parseEBMLVal_unsigned(size, trackNumber)) { #ifdef DEBUG fprintf(stderr, "\tTrack Number %d\n", trackNumber); #endif if (track != NULL && trackNumber != 0) { track->trackNumber = trackNumber; fOurFile.addTrack(track, trackNumber); } } break; } case MATROSKA_ID_TRACK_TYPE: { unsigned trackType; if (parseEBMLVal_unsigned(size, trackType) && track != NULL) { // We convert the Matroska 'track type' code into our own code (which we can use as a bitmap): track->trackType = trackType == 1 ? MATROSKA_TRACK_TYPE_VIDEO : trackType == 2 ? MATROSKA_TRACK_TYPE_AUDIO : trackType == 0x11 ? MATROSKA_TRACK_TYPE_SUBTITLE : MATROSKA_TRACK_TYPE_OTHER; #ifdef DEBUG fprintf(stderr, "\tTrack Type 0x%02x (%s)\n", trackType, track->trackType == MATROSKA_TRACK_TYPE_VIDEO ? "video" : track->trackType == MATROSKA_TRACK_TYPE_AUDIO ? "audio" : track->trackType == MATROSKA_TRACK_TYPE_SUBTITLE ? "subtitle" : ""); #endif } break; } case MATROSKA_ID_FLAG_ENABLED: { unsigned flagEnabled; if (parseEBMLVal_unsigned(size, flagEnabled)) { #ifdef DEBUG fprintf(stderr, "\tTrack is Enabled: %d\n", flagEnabled); #endif if (track != NULL) track->isEnabled = flagEnabled != 0; } break; } case MATROSKA_ID_FLAG_DEFAULT: { unsigned flagDefault; if (parseEBMLVal_unsigned(size, flagDefault)) { #ifdef DEBUG fprintf(stderr, "\tTrack is Default: %d\n", flagDefault); #endif if (track != NULL) track->isDefault = flagDefault != 0; } break; } case MATROSKA_ID_FLAG_FORCED: { unsigned flagForced; if (parseEBMLVal_unsigned(size, flagForced)) { #ifdef DEBUG fprintf(stderr, "\tTrack is Forced: %d\n", flagForced); #endif if (track != NULL) track->isForced = flagForced != 0; } break; } case MATROSKA_ID_DEFAULT_DURATION: { unsigned defaultDuration; if (parseEBMLVal_unsigned(size, defaultDuration)) { #ifdef DEBUG fprintf(stderr, "\tDefault duration %f ms\n", defaultDuration/1000000.0); #endif if (track != NULL) track->defaultDuration = defaultDuration; } break; } case MATROSKA_ID_MAX_BLOCK_ADDITION_ID: { unsigned maxBlockAdditionID; if (parseEBMLVal_unsigned(size, maxBlockAdditionID)) { #ifdef DEBUG fprintf(stderr, "\tMax Block Addition ID: %u\n", maxBlockAdditionID); #endif } break; } case MATROSKA_ID_NAME: { char* name; if (parseEBMLVal_string(size, name)) { #ifdef DEBUG fprintf(stderr, "\tName: %s\n", name); #endif if (track != NULL) { delete[] track->name; track->name = name; } else { delete[] name; } } break; } case MATROSKA_ID_LANGUAGE: { char* language; if (parseEBMLVal_string(size, language)) { #ifdef DEBUG fprintf(stderr, "\tLanguage: %s\n", language); #endif if (track != NULL) { delete[] track->language; track->language = language; } else { delete[] language; } } break; } case MATROSKA_ID_CODEC: { char* codecID; if (parseEBMLVal_string(size, codecID)) { #ifdef DEBUG fprintf(stderr, "\tCodec ID: %s\n", codecID); #endif if (track != NULL) { delete[] track->codecID; track->codecID = codecID; } else { delete[] codecID; } } break; } case MATROSKA_ID_CODEC_PRIVATE: { u_int8_t* codecPrivate; unsigned codecPrivateSize; if (parseEBMLVal_binary(size, codecPrivate)) { codecPrivateSize = (unsigned)size.val(); #ifdef DEBUG fprintf(stderr, "\tCodec Private: "); for (unsigned i = 0; i < codecPrivateSize; ++i) fprintf(stderr, "%02x:", codecPrivate[i]); fprintf(stderr, "\n"); #endif if (track != NULL) { delete[] track->codecPrivate; 
track->codecPrivate = codecPrivate; track->codecPrivateSize = codecPrivateSize; // Hack for H.264 and H.265: The 'codec private' data contains // the size of the NAL unit 'length' fields: if (track->codecID != NULL) { if (strcmp(track->codecID, "V_MPEG4/ISO/AVC") == 0) { // H.264 // Byte 4 of the 'codec private' data contains 'lengthSizeMinusOne': if (codecPrivateSize >= 5) track->subframeSizeSize = (codecPrivate[4]&0x3) + 1; } else if (strcmp(track->codecID, "V_MPEGH/ISO/HEVC") == 0) { // H.265 // H.265 'codec private' data is *supposed* to use the format that's described in // http://lists.matroska.org/pipermail/matroska-devel/2013-September/004567.html // However, some Matroska files use the same format that was used for H.264. // We check for this here, by checking various fields that are supposed to be // 'all-1' in the 'correct' format: if (codecPrivateSize < 23 || (codecPrivate[13]&0xF0) != 0xF0 || (codecPrivate[15]&0xFC) != 0xFC || (codecPrivate[16]&0xFC) != 0xFC || (codecPrivate[17]&0xF8) != 0xF8 || (codecPrivate[18]&0xF8) != 0xF8) { // The 'correct' format isn't being used, so assume the H.264 format instead: track->codecPrivateUsesH264FormatForH265 = True; // Byte 4 of the 'codec private' data contains 'lengthSizeMinusOne': if (codecPrivateSize >= 5) track->subframeSizeSize = (codecPrivate[4]&0x3) + 1; } else { // This looks like the 'correct' format: track->codecPrivateUsesH264FormatForH265 = False; // Byte 21 of the 'codec private' data contains 'lengthSizeMinusOne': track->subframeSizeSize = (codecPrivate[21]&0x3) + 1; } } } } else { delete[] codecPrivate; } } break; } case MATROSKA_ID_VIDEO: { // 'Video settings' header: enter this break; } case MATROSKA_ID_PIXEL_WIDTH: { unsigned pixelWidth; if (parseEBMLVal_unsigned(size, pixelWidth)) { #ifdef DEBUG fprintf(stderr, "\tPixel Width %d\n", pixelWidth); #endif } break; } case MATROSKA_ID_PIXEL_HEIGHT: { unsigned pixelHeight; if (parseEBMLVal_unsigned(size, pixelHeight)) { #ifdef DEBUG fprintf(stderr, "\tPixel Height %d\n", pixelHeight); #endif } break; } case MATROSKA_ID_DISPLAY_WIDTH: { unsigned displayWidth; if (parseEBMLVal_unsigned(size, displayWidth)) { #ifdef DEBUG fprintf(stderr, "\tDisplay Width %d\n", displayWidth); #endif } break; } case MATROSKA_ID_DISPLAY_HEIGHT: { unsigned displayHeight; if (parseEBMLVal_unsigned(size, displayHeight)) { #ifdef DEBUG fprintf(stderr, "\tDisplay Height %d\n", displayHeight); #endif } break; } case MATROSKA_ID_DISPLAY_UNIT: { unsigned displayUnit; if (parseEBMLVal_unsigned(size, displayUnit)) { #ifdef DEBUG fprintf(stderr, "\tDisplay Unit %d\n", displayUnit); #endif } break; } case MATROSKA_ID_AUDIO: { // 'Audio settings' header: enter this break; } case MATROSKA_ID_SAMPLING_FREQUENCY: { float samplingFrequency; if (parseEBMLVal_float(size, samplingFrequency)) { if (track != NULL) { track->samplingFrequency = (unsigned)samplingFrequency; #ifdef DEBUG fprintf(stderr, "\tSampling frequency %f (->%d)\n", samplingFrequency, track->samplingFrequency); #endif } } break; } case MATROSKA_ID_OUTPUT_SAMPLING_FREQUENCY: { float outputSamplingFrequency; if (parseEBMLVal_float(size, outputSamplingFrequency)) { #ifdef DEBUG fprintf(stderr, "\tOutput sampling frequency %f\n", outputSamplingFrequency); #endif } break; } case MATROSKA_ID_CHANNELS: { unsigned numChannels; if (parseEBMLVal_unsigned(size, numChannels)) { #ifdef DEBUG fprintf(stderr, "\tChannels %d\n", numChannels); #endif if (track != NULL) track->numChannels = numChannels; } break; } case MATROSKA_ID_BIT_DEPTH: { unsigned bitDepth; if 
(parseEBMLVal_unsigned(size, bitDepth)) { #ifdef DEBUG fprintf(stderr, "\tBit Depth %d\n", bitDepth); #endif } break; } case MATROSKA_ID_CONTENT_ENCODINGS: case MATROSKA_ID_CONTENT_ENCODING: { // 'Content Encodings' or 'Content Encoding' header: enter this break; } case MATROSKA_ID_CONTENT_COMPRESSION: { // 'Content Compression' header: enter this // Note: We currently support only 'Header Stripping' compression, not 'zlib' compression (the default algorithm). // Therefore, we disable this track, unless/until we later see that 'Header Stripping' is being used: if (track != NULL) track->isEnabled = False; break; } case MATROSKA_ID_CONTENT_COMP_ALGO: { unsigned contentCompAlgo; if (parseEBMLVal_unsigned(size, contentCompAlgo)) { #ifdef DEBUG fprintf(stderr, "\tContent Compression Algorithm %d (%s)\n", contentCompAlgo, contentCompAlgo == 0 ? "zlib" : contentCompAlgo == 3 ? "Header Stripping" : ""); #endif // The only compression algorithm that we support is #3: Header Stripping; disable the track otherwise if (track != NULL) track->isEnabled = contentCompAlgo == 3; } break; } case MATROSKA_ID_CONTENT_COMP_SETTINGS: { u_int8_t* headerStrippedBytes; unsigned headerStrippedBytesSize; if (parseEBMLVal_binary(size, headerStrippedBytes)) { headerStrippedBytesSize = (unsigned)size.val(); #ifdef DEBUG fprintf(stderr, "\tHeader Stripped Bytes: "); for (unsigned i = 0; i < headerStrippedBytesSize; ++i) fprintf(stderr, "%02x:", headerStrippedBytes[i]); fprintf(stderr, "\n"); #endif if (track != NULL) { delete[] track->headerStrippedBytes; track->headerStrippedBytes = headerStrippedBytes; track->headerStrippedBytesSize = headerStrippedBytesSize; } else { delete[] headerStrippedBytes; } } break; } case MATROSKA_ID_CONTENT_ENCRYPTION: { // 'Content Encryption' header: skip this // Note: We don't currently support encryption at all. Therefore, we disable this track: if (track != NULL) track->isEnabled = False; // Fall through to... 
} default: { // We don't process this header, so just skip over it: skipHeader(size); break; } } setParseState(); } fLimitOffsetInFile = 0; // reset if (track != NULL && track->trackNumber == 0) delete track; // We had a previous "MatroskaTrack" object that was never used return True; // we're done parsing track entries } void MatroskaFileParser::lookForNextBlock() { #ifdef DEBUG fprintf(stderr, "looking for Block\n"); #endif // Read and skip over each Matroska header, until we get to a 'Cluster': EBMLId id; EBMLDataSize size; while (fCurrentParseState == LOOKING_FOR_BLOCK) { while (!parseEBMLIdAndSize(id, size)) {} #ifdef DEBUG fprintf(stderr, "MatroskaFileParser::lookForNextBlock(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val()); #endif switch (id.val()) { case MATROSKA_ID_SEGMENT: { // 'Segment' header: enter this break; } case MATROSKA_ID_CLUSTER: { // 'Cluster' header: enter this break; } case MATROSKA_ID_TIMECODE: { // 'Timecode' header: get this value unsigned timecode; if (parseEBMLVal_unsigned(size, timecode)) { fClusterTimecode = timecode; #ifdef DEBUG fprintf(stderr, "\tCluster timecode: %d (== %f seconds)\n", fClusterTimecode, fClusterTimecode*(fOurFile.fTimecodeScale/1000000000.0)); #endif } break; } case MATROSKA_ID_BLOCK_GROUP: { // 'Block Group' header: enter this break; } case MATROSKA_ID_SIMPLEBLOCK: case MATROSKA_ID_BLOCK: { // 'SimpleBlock' or 'Block' header: enter this (and we're done) fBlockSize = (unsigned)size.val(); fCurrentParseState = PARSING_BLOCK; break; } case MATROSKA_ID_BLOCK_DURATION: { // 'Block Duration' header: get this value (but we currently don't do anything with it) unsigned blockDuration; if (parseEBMLVal_unsigned(size, blockDuration)) { #ifdef DEBUG fprintf(stderr, "\tblock duration: %d (== %f ms)\n", blockDuration, (float)(blockDuration*fOurFile.fTimecodeScale/1000000.0)); #endif } break; } // Attachments are parsed only if we're in DEBUG mode (otherwise we just skip over them): #ifdef DEBUG case MATROSKA_ID_ATTACHMENTS: { // 'Attachments': enter this break; } case MATROSKA_ID_ATTACHED_FILE: { // 'Attached File': enter this break; } case MATROSKA_ID_FILE_DESCRIPTION: { // 'File Description': get this value char* fileDescription; if (parseEBMLVal_string(size, fileDescription)) { #ifdef DEBUG fprintf(stderr, "\tFile Description: %s\n", fileDescription); #endif delete[] fileDescription; } break; } case MATROSKA_ID_FILE_NAME: { // 'File Name': get this value char* fileName; if (parseEBMLVal_string(size, fileName)) { #ifdef DEBUG fprintf(stderr, "\tFile Name: %s\n", fileName); #endif delete[] fileName; } break; } case MATROSKA_ID_FILE_MIME_TYPE: { // 'File MIME Type': get this value char* fileMIMEType; if (parseEBMLVal_string(size, fileMIMEType)) { #ifdef DEBUG fprintf(stderr, "\tFile MIME Type: %s\n", fileMIMEType); #endif delete[] fileMIMEType; } break; } case MATROSKA_ID_FILE_UID: { // 'File UID': get this value unsigned fileUID; if (parseEBMLVal_unsigned(size, fileUID)) { #ifdef DEBUG fprintf(stderr, "\tFile UID: 0x%x\n", fileUID); #endif } break; } #endif default: { // skip over this header skipHeader(size); break; } } setParseState(); } } Boolean MatroskaFileParser::parseCues() { #if defined(DEBUG) || defined(DEBUG_CUES) fprintf(stderr, "parsing Cues\n"); #endif EBMLId id; EBMLDataSize size; // Read the next header, which should be MATROSKA_ID_CUES: if (!parseEBMLIdAndSize(id, size) || id != MATROSKA_ID_CUES) return True; // The header wasn't what we expected, so we're done fLimitOffsetInFile = fCurOffsetInFile + 
size.val(); // Make sure we don't read past the end of this header double currentCueTime = 0.0; u_int64_t currentClusterOffsetInFile = 0; while (fCurOffsetInFile < fLimitOffsetInFile) { while (!parseEBMLIdAndSize(id, size)) {} #ifdef DEBUG_CUES if (id == MATROSKA_ID_CUE_POINT) fprintf(stderr, "\n"); // makes debugging output easier to read fprintf(stderr, "MatroskaFileParser::parseCues(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val()); #endif switch (id.val()) { case MATROSKA_ID_CUE_POINT: { // 'Cue Point' header: enter this break; } case MATROSKA_ID_CUE_TIME: { // 'Cue Time' header: get this value unsigned cueTime; if (parseEBMLVal_unsigned(size, cueTime)) { currentCueTime = cueTime*(fOurFile.fTimecodeScale/1000000000.0); #ifdef DEBUG_CUES fprintf(stderr, "\tCue Time %d (== %f seconds)\n", cueTime, currentCueTime); #endif } break; } case MATROSKA_ID_CUE_TRACK_POSITIONS: { // 'Cue Track Positions' header: enter this break; } case MATROSKA_ID_CUE_TRACK: { // 'Cue Track' header: get this value (but only for debugging; we don't do anything with it) unsigned cueTrack; if (parseEBMLVal_unsigned(size, cueTrack)) { #ifdef DEBUG_CUES fprintf(stderr, "\tCue Track %d\n", cueTrack); #endif } break; } case MATROSKA_ID_CUE_CLUSTER_POSITION: { // 'Cue Cluster Position' header: get this value u_int64_t cueClusterPosition; if (parseEBMLVal_unsigned64(size, cueClusterPosition)) { currentClusterOffsetInFile = fOurFile.fSegmentDataOffset + cueClusterPosition; #ifdef DEBUG_CUES fprintf(stderr, "\tCue Cluster Position %llu (=> offset within the file: %llu (0x%llx))\n", cueClusterPosition, currentClusterOffsetInFile, currentClusterOffsetInFile); #endif // Record this cue point: fOurFile.addCuePoint(currentCueTime, currentClusterOffsetInFile, 1/*default block number within cluster*/); } break; } case MATROSKA_ID_CUE_BLOCK_NUMBER: { // 'Cue Block Number' header: get this value unsigned cueBlockNumber; if (parseEBMLVal_unsigned(size, cueBlockNumber) && cueBlockNumber != 0) { #ifdef DEBUG_CUES fprintf(stderr, "\tCue Block Number %d\n", cueBlockNumber); #endif // Record this cue point (overwriting any existing entry for this cue time): fOurFile.addCuePoint(currentCueTime, currentClusterOffsetInFile, cueBlockNumber); } break; } default: { // We don't process this header, so just skip over it: skipHeader(size); break; } } setParseState(); } fLimitOffsetInFile = 0; // reset #if defined(DEBUG) || defined(DEBUG_CUES) fprintf(stderr, "done parsing Cues\n"); #endif #ifdef DEBUG_CUES fprintf(stderr, "Cue Point tree: "); fOurFile.printCuePoints(stderr); fprintf(stderr, "\n"); #endif return True; // we're done parsing Cues } typedef enum { NoLacing, XiphLacing, FixedSizeLacing, EBMLLacing } MatroskaLacingType; void MatroskaFileParser::parseBlock() { #ifdef DEBUG fprintf(stderr, "parsing SimpleBlock or Block\n"); #endif do { unsigned blockStartPos = curOffset(); // The block begins with the track number: EBMLNumber trackNumber; if (!parseEBMLNumber(trackNumber)) break; fBlockTrackNumber = (unsigned)trackNumber.val(); // If this track is not being read, then skip the rest of this block, and look for another one: if (fOurDemux->lookupDemuxedTrack(fBlockTrackNumber) == NULL) { unsigned headerBytesSeen = curOffset() - blockStartPos; if (headerBytesSeen < fBlockSize) { skipBytes(fBlockSize - headerBytesSeen); } #ifdef DEBUG fprintf(stderr, "\tSkipped block for unused track number %d\n", fBlockTrackNumber); #endif fCurrentParseState = LOOKING_FOR_BLOCK; setParseState(); return; } MatroskaTrack* 
track = fOurFile.lookup(fBlockTrackNumber); if (track == NULL) break; // shouldn't happen // The next two bytes are the block's timecode (relative to the cluster timecode) fBlockTimecode = get1Byte(); fBlockTimecode = (fBlockTimecode<<8)|get1Byte(); // in two statements, so that the two reads happen in a well-defined order // The next byte indicates the type of 'lacing' used: u_int8_t c = get1Byte(); c &= 0x6; // we're interested in bits 5-6 only MatroskaLacingType lacingType = (c==0x0)?NoLacing : (c==0x02)?XiphLacing : (c==0x04)?FixedSizeLacing : EBMLLacing; #ifdef DEBUG fprintf(stderr, "\ttrack number %d, timecode %d (=> %f seconds), %s lacing\n", fBlockTrackNumber, fBlockTimecode, (fClusterTimecode+fBlockTimecode)*(fOurFile.fTimecodeScale/1000000000.0), (lacingType==NoLacing)?"no" : (lacingType==XiphLacing)?"Xiph" : (lacingType==FixedSizeLacing)?"fixed-size" : "EBML"); #endif if (lacingType == NoLacing) { fNumFramesInBlock = 1; } else { // The next byte tells us how many frames are present in this block fNumFramesInBlock = get1Byte() + 1; } delete[] fFrameSizesWithinBlock; fFrameSizesWithinBlock = new unsigned[fNumFramesInBlock]; if (fFrameSizesWithinBlock == NULL) break; if (lacingType == NoLacing) { unsigned headerBytesSeen = curOffset() - blockStartPos; if (headerBytesSeen > fBlockSize) break; fFrameSizesWithinBlock[0] = fBlockSize - headerBytesSeen; } else if (lacingType == FixedSizeLacing) { unsigned headerBytesSeen = curOffset() - blockStartPos; if (headerBytesSeen > fBlockSize) break; unsigned frameBytesAvailable = fBlockSize - headerBytesSeen; unsigned constantFrameSize = frameBytesAvailable/fNumFramesInBlock; for (unsigned i = 0; i < fNumFramesInBlock; ++i) { fFrameSizesWithinBlock[i] = constantFrameSize; } // If there are any bytes left over, assign them to the last frame: fFrameSizesWithinBlock[fNumFramesInBlock-1] += frameBytesAvailable%fNumFramesInBlock; } else { // EBML or Xiph lacing unsigned curFrameSize = 0; unsigned frameSizesTotal = 0; unsigned i; for (i = 0; i < fNumFramesInBlock-1; ++i) { if (lacingType == EBMLLacing) { EBMLNumber frameSize; if (!parseEBMLNumber(frameSize)) break; unsigned fsv = (unsigned)frameSize.val(); if (i == 0) { curFrameSize = fsv; } else { // The value we read is a signed value, that's added to the previous frame size, to get the current frame size: unsigned toSubtract = (fsv>0xFFFFFF)?0x07FFFFFF : (fsv>0xFFFF)?0x0FFFFF : (fsv>0xFF)?0x1FFF : 0x3F; int fsv_signed = fsv - toSubtract; curFrameSize += fsv_signed; if ((int)curFrameSize < 0) break; } } else { // Xiph lacing curFrameSize = 0; u_int8_t c; do { c = get1Byte(); curFrameSize += c; } while (c == 0xFF); } fFrameSizesWithinBlock[i] = curFrameSize; frameSizesTotal += curFrameSize; } if (i != fNumFramesInBlock-1) break; // an error occurred within the "for" loop // Compute the size of the final frame within the block (from the block's size, and the frame sizes already computed): unsigned headerBytesSeen = curOffset() - blockStartPos; if (headerBytesSeen + frameSizesTotal > fBlockSize) break; fFrameSizesWithinBlock[i] = fBlockSize - (headerBytesSeen + frameSizesTotal); } // We're done parsing headers within the block, and (as a result) we now know the sizes of all frames within the block. 
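// (Aside: a minimal sketch of the two variable-length lacing schemes decoded above, for reference only; "readByte()" and "readEBMLNumber()" are hypothetical helpers, not members of this class:
//
//   // Xiph lacing: each lace size is stored as a run of 0xFF bytes, summed, ended by a byte < 0xFF:
//   unsigned decodeXiphLaceSize() {
//     unsigned laceSize = 0; u_int8_t c;
//     do { c = readByte(); laceSize += c; } while (c == 0xFF);
//     return laceSize;
//   }
//
//   // EBML lacing: the first lace size is a plain EBML number; each later size is the previous
//   // size plus a signed delta.  The delta is stored biased by 2^(7*len-1) - 1 (0x3F for a
//   // 1-byte delta, 0x1FFF for a 2-byte delta, etc.) - the bias that the "toSubtract" ladder above removes:
//   unsigned nextEBMLLaceSize(unsigned prevSize) {
//     u_int64_t raw; unsigned len = readEBMLNumber(raw); // "len": the delta's encoded length, in bytes
//     int64_t bias = (1LL << (7*len - 1)) - 1;
//     return (unsigned)((int64_t)prevSize + (int64_t)raw - bias);
//   }
//
// The last frame's size is never stored: it's whatever remains of the block once the block header and the laced sizes are accounted for, as computed above.)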
// If we have 'stripped bytes' that are common to (the front of) all frames, then count them now: if (track->headerStrippedBytesSize != 0) { for (unsigned i = 0; i < fNumFramesInBlock; ++i) fFrameSizesWithinBlock[i] += track->headerStrippedBytesSize; } #ifdef DEBUG fprintf(stderr, "\tThis block contains %d frame(s); size(s):", fNumFramesInBlock); unsigned frameSizesTotal = 0; for (unsigned i = 0; i < fNumFramesInBlock; ++i) { fprintf(stderr, " %d", fFrameSizesWithinBlock[i]); frameSizesTotal += fFrameSizesWithinBlock[i]; } if (fNumFramesInBlock > 1) fprintf(stderr, " (total: %u)", frameSizesTotal); fprintf(stderr, " bytes\n"); #endif // Next, start delivering these frames: fCurrentParseState = DELIVERING_FRAME_WITHIN_BLOCK; fCurOffsetWithinFrame = fNextFrameNumberToDeliver = 0; setParseState(); return; } while (0); // An error occurred. Try to recover: #ifdef DEBUG fprintf(stderr, "parseBlock(): Error parsing data; trying to recover...\n"); #endif fCurrentParseState = LOOKING_FOR_BLOCK; } Boolean MatroskaFileParser::deliverFrameWithinBlock() { #ifdef DEBUG fprintf(stderr, "delivering frame within SimpleBlock or Block\n"); #endif do { MatroskaTrack* track = fOurFile.lookup(fBlockTrackNumber); if (track == NULL) break; // shouldn't happen MatroskaDemuxedTrack* demuxedTrack = fOurDemux->lookupDemuxedTrack(fBlockTrackNumber); if (demuxedTrack == NULL) break; // shouldn't happen if (!demuxedTrack->isCurrentlyAwaitingData()) { // Someone has been reading this stream, but isn't right now. // We can't deliver this frame until he asks for it, so punt for now. // The next time he asks for a frame, he'll get it. #ifdef DEBUG fprintf(stderr, "\tdeferring delivery of frame #%d (%d bytes)", fNextFrameNumberToDeliver, fFrameSizesWithinBlock[fNextFrameNumberToDeliver]); if (track->haveSubframes()) fprintf(stderr, "[offset %d]", fCurOffsetWithinFrame); fprintf(stderr, "\n"); #endif restoreSavedParserState(); // so we read from the beginning next time return False; } unsigned frameSize = fFrameSizesWithinBlock[fNextFrameNumberToDeliver]; if (track->haveSubframes()) { // The next "track->subframeSizeSize" bytes contain the length of a 'subframe': if (fCurOffsetWithinFrame + track->subframeSizeSize > frameSize) break; // sanity check unsigned subframeSize = 0; for (unsigned i = 0; i < track->subframeSizeSize; ++i) { u_int8_t c; getCommonFrameBytes(track, &c, 1, 0); if (fCurFrameNumBytesToGet > 0) { // it'll be 1 c = get1Byte(); ++fCurOffsetWithinFrame; } subframeSize = subframeSize*256 + c; } if (subframeSize == 0 || fCurOffsetWithinFrame + subframeSize > frameSize) break; // sanity check frameSize = subframeSize; } // Compute the presentation time of this frame (from the cluster timecode, the block timecode, and the default duration): double pt = (fClusterTimecode+fBlockTimecode)*(fOurFile.fTimecodeScale/1000000000.0) + fNextFrameNumberToDeliver*(track->defaultDuration/1000000000.0); if (fPresentationTimeOffset == 0.0) { // This is the first time we've computed a presentation time. 
Compute an offset to make the presentation times aligned // with 'wall clock' time: struct timeval timeNow; gettimeofday(&timeNow, NULL); double ptNow = timeNow.tv_sec + timeNow.tv_usec/1000000.0; fPresentationTimeOffset = ptNow - pt; } pt += fPresentationTimeOffset; struct timeval presentationTime; presentationTime.tv_sec = (unsigned)pt; presentationTime.tv_usec = (unsigned)((pt - presentationTime.tv_sec)*1000000); unsigned durationInMicroseconds = track->defaultDuration/1000; if (track->haveSubframes()) { // If this is a 'subframe', use a duration of 0 instead (unless it's the last 'subframe'): if (fCurOffsetWithinFrame + frameSize + track->subframeSizeSize < fFrameSizesWithinBlock[fNextFrameNumberToDeliver]) { // There's room for at least one more subframe after this, so give this subframe a duration of 0 durationInMicroseconds = 0; } } if (track->defaultDuration == 0) { // Adjust the frame duration to keep the sum of frame durations aligned with presentation times. if (demuxedTrack->prevPresentationTime().tv_sec != 0) { // not the first time for this track demuxedTrack->durationImbalance() += (presentationTime.tv_sec - demuxedTrack->prevPresentationTime().tv_sec)*1000000 + (presentationTime.tv_usec - demuxedTrack->prevPresentationTime().tv_usec); } int adjustment = 0; if (demuxedTrack->durationImbalance() > 0) { // The duration needs to be increased. int const adjustmentThreshold = 100000; // don't increase the duration by more than this amount (in case there's a mistake) adjustment = demuxedTrack->durationImbalance() > adjustmentThreshold ? adjustmentThreshold : demuxedTrack->durationImbalance(); } else if (demuxedTrack->durationImbalance() < 0) { // The duration needs to be decreased. adjustment = (unsigned)(-demuxedTrack->durationImbalance()) < durationInMicroseconds ? demuxedTrack->durationImbalance() : -(int)durationInMicroseconds; } durationInMicroseconds += adjustment; demuxedTrack->durationImbalance() -= durationInMicroseconds; // for next time demuxedTrack->prevPresentationTime() = presentationTime; // for next time } demuxedTrack->presentationTime() = presentationTime; demuxedTrack->durationInMicroseconds() = durationInMicroseconds; // Deliver the next block now: if (frameSize > demuxedTrack->maxSize()) { demuxedTrack->numTruncatedBytes() = frameSize - demuxedTrack->maxSize(); demuxedTrack->frameSize() = demuxedTrack->maxSize(); } else { // normal case demuxedTrack->numTruncatedBytes() = 0; demuxedTrack->frameSize() = frameSize; } getCommonFrameBytes(track, demuxedTrack->to(), demuxedTrack->frameSize(), demuxedTrack->numTruncatedBytes()); // Next, deliver (and/or skip) bytes from the input file: fCurrentParseState = DELIVERING_FRAME_BYTES; setParseState(); return True; } while (0); // An error occurred. Try to recover: #ifdef DEBUG fprintf(stderr, "deliverFrameWithinBlock(): Error parsing data; trying to recover...\n"); #endif fCurrentParseState = LOOKING_FOR_BLOCK; return True; } void MatroskaFileParser::deliverFrameBytes() { do { MatroskaTrack* track = fOurFile.lookup(fBlockTrackNumber); if (track == NULL) break; // shouldn't happen MatroskaDemuxedTrack* demuxedTrack = fOurDemux->lookupDemuxedTrack(fBlockTrackNumber); if (demuxedTrack == NULL) break; // shouldn't happen unsigned const BANK_SIZE = bankSize(); while (fCurFrameNumBytesToGet > 0) { // Hack: We can get no more than BANK_SIZE bytes at a time: unsigned numBytesToGet = fCurFrameNumBytesToGet > BANK_SIZE ? 
BANK_SIZE : fCurFrameNumBytesToGet; getBytes(fCurFrameTo, numBytesToGet); fCurFrameTo += numBytesToGet; fCurFrameNumBytesToGet -= numBytesToGet; fCurOffsetWithinFrame += numBytesToGet; setParseState(); } while (fCurFrameNumBytesToSkip > 0) { // Hack: We can skip no more than BANK_SIZE bytes at a time: unsigned numBytesToSkip = fCurFrameNumBytesToSkip > BANK_SIZE ? BANK_SIZE : fCurFrameNumBytesToSkip; skipBytes(numBytesToSkip); fCurFrameNumBytesToSkip -= numBytesToSkip; fCurOffsetWithinFrame += numBytesToSkip; setParseState(); } #ifdef DEBUG fprintf(stderr, "\tdelivered frame #%d: %d bytes", fNextFrameNumberToDeliver, demuxedTrack->frameSize()); if (track->haveSubframes()) fprintf(stderr, "[offset %d]", fCurOffsetWithinFrame - track->subframeSizeSize - demuxedTrack->frameSize() - demuxedTrack->numTruncatedBytes()); if (demuxedTrack->numTruncatedBytes() > 0) fprintf(stderr, " (%d bytes truncated)", demuxedTrack->numTruncatedBytes()); fprintf(stderr, " @%u.%06u (%.06f from start); duration %u us\n", demuxedTrack->presentationTime().tv_sec, demuxedTrack->presentationTime().tv_usec, demuxedTrack->presentationTime().tv_sec+demuxedTrack->presentationTime().tv_usec/1000000.0-fPresentationTimeOffset, demuxedTrack->durationInMicroseconds()); #endif if (!track->haveSubframes() || fCurOffsetWithinFrame + track->subframeSizeSize >= fFrameSizesWithinBlock[fNextFrameNumberToDeliver]) { // Either we don't have subframes, or there's no more room for another subframe => We're completely done with this frame now: ++fNextFrameNumberToDeliver; fCurOffsetWithinFrame = 0; } if (fNextFrameNumberToDeliver == fNumFramesInBlock) { // We've delivered all of the frames from this block. Look for another block next: fCurrentParseState = LOOKING_FOR_BLOCK; } else { fCurrentParseState = DELIVERING_FRAME_WITHIN_BLOCK; } setParseState(); FramedSource::afterGetting(demuxedTrack); // completes delivery return; } while (0); // An error occurred. Try to recover: #ifdef DEBUG fprintf(stderr, "deliverFrameBytes(): Error parsing data; trying to recover...\n"); #endif fCurrentParseState = LOOKING_FOR_BLOCK; } void MatroskaFileParser ::getCommonFrameBytes(MatroskaTrack* track, u_int8_t* to, unsigned numBytesToGet, unsigned numBytesToSkip) { if (track->headerStrippedBytesSize > fCurOffsetWithinFrame) { // We have some common 'header stripped' bytes that remain to be prepended to the frame. 
Use these first: unsigned numRemainingHeaderStrippedBytes = track->headerStrippedBytesSize - fCurOffsetWithinFrame; unsigned numHeaderStrippedBytesToGet; if (numBytesToGet <= numRemainingHeaderStrippedBytes) { numHeaderStrippedBytesToGet = numBytesToGet; numBytesToGet = 0; if (numBytesToGet + numBytesToSkip <= numRemainingHeaderStrippedBytes) { numBytesToSkip = 0; } else { numBytesToSkip = numBytesToGet + numBytesToSkip - numRemainingHeaderStrippedBytes; } } else { numHeaderStrippedBytesToGet = numRemainingHeaderStrippedBytes; numBytesToGet = numBytesToGet - numRemainingHeaderStrippedBytes; } if (numHeaderStrippedBytesToGet > 0) { memmove(to, &track->headerStrippedBytes[fCurOffsetWithinFrame], numHeaderStrippedBytesToGet); to += numHeaderStrippedBytesToGet; fCurOffsetWithinFrame += numHeaderStrippedBytesToGet; } } fCurFrameTo = to; fCurFrameNumBytesToGet = numBytesToGet; fCurFrameNumBytesToSkip = numBytesToSkip; } Boolean MatroskaFileParser::parseEBMLNumber(EBMLNumber& num) { unsigned i; u_int8_t bitmask = 0x80; for (i = 0; i < EBML_NUMBER_MAX_LEN; ++i) { while (1) { if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) return False; // We've hit our pre-set limit num.data[i] = get1Byte(); ++fCurOffsetInFile; // If we're looking for an id, skip any leading bytes that don't contain a '1' in the first 4 bits: if (i == 0/*we're a leading byte*/ && !num.stripLeading1/*we're looking for an id*/ && (num.data[i]&0xF0) == 0) { setParseState(); // ensures that we make forward progress if the parsing gets interrupted continue; } break; } if ((num.data[0]&bitmask) != 0) { // num[i] is the last byte of the id if (num.stripLeading1) num.data[0] &=~ bitmask; break; } bitmask >>= 1; } if (i == EBML_NUMBER_MAX_LEN) return False; num.len = i+1; return True; } Boolean MatroskaFileParser::parseEBMLIdAndSize(EBMLId& id, EBMLDataSize& size) { return parseEBMLNumber(id) && parseEBMLNumber(size); } Boolean MatroskaFileParser::parseEBMLVal_unsigned64(EBMLDataSize& size, u_int64_t& result) { u_int64_t sv = size.val(); if (sv > 8) return False; // size too large result = 0; // initially for (unsigned i = (unsigned)sv; i > 0; --i) { if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) return False; // We've hit our pre-set limit u_int8_t c = get1Byte(); ++fCurOffsetInFile; result = result*256 + c; } return True; } Boolean MatroskaFileParser::parseEBMLVal_unsigned(EBMLDataSize& size, unsigned& result) { if (size.val() > 4) return False; // size too large u_int64_t result64; if (!parseEBMLVal_unsigned64(size, result64)) return False; result = (unsigned)result64; return True; } Boolean MatroskaFileParser::parseEBMLVal_float(EBMLDataSize& size, float& result) { if (size.val() == 4) { // Normal case. 
Read the value as if it were a 4-byte integer, then copy it to the 'float' result: unsigned resultAsUnsigned; if (!parseEBMLVal_unsigned(size, resultAsUnsigned)) return False; if (sizeof result != sizeof resultAsUnsigned) return False; memcpy(&result, &resultAsUnsigned, sizeof result); return True; } else if (size.val() == 8) { // Read the value as if it were an 8-byte integer, then copy it to a 'double', then convert that to the 'float' result: u_int64_t resultAsUnsigned64; if (!parseEBMLVal_unsigned64(size, resultAsUnsigned64)) return False; double resultDouble; if (sizeof resultDouble != sizeof resultAsUnsigned64) return False; memcpy(&resultDouble, &resultAsUnsigned64, sizeof resultDouble); result = (float)resultDouble; return True; } else { // Unworkable size return False; } } Boolean MatroskaFileParser::parseEBMLVal_string(EBMLDataSize& size, char*& result) { unsigned resultLength = (unsigned)size.val(); result = new char[resultLength + 1]; // allow for the trailing '\0' if (result == NULL) return False; char* p = result; unsigned i; for (i = 0; i < resultLength; ++i) { if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) break; // We've hit our pre-set limit u_int8_t c = get1Byte(); ++fCurOffsetInFile; *p++ = c; } if (i < resultLength) { // an error occurred delete[] result; result = NULL; return False; } *p = '\0'; return True; } Boolean MatroskaFileParser::parseEBMLVal_binary(EBMLDataSize& size, u_int8_t*& result) { unsigned resultLength = (unsigned)size.val(); result = new u_int8_t[resultLength]; if (result == NULL) return False; u_int8_t* p = result; unsigned i; for (i = 0; i < resultLength; ++i) { if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) break; // We've hit our pre-set limit u_int8_t c = get1Byte(); ++fCurOffsetInFile; *p++ = c; } if (i < resultLength) { // an error occurred delete[] result; result = NULL; return False; } return True; } void MatroskaFileParser::skipHeader(EBMLDataSize const& size) { u_int64_t sv = size.val(); #ifdef DEBUG fprintf(stderr, "\tskipping %llu bytes\n", sv); #endif fNumHeaderBytesToSkip = sv; skipRemainingHeaderBytes(False); } void MatroskaFileParser::skipRemainingHeaderBytes(Boolean isContinuation) { if (fNumHeaderBytesToSkip == 0) return; // common case // Hack: To avoid tripping into a parser 'internal error' if we try to skip an excessively large // distance, break up the skipping into manageable chunks, to ensure forward progress: unsigned const maxBytesToSkip = bankSize(); while (fNumHeaderBytesToSkip > 0) { unsigned numBytesToSkipNow = fNumHeaderBytesToSkip < maxBytesToSkip ? 
(unsigned)fNumHeaderBytesToSkip : maxBytesToSkip; setParseState(); skipBytes(numBytesToSkipNow); #ifdef DEBUG if (isContinuation || numBytesToSkipNow < fNumHeaderBytesToSkip) { fprintf(stderr, "\t\t(skipped %u bytes; %llu bytes remaining)\n", numBytesToSkipNow, fNumHeaderBytesToSkip - numBytesToSkipNow); } #endif fCurOffsetInFile += numBytesToSkipNow; fNumHeaderBytesToSkip -= numBytesToSkipNow; } } void MatroskaFileParser::setParseState() { fSavedCurOffsetInFile = fCurOffsetInFile; fSavedCurOffsetWithinFrame = fCurOffsetWithinFrame; saveParserState(); } void MatroskaFileParser::restoreSavedParserState() { StreamParser::restoreSavedParserState(); fCurOffsetInFile = fSavedCurOffsetInFile; fCurOffsetWithinFrame = fSavedCurOffsetWithinFrame; } void MatroskaFileParser::seekToFilePosition(u_int64_t offsetInFile) { ByteStreamFileSource* fileSource = (ByteStreamFileSource*)fInputSource; // we know it's a "ByteStreamFileSource" if (fileSource != NULL) { fileSource->seekToByteAbsolute(offsetInFile); resetStateAfterSeeking(); } } void MatroskaFileParser::seekToEndOfFile() { ByteStreamFileSource* fileSource = (ByteStreamFileSource*)fInputSource; // we know it's a "ByteStreamFileSource" if (fileSource != NULL) { fileSource->seekToEnd(); resetStateAfterSeeking(); } } void MatroskaFileParser::resetStateAfterSeeking() { // Because we're resuming parsing after seeking to a new position in the file, reset the parser state: fCurOffsetInFile = fSavedCurOffsetInFile = 0; fCurOffsetWithinFrame = fSavedCurOffsetWithinFrame = 0; flushInput(); } live/liveMedia/MatroskaFile.cpp000444 001751 000000 00000047343 12265042432 016736 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class that encapsulates a Matroska file. // Implementation #include "MatroskaFileParser.hh" #include "MatroskaDemuxedTrack.hh" #include ////////// CuePoint definition ////////// class CuePoint { public: CuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster/* 1-based */); virtual ~CuePoint(); static void addCuePoint(CuePoint*& root, double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster/* 1-based */, Boolean& needToReviseBalanceOfParent); // If "cueTime" == "root.fCueTime", replace the existing data, otherwise add to the left or right subtree. // (Note that this is a static member function because - as a result of tree rotation - "root" might change.) Boolean lookup(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster); static void fprintf(FILE* fid, CuePoint* cuePoint); // used for debugging; it's static to allow for "cuePoint == NULL" private: // The "CuePoint" tree is implemented as an AVL Tree, to keep it balanced (for efficient lookup). 
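// (Aside, for reference: "fBalance" below uses the usual AVL convention - height(right subtree) minus height(left subtree) - and is kept in {-1, 0, +1}. When an insertion drives a node's balance to +/-2 and its taller child leans the same way, a single rotation restores the invariant, e.g. after two inserts to the right:
//
//      A (+2)                     B (0)
//       \                        /     \
//        B (+1)       =>        A       C
//         \
//          C
//
// When the taller child leans the opposite way (the Left-Right and Right-Left cases), two rotations are needed; "addCuePoint()" below implements both cases, updating the stored balances as it goes.)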
CuePoint* fSubTree[2]; // 0 => left; 1 => right CuePoint* left() const { return fSubTree[0]; } CuePoint* right() const { return fSubTree[1]; } char fBalance; // height of right subtree - height of left subtree static void rotate(unsigned direction/*0 => left; 1 => right*/, CuePoint*& root); // used to keep the tree in balance double fCueTime; u_int64_t fClusterOffsetInFile; unsigned fBlockNumWithinCluster; // 0-based }; UsageEnvironment& operator<<(UsageEnvironment& env, const CuePoint* cuePoint); // used for debugging ////////// MatroskaTrackTable definition ///////// // For looking up and iterating over the file's tracks: class MatroskaTrackTable { public: MatroskaTrackTable(); virtual ~MatroskaTrackTable(); void add(MatroskaTrack* newTrack, unsigned trackNumber); MatroskaTrack* lookup(unsigned trackNumber); unsigned numTracks() const; class Iterator { public: Iterator(MatroskaTrackTable& ourTable); virtual ~Iterator(); MatroskaTrack* next(); private: HashTable::Iterator* fIter; }; private: friend class Iterator; HashTable* fTable; }; ////////// MatroskaFile implementation ////////// void MatroskaFile ::createNew(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData, char const* preferredLanguage) { new MatroskaFile(env, fileName, onCreation, onCreationClientData, preferredLanguage); } MatroskaFile::MatroskaFile(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData, char const* preferredLanguage) : Medium(env), fFileName(strDup(fileName)), fOnCreation(onCreation), fOnCreationClientData(onCreationClientData), fPreferredLanguage(strDup(preferredLanguage)), fTimecodeScale(1000000), fSegmentDuration(0.0), fSegmentDataOffset(0), fClusterOffset(0), fCuesOffset(0), fCuePoints(NULL), fChosenVideoTrackNumber(0), fChosenAudioTrackNumber(0), fChosenSubtitleTrackNumber(0) { fTrackTable = new MatroskaTrackTable; fDemuxesTable = HashTable::create(ONE_WORD_HASH_KEYS); FramedSource* inputSource = ByteStreamFileSource::createNew(envir(), fileName); if (inputSource == NULL) { // The specified input file does not exist! fParserForInitialization = NULL; handleEndOfTrackHeaderParsing(); // we have no file, and thus no tracks, but we still need to signal this } else { // Initialize ourselves by parsing the file's 'Track' headers: fParserForInitialization = new MatroskaFileParser(*this, inputSource, handleEndOfTrackHeaderParsing, this, NULL); } } MatroskaFile::~MatroskaFile() { delete fParserForInitialization; delete fCuePoints; // Delete any outstanding "MatroskaDemux"s, and the table for them: MatroskaDemux* demux; while ((demux = (MatroskaDemux*)fDemuxesTable->RemoveNext()) != NULL) { delete demux; } delete fDemuxesTable; delete fTrackTable; delete[] (char*)fPreferredLanguage; delete[] (char*)fFileName; } void MatroskaFile::handleEndOfTrackHeaderParsing(void* clientData) { ((MatroskaFile*)clientData)->handleEndOfTrackHeaderParsing(); } class TrackChoiceRecord { public: unsigned trackNumber; u_int8_t trackType; unsigned choiceFlags; }; void MatroskaFile::handleEndOfTrackHeaderParsing() { // Having parsed all of our track headers, iterate through the tracks to figure out which ones should be played. // The Matroska 'specification' is rather imprecise about this (as usual). However, we use the following algorithm: // - Use one (but no more) enabled track of each type (video, audio, subtitle). (Ignore all tracks that are not 'enabled'.) // - For each track type, choose the one that's 'forced'. 
// - If more than one is 'forced', choose the first one that matches our preferred language, or the first if none matches. // - If none is 'forced', choose the one that's 'default'. // - If more than one is 'default', choose the first one that matches our preferred language, or the first if none matches. // - If none is 'default', choose the first one that matches our preferred language, or the first if none matches. unsigned numTracks = fTrackTable->numTracks(); if (numTracks > 0) { TrackChoiceRecord* trackChoice = new TrackChoiceRecord[numTracks]; unsigned numEnabledTracks = 0; MatroskaTrackTable::Iterator iter(*fTrackTable); MatroskaTrack* track; while ((track = iter.next()) != NULL) { if (!track->isEnabled || track->trackType == 0 || track->codecID == NULL) continue; // track not enabled, or not fully-defined trackChoice[numEnabledTracks].trackNumber = track->trackNumber; trackChoice[numEnabledTracks].trackType = track->trackType; // Assign flags for this track so that, when sorted, the largest value becomes our choice: unsigned choiceFlags = 0; if (fPreferredLanguage != NULL && track->language != NULL && strcmp(fPreferredLanguage, track->language) == 0) { // This track matches our preferred language: choiceFlags |= 1; } if (track->isForced) { choiceFlags |= 4; } else if (track->isDefault) { choiceFlags |= 2; } trackChoice[numEnabledTracks].choiceFlags = choiceFlags; ++numEnabledTracks; } // Choose the desired track for each track type: for (u_int8_t trackType = 0x01; trackType != MATROSKA_TRACK_TYPE_OTHER; trackType <<= 1) { int bestNum = -1; int bestChoiceFlags = -1; for (unsigned i = 0; i < numEnabledTracks; ++i) { if (trackChoice[i].trackType == trackType && (int)trackChoice[i].choiceFlags > bestChoiceFlags) { bestNum = i; bestChoiceFlags = (int)trackChoice[i].choiceFlags; } } if (bestChoiceFlags >= 0) { // There is a track for this track type if (trackType == MATROSKA_TRACK_TYPE_VIDEO) fChosenVideoTrackNumber = trackChoice[bestNum].trackNumber; else if (trackType == MATROSKA_TRACK_TYPE_AUDIO) fChosenAudioTrackNumber = trackChoice[bestNum].trackNumber; else fChosenSubtitleTrackNumber = trackChoice[bestNum].trackNumber; } } delete[] trackChoice; } #ifdef DEBUG if (fChosenVideoTrackNumber > 0) fprintf(stderr, "Chosen video track: #%d\n", fChosenVideoTrackNumber); else fprintf(stderr, "No chosen video track\n"); if (fChosenAudioTrackNumber > 0) fprintf(stderr, "Chosen audio track: #%d\n", fChosenAudioTrackNumber); else fprintf(stderr, "No chosen audio track\n"); if (fChosenSubtitleTrackNumber > 0) fprintf(stderr, "Chosen subtitle track: #%d\n", fChosenSubtitleTrackNumber); else fprintf(stderr, "No chosen subtitle track\n"); #endif // Delete our parser, because it's done its job now: delete fParserForInitialization; fParserForInitialization = NULL; // Finally, signal our caller that we've been created and initialized: if (fOnCreation != NULL) (*fOnCreation)(this, fOnCreationClientData); } MatroskaTrack* MatroskaFile::lookup(unsigned trackNumber) const { return fTrackTable->lookup(trackNumber); } MatroskaDemux* MatroskaFile::newDemux() { MatroskaDemux* demux = new MatroskaDemux(*this); fDemuxesTable->Add((char const*)demux, demux); return demux; } void MatroskaFile::removeDemux(MatroskaDemux* demux) { fDemuxesTable->Remove((char const*)demux); } float MatroskaFile::fileDuration() { if (fCuePoints == NULL) return 0.0; // Hack, because the RTSP server code assumes that duration > 0 => seekable. 
(fix this) ##### return segmentDuration()*(timecodeScale()/1000000000.0f); } void MatroskaFile::addTrack(MatroskaTrack* newTrack, unsigned trackNumber) { fTrackTable->add(newTrack, trackNumber); } void MatroskaFile::addCuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster) { Boolean dummy = False; // not used CuePoint::addCuePoint(fCuePoints, cueTime, clusterOffsetInFile, blockNumWithinCluster, dummy); } Boolean MatroskaFile::lookupCuePoint(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster) { if (fCuePoints == NULL) return False; (void)fCuePoints->lookup(cueTime, resultClusterOffsetInFile, resultBlockNumWithinCluster); return True; } void MatroskaFile::printCuePoints(FILE* fid) { CuePoint::fprintf(fid, fCuePoints); } ////////// MatroskaTrackTable implementation ////////// MatroskaTrackTable::MatroskaTrackTable() : fTable(HashTable::create(ONE_WORD_HASH_KEYS)) { } MatroskaTrackTable::~MatroskaTrackTable() { // Remove and delete all of our "MatroskaTrack" descriptors, and the hash table itself: MatroskaTrack* track; while ((track = (MatroskaTrack*)fTable->RemoveNext()) != NULL) { delete track; } delete fTable; } void MatroskaTrackTable::add(MatroskaTrack* newTrack, unsigned trackNumber) { if (newTrack != NULL && newTrack->trackNumber != 0) fTable->Remove((char const*)newTrack->trackNumber); MatroskaTrack* existingTrack = (MatroskaTrack*)fTable->Add((char const*)trackNumber, newTrack); delete existingTrack; // in case it wasn't NULL } MatroskaTrack* MatroskaTrackTable::lookup(unsigned trackNumber) { return (MatroskaTrack*)fTable->Lookup((char const*)trackNumber); } unsigned MatroskaTrackTable::numTracks() const { return fTable->numEntries(); } MatroskaTrackTable::Iterator::Iterator(MatroskaTrackTable& ourTable) { fIter = HashTable::Iterator::create(*(ourTable.fTable)); } MatroskaTrackTable::Iterator::~Iterator() { delete fIter; } MatroskaTrack* MatroskaTrackTable::Iterator::next() { char const* key; return (MatroskaTrack*)fIter->next(key); } ////////// MatroskaTrack implementation ////////// MatroskaTrack::MatroskaTrack() : trackNumber(0/*not set*/), trackType(0/*unknown*/), isEnabled(True), isDefault(True), isForced(False), defaultDuration(0), name(NULL), language(NULL), codecID(NULL), samplingFrequency(0), numChannels(2), mimeType(""), codecPrivateSize(0), codecPrivate(NULL), codecPrivateUsesH264FormatForH265(False), headerStrippedBytesSize(0), headerStrippedBytes(NULL), subframeSizeSize(0) { } MatroskaTrack::~MatroskaTrack() { delete[] name; delete[] language; delete[] codecID; delete[] codecPrivate; delete[] headerStrippedBytes; } ////////// MatroskaDemux implementation ////////// MatroskaDemux::MatroskaDemux(MatroskaFile& ourFile) : Medium(ourFile.envir()), fOurFile(ourFile), fDemuxedTracksTable(HashTable::create(ONE_WORD_HASH_KEYS)), fNextTrackTypeToCheck(0x1) { fOurParser = new MatroskaFileParser(ourFile, ByteStreamFileSource::createNew(envir(), ourFile.fileName()), handleEndOfFile, this, this); } MatroskaDemux::~MatroskaDemux() { // Begin by acting as if we've reached the end of the source file. This should cause all of our demuxed tracks to get closed. 
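// (Aside: "handleEndOfFile()" - invoked below, and defined later in this file - illustrates the standard defense for running callbacks that may mutate the container being iterated: snapshot first, then notify. A minimal sketch of the pattern, with hypothetical names:
//
//   Entry** snapshot = new Entry*[numEntries]; // 1. copy every entry out of the table...
//   for (unsigned i = 0; i < numEntries; ++i) snapshot[i] = iter->next();
//   for (unsigned i = 0; i < numEntries; ++i) notify(snapshot[i]); // 2. ...and only then call out
//   delete[] snapshot;
//
// so a callback that removes its own entry cannot invalidate the iteration.)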
handleEndOfFile(); // Then delete our table of "MatroskaDemuxedTrack"s // - but not the "MatroskaDemuxedTrack"s themselves; that should have already happened: delete fDemuxedTracksTable; delete fOurParser; fOurFile.removeDemux(this); } FramedSource* MatroskaDemux::newDemuxedTrack() { unsigned dummyResultTrackNumber; return newDemuxedTrack(dummyResultTrackNumber); } FramedSource* MatroskaDemux::newDemuxedTrack(unsigned& resultTrackNumber) { FramedSource* result; resultTrackNumber = 0; for (result = NULL; result == NULL && fNextTrackTypeToCheck != MATROSKA_TRACK_TYPE_OTHER; fNextTrackTypeToCheck <<= 1) { if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_VIDEO) resultTrackNumber = fOurFile.chosenVideoTrackNumber(); else if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_AUDIO) resultTrackNumber = fOurFile.chosenAudioTrackNumber(); else if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_SUBTITLE) resultTrackNumber = fOurFile.chosenSubtitleTrackNumber(); result = newDemuxedTrackByTrackNumber(resultTrackNumber); } return result; } FramedSource* MatroskaDemux::newDemuxedTrackByTrackNumber(unsigned trackNumber) { if (trackNumber == 0) return NULL; FramedSource* track = new MatroskaDemuxedTrack(envir(), trackNumber, *this); fDemuxedTracksTable->Add((char const*)trackNumber, track); return track; } MatroskaDemuxedTrack* MatroskaDemux::lookupDemuxedTrack(unsigned trackNumber) { return (MatroskaDemuxedTrack*)fDemuxedTracksTable->Lookup((char const*)trackNumber); } void MatroskaDemux::removeTrack(unsigned trackNumber) { fDemuxedTracksTable->Remove((char const*)trackNumber); if (fDemuxedTracksTable->numEntries() == 0) { // We no longer have any demuxed tracks, so delete ourselves now: delete this; } } void MatroskaDemux::continueReading() { fOurParser->continueParsing(); } void MatroskaDemux::seekToTime(double& seekNPT) { if (fOurParser != NULL) fOurParser->seekToTime(seekNPT); } void MatroskaDemux::handleEndOfFile(void* clientData) { ((MatroskaDemux*)clientData)->handleEndOfFile(); } void MatroskaDemux::handleEndOfFile() { // Iterate through all of our 'demuxed tracks', handling 'end of input' on each one. // Hack: Because this can cause the hash table to get modified underneath us, we don't call the handlers until after we've // first iterated through all of the tracks. unsigned numTracks = fDemuxedTracksTable->numEntries(); if (numTracks == 0) return; MatroskaDemuxedTrack** tracks = new MatroskaDemuxedTrack*[numTracks]; HashTable::Iterator* iter = HashTable::Iterator::create(*fDemuxedTracksTable); unsigned i; char const* trackNumber; for (i = 0; i < numTracks; ++i) { tracks[i] = (MatroskaDemuxedTrack*)iter->next(trackNumber); } delete iter; for (i = 0; i < numTracks; ++i) { if (tracks[i] == NULL) continue; // sanity check; shouldn't happen FramedSource::handleClosure(tracks[i]); } delete[] tracks; } ////////// CuePoint implementation ////////// CuePoint::CuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster) : fBalance(0), fCueTime(cueTime), fClusterOffsetInFile(clusterOffsetInFile), fBlockNumWithinCluster(blockNumWithinCluster - 1) { fSubTree[0] = fSubTree[1] = NULL; } CuePoint::~CuePoint() { delete fSubTree[0]; delete fSubTree[1]; } #ifndef ABS #define ABS(x) (x)<0 ? 
-(x) : (x) #endif void CuePoint::addCuePoint(CuePoint*& root, double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster, Boolean& needToReviseBalanceOfParent) { needToReviseBalanceOfParent = False; // by default; may get changed below if (root == NULL) { root = new CuePoint(cueTime, clusterOffsetInFile, blockNumWithinCluster); needToReviseBalanceOfParent = True; } else if (cueTime == root->fCueTime) { // Replace existing data: root->fClusterOffsetInFile = clusterOffsetInFile; root->fBlockNumWithinCluster = blockNumWithinCluster - 1; } else { // Add to our left or right subtree: int direction = cueTime > root->fCueTime; // 0 (left) or 1 (right) Boolean needToReviseOurBalance = False; addCuePoint(root->fSubTree[direction], cueTime, clusterOffsetInFile, blockNumWithinCluster, needToReviseOurBalance); if (needToReviseOurBalance) { // We need to change our 'balance' number, perhaps while also performing a rotation to bring ourself back into balance: if (root->fBalance == 0) { // We were balanced before, but now we're unbalanced (by 1) on the "direction" side: root->fBalance = -1 + 2*direction; // -1 for "direction" 0; 1 for "direction" 1 needToReviseBalanceOfParent = True; } else if (root->fBalance == 1 - 2*direction) { // 1 for "direction" 0; -1 for "direction" 1 // We were unbalanced (by 1) on the side opposite to where we added an entry, so now we're balanced: root->fBalance = 0; } else { // We were unbalanced (by 1) on the side where we added an entry, so now we're unbalanced by 2, and have to rebalance: if (root->fSubTree[direction]->fBalance == -1 + 2*direction) { // -1 for "direction" 0; 1 for "direction" 1 // We're 'doubly-unbalanced' on this side, so perform a single rotation in the opposite direction: root->fBalance = root->fSubTree[direction]->fBalance = 0; rotate(1-direction, root); } else { // This is the Left-Right case (for "direction" 0) or the Right-Left case (for "direction" 1); perform two rotations: char newParentCurBalance = root->fSubTree[direction]->fSubTree[1-direction]->fBalance; if (newParentCurBalance == 1 - 2*direction) { // 1 for "direction" 0; -1 for "direction" 1 root->fBalance = 0; root->fSubTree[direction]->fBalance = -1 + 2*direction; // -1 for "direction" 0; 1 for "direction" 1 } else if (newParentCurBalance == 0) { root->fBalance = 0; root->fSubTree[direction]->fBalance = 0; } else { root->fBalance = 1 - 2*direction; // 1 for "direction" 0; -1 for "direction" 1 root->fSubTree[direction]->fBalance = 0; } rotate(direction, root->fSubTree[direction]); root->fSubTree[direction]->fBalance = 0; // the new root will be balanced rotate(1-direction, root); } } } } } Boolean CuePoint::lookup(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster) { if (cueTime < fCueTime) { if (left() == NULL) { resultClusterOffsetInFile = 0; resultBlockNumWithinCluster = 0; return False; } else { return left()->lookup(cueTime, resultClusterOffsetInFile, resultBlockNumWithinCluster); } } else { if (right() == NULL || !right()->lookup(cueTime, resultClusterOffsetInFile, resultBlockNumWithinCluster)) { // Use this record: cueTime = fCueTime; resultClusterOffsetInFile = fClusterOffsetInFile; resultBlockNumWithinCluster = fBlockNumWithinCluster; } return True; } } void CuePoint::fprintf(FILE* fid, CuePoint* cuePoint) { if (cuePoint != NULL) { ::fprintf(fid, "["); fprintf(fid, cuePoint->left()); ::fprintf(fid, ",%.1f{%d},", cuePoint->fCueTime, cuePoint->fBalance); fprintf(fid, cuePoint->right()); ::fprintf(fid, "]"); } } void 
CuePoint::rotate(unsigned direction/*0 => left; 1 => right*/, CuePoint*& root) { CuePoint* pivot = root->fSubTree[1-direction]; // ASSERT: pivot != NULL root->fSubTree[1-direction] = pivot->fSubTree[direction]; pivot->fSubTree[direction] = root; root = pivot; } live/liveMedia/MP3StreamState.cpp000444 001751 000000 00000032144 12265042432 017122 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class encapsulating the state of a MP3 stream // Implementation #include "MP3StreamState.hh" #include "InputFile.hh" #include "GroupsockHelper.hh" #if defined(__WIN32__) || defined(_WIN32) #define snprintf _snprintf #if _MSC_VER >= 1400 // 1400 == vs2005 #define fileno _fileno #endif #endif #define MILLION 1000000 MP3StreamState::MP3StreamState(UsageEnvironment& env) : fEnv(env), fFid(NULL), fPresentationTimeScale(1) { } MP3StreamState::~MP3StreamState() { // Close our open file or socket: if (fFid != NULL && fFid != stdin) { if (fFidIsReallyASocket) { intptr_t fid_long = (intptr_t)fFid; closeSocket((int)fid_long); } else { CloseInputFile(fFid); } } } void MP3StreamState::assignStream(FILE* fid, unsigned fileSize) { fFid = fid; if (fileSize == (unsigned)(-1)) { /*HACK#####*/ fFidIsReallyASocket = 1; fFileSize = 0; } else { fFidIsReallyASocket = 0; fFileSize = fileSize; } fNumFramesInFile = 0; // until we know otherwise fIsVBR = fHasXingTOC = False; // ditto // Set the first frame's 'presentation time' to the current wall time: gettimeofday(&fNextFramePresentationTime, NULL); } struct timeval MP3StreamState::currentFramePlayTime() const { unsigned const numSamples = 1152; unsigned const freq = fr().samplingFreq*(1 + fr().isMPEG2); // result is numSamples/freq unsigned const uSeconds = ((numSamples*2*MILLION)/freq + 1)/2; // rounds to nearest integer struct timeval result; result.tv_sec = uSeconds/MILLION; result.tv_usec = uSeconds%MILLION; return result; } float MP3StreamState::filePlayTime() const { unsigned numFramesInFile = fNumFramesInFile; if (numFramesInFile == 0) { // Estimate the number of frames from the file size, and the // size of the current frame: numFramesInFile = fFileSize/(4 + fCurrentFrame.frameSize); } struct timeval const pt = currentFramePlayTime(); return numFramesInFile*(pt.tv_sec + pt.tv_usec/(float)MILLION); } unsigned MP3StreamState::getByteNumberFromPositionFraction(float fraction) { if (fHasXingTOC) { // The file is VBR, with a Xing TOC; use it to determine which byte to seek to: float percent = fraction*100.0f; unsigned a = (unsigned)percent; if (a > 99) a = 99; unsigned fa = fXingTOC[a]; unsigned fb; if (a < 99) { fb = fXingTOC[a+1]; } else { fb = 256; } fraction = (fa + (fb-fa)*(percent-a))/256.0f; } return (unsigned)(fraction*fFileSize); } void MP3StreamState::seekWithinFile(unsigned 
seekByteNumber) { if (fFidIsReallyASocket) return; // it's not seekable SeekFile64(fFid, seekByteNumber, SEEK_SET); } unsigned MP3StreamState::findNextHeader(struct timeval& presentationTime) { presentationTime = fNextFramePresentationTime; if (!findNextFrame()) return 0; // From this frame, figure out the *next* frame's presentation time: struct timeval framePlayTime = currentFramePlayTime(); if (fPresentationTimeScale > 1) { // Scale this value unsigned secondsRem = framePlayTime.tv_sec % fPresentationTimeScale; framePlayTime.tv_sec -= secondsRem; framePlayTime.tv_usec += secondsRem*MILLION; framePlayTime.tv_sec /= fPresentationTimeScale; framePlayTime.tv_usec /= fPresentationTimeScale; } fNextFramePresentationTime.tv_usec += framePlayTime.tv_usec; fNextFramePresentationTime.tv_sec += framePlayTime.tv_sec + fNextFramePresentationTime.tv_usec/MILLION; fNextFramePresentationTime.tv_usec %= MILLION; return fr().hdr; } Boolean MP3StreamState::readFrame(unsigned char* outBuf, unsigned outBufSize, unsigned& resultFrameSize, unsigned& resultDurationInMicroseconds) { /* We assume that "mp3FindNextHeader()" has already been called */ resultFrameSize = 4 + fr().frameSize; if (outBufSize < resultFrameSize) { #ifdef DEBUG_ERRORS fprintf(stderr, "Insufficient buffer size for reading input frame (%d, need %d)\n", outBufSize, resultFrameSize); #endif if (outBufSize < 4) outBufSize = 0; resultFrameSize = outBufSize; return False; } if (resultFrameSize >= 4) { unsigned& hdr = fr().hdr; *outBuf++ = (unsigned char)(hdr>>24); *outBuf++ = (unsigned char)(hdr>>16); *outBuf++ = (unsigned char)(hdr>>8); *outBuf++ = (unsigned char)(hdr); memmove(outBuf, fr().frameBytes, resultFrameSize-4); } struct timeval const pt = currentFramePlayTime(); resultDurationInMicroseconds = pt.tv_sec*MILLION + pt.tv_usec; return True; } void MP3StreamState::getAttributes(char* buffer, unsigned bufferSize) const { char const* formatStr = "bandwidth %d MPEGnumber %d MPEGlayer %d samplingFrequency %d isStereo %d playTime %d isVBR %d"; unsigned fpt = (unsigned)(filePlayTime() + 0.5); // rounds to nearest integer #if defined(IRIX) || defined(ALPHA) || defined(_QNX4) || defined(IMN_PIM) || defined(CRIS) /* snprintf() isn't defined, so just use sprintf() - ugh! */ sprintf(buffer, formatStr, fr().bitrate, fr().isMPEG2 ? 2 : 1, fr().layer, fr().samplingFreq, fr().isStereo, fpt, fIsVBR); #else snprintf(buffer, bufferSize, formatStr, fr().bitrate, fr().isMPEG2 ? 
2 : 1, fr().layer, fr().samplingFreq, fr().isStereo, fpt, fIsVBR); #endif } // This is crufty old code that needs to be cleaned up ##### #define HDRCMPMASK 0xfffffd00 Boolean MP3StreamState::findNextFrame() { unsigned char hbuf[8]; unsigned l; int i; int attempt = 0; read_again: if (readFromStream(hbuf, 4) != 4) return False; fr().hdr = ((unsigned long) hbuf[0] << 24) | ((unsigned long) hbuf[1] << 16) | ((unsigned long) hbuf[2] << 8) | (unsigned long) hbuf[3]; #ifdef DEBUG_PARSE fprintf(stderr, "fr().hdr: 0x%08x\n", fr().hdr); #endif if (fr().oldHdr != fr().hdr || !fr().oldHdr) { i = 0; init_resync: #ifdef DEBUG_PARSE fprintf(stderr, "init_resync: fr().hdr: 0x%08x\n", fr().hdr); #endif if ( (fr().hdr & 0xffe00000) != 0xffe00000 || (fr().hdr & 0x00060000) == 0 // undefined 'layer' field || (fr().hdr & 0x0000F000) == 0 // 'free format' bitrate index || (fr().hdr & 0x0000F000) == 0x0000F000 // undefined bitrate index || (fr().hdr & 0x00000C00) == 0x00000C00 // undefined frequency index || (fr().hdr & 0x00000003) != 0x00000000 // 'emphasis' field unexpectedly set ) { /* RSF: Do the following test even if we're not at the start of the file, in case we have two or more separate MP3 files cat'ed together: */ /* Check for RIFF hdr */ if (fr().hdr == ('R'<<24)+('I'<<16)+('F'<<8)+'F') { unsigned char buf[70 /*was: 40*/]; #ifdef DEBUG_ERRORS fprintf(stderr,"Skipped RIFF header\n"); #endif readFromStream(buf, 66); /* already read 4 */ goto read_again; } /* Check for ID3 hdr */ if ((fr().hdr&0xFFFFFF00) == ('I'<<24)+('D'<<16)+('3'<<8)) { unsigned tagSize, bytesToSkip; unsigned char buf[1000]; readFromStream(buf, 6); /* already read 4 */ tagSize = ((buf[2]&0x7F)<<21) + ((buf[3]&0x7F)<<14) + ((buf[4]&0x7F)<<7) + (buf[5]&0x7F); bytesToSkip = tagSize; while (bytesToSkip > 0) { unsigned bytesToRead = sizeof buf; if (bytesToRead > bytesToSkip) { bytesToRead = bytesToSkip; } readFromStream(buf, bytesToRead); bytesToSkip -= bytesToRead; } #ifdef DEBUG_ERRORS fprintf(stderr,"Skipped %d-byte ID3 header\n", tagSize); #endif goto read_again; } /* give up after 20,000 bytes */ if (i++ < 20000/*4096*//*1024*/) { memmove (&hbuf[0], &hbuf[1], 3); if (readFromStream(hbuf+3,1) != 1) { return False; } fr().hdr <<= 8; fr().hdr |= hbuf[3]; fr().hdr &= 0xffffffff; #ifdef DEBUG_PARSE fprintf(stderr, "calling init_resync %d\n", i); #endif goto init_resync; } #ifdef DEBUG_ERRORS fprintf(stderr,"Giving up searching valid MPEG header\n"); #endif return False; #ifdef DEBUG_ERRORS fprintf(stderr,"Illegal Audio-MPEG-Header 0x%08lx at offset 0x%lx.\n", fr().hdr,tell_stream(str)-4); #endif /* Read more bytes until we find something that looks reasonably like a valid header. This is not a perfect strategy, but it should get us back on the track within a short time (and hopefully without too much distortion in the audio output). */ do { attempt++; memmove (&hbuf[0], &hbuf[1], 7); if (readFromStream(&hbuf[3],1) != 1) { return False; } /* This is faster than combining fr().hdr from scratch */ fr().hdr = ((fr().hdr << 8) | hbuf[3]) & 0xffffffff; if (!fr().oldHdr) goto init_resync; /* "considered harmful", eh? 
*/ } while ((fr().hdr & HDRCMPMASK) != (fr().oldHdr & HDRCMPMASK) && (fr().hdr & HDRCMPMASK) != (fr().firstHdr & HDRCMPMASK)); #ifdef DEBUG_ERRORS fprintf (stderr, "Skipped %d bytes in input.\n", attempt); #endif } if (!fr().firstHdr) { fr().firstHdr = fr().hdr; } fr().setParamsFromHeader(); fr().setBytePointer(fr().frameBytes, fr().frameSize); fr().oldHdr = fr().hdr; if (fr().isFreeFormat) { #ifdef DEBUG_ERRORS fprintf(stderr,"Free format not supported.\n"); #endif return False; } #ifdef MP3_ONLY if (fr().layer != 3) { #ifdef DEBUG_ERRORS fprintf(stderr, "MPEG layer %d is not supported!\n", fr().layer); #endif return False; } #endif } if ((l = readFromStream(fr().frameBytes, fr().frameSize)) != fr().frameSize) { if (l == 0) return False; memset(fr().frameBytes+l, 0, fr().frameSize-l); /* zero-fill whatever we failed to read */ } return True; } static Boolean socketIsReadable(int socket) { const unsigned numFds = socket+1; fd_set rd_set; FD_ZERO(&rd_set); FD_SET((unsigned)socket, &rd_set); struct timeval timeout; timeout.tv_sec = timeout.tv_usec = 0; int result = select(numFds, &rd_set, NULL, NULL, &timeout); return result != 0; // not > 0, because windows can return -1 for file sockets } static char watchVariable; static void checkFunc(void* /*clientData*/) { watchVariable = ~0; } static void waitUntilSocketIsReadable(UsageEnvironment& env, int socket) { while (!socketIsReadable(socket)) { // Delay a short period of time before checking again. unsigned usecsToDelay = 1000; // 1 ms env.taskScheduler().scheduleDelayedTask(usecsToDelay, (TaskFunc*)checkFunc, (void*)NULL); watchVariable = 0; env.taskScheduler().doEventLoop(&watchVariable); // This allows other tasks to run while we're waiting: } } unsigned MP3StreamState::readFromStream(unsigned char* buf, unsigned numChars) { // Hack for doing socket I/O instead of file I/O (e.g., on Windows) if (fFidIsReallyASocket) { intptr_t fid_long = (intptr_t)fFid; int sock = (int)fid_long; unsigned totBytesRead = 0; do { waitUntilSocketIsReadable(fEnv, sock); int bytesRead = recv(sock, &((char*)buf)[totBytesRead], numChars-totBytesRead, 0); if (bytesRead < 0) return 0; totBytesRead += (unsigned)bytesRead; } while (totBytesRead < numChars); return totBytesRead; } else { #ifndef _WIN32_WCE waitUntilSocketIsReadable(fEnv, (int)fileno(fFid)); #endif return fread(buf, 1, numChars, fFid); } } #define XING_FRAMES_FLAG 0x0001 #define XING_BYTES_FLAG 0x0002 #define XING_TOC_FLAG 0x0004 #define XING_VBR_SCALE_FLAG 0x0008 void MP3StreamState::checkForXingHeader() { // Look for 'Xing' in the first 4 bytes after the 'side info': if (fr().frameSize < fr().sideInfoSize) return; unsigned bytesAvailable = fr().frameSize - fr().sideInfoSize; unsigned char* p = &(fr().frameBytes[fr().sideInfoSize]); if (bytesAvailable < 8) return; if (p[0] != 'X' || p[1] != 'i' || p[2] != 'n' || p[3] != 'g') return; // We found it.
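// (For reference, the layout parsed below: after the 4-byte "Xing" signature
// comes a 4-byte big-endian 'flags' word, followed - in this fixed order - by
// whichever optional fields the flags declare:
//   XING_FRAMES_FLAG    -> a 4-byte total frame count
//   XING_BYTES_FLAG     -> a 4-byte total file size
//   XING_TOC_FLAG       -> a XING_TOC_LENGTH (100)-byte seek table
//   XING_VBR_SCALE_FLAG -> a 4-byte VBR quality scale (declared above, but
//                          not parsed here)
// Each field is present only if its flag bit is set, so the index "i" must be
// advanced past every field that is consumed.)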
fIsVBR = True; u_int32_t flags = (p[4]<<24) | (p[5]<<16) | (p[6]<<8) | p[7]; unsigned i = 8; bytesAvailable -= 8; if (flags&XING_FRAMES_FLAG) { // The next 4 bytes are the number of frames: if (bytesAvailable < 4) return; fNumFramesInFile = (p[i]<<24)|(p[i+1]<<16)|(p[i+2]<<8)|(p[i+3]); i += 4; bytesAvailable -= 4; } if (flags&XING_BYTES_FLAG) { // The next 4 bytes are the file size: if (bytesAvailable < 4) return; fFileSize = (p[i]<<24)|(p[i+1]<<16)|(p[i+2]<<8)|(p[i+3]); i += 4; bytesAvailable -= 4; } if (flags&XING_TOC_FLAG) { // Fill in the Xing 'table of contents': if (bytesAvailable < XING_TOC_LENGTH) return; fHasXingTOC = True; for (unsigned j = 0; j < XING_TOC_LENGTH; ++j) { fXingTOC[j] = p[i+j]; } i += XING_TOC_LENGTH; /* advance past the whole TOC, not by the flag's value */ bytesAvailable -= XING_TOC_LENGTH; } } live/liveMedia/H264or5VideoStreamDiscreteFramer.cpp000444 001751 000000 00000010657 12265042432 022407 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simplified version of "H264or5VideoStreamFramer" that takes only complete, // discrete frames (rather than an arbitrary byte stream) as input. // This avoids the parsing and data copying overhead of the full // "H264or5VideoStreamFramer". // Implementation #include "H264or5VideoStreamDiscreteFramer.hh" H264or5VideoStreamDiscreteFramer ::H264or5VideoStreamDiscreteFramer(int hNumber, UsageEnvironment& env, FramedSource* inputSource) : H264or5VideoStreamFramer(hNumber, env, inputSource, False/*don't create a parser*/, False) { } H264or5VideoStreamDiscreteFramer::~H264or5VideoStreamDiscreteFramer() { } void H264or5VideoStreamDiscreteFramer::doGetNextFrame() { // Arrange to read data (which should be a complete H.264 or H.265 NAL unit) // from our data source, directly into the client's input buffer. // After reading this, we'll do some parsing on the frame.
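// (Note the wiring below: "fTo" and "fMaxSize" describe our own reader's
// buffer, so the NAL unit lands there with no intermediate copy;
// "afterGettingFrame" is our completion callback, and
// "FramedSource::handleClosure" runs if the input source closes instead.)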
fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this, FramedSource::handleClosure, this); } void H264or5VideoStreamDiscreteFramer ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { H264or5VideoStreamDiscreteFramer* source = (H264or5VideoStreamDiscreteFramer*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void H264or5VideoStreamDiscreteFramer ::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // Get the "nal_unit_type", to see if this NAL unit is one that we want to save a copy of: u_int8_t nal_unit_type; if (fHNumber == 264 && frameSize >= 1) { nal_unit_type = fTo[0]&0x1F; } else if (fHNumber == 265 && frameSize >= 2) { nal_unit_type = (fTo[0]&0x7E)>>1; } else { // This is too short to be a valid NAL unit, so just assume a bogus nal_unit_type nal_unit_type = 0xFF; } // Begin by checking for a (likely) common error: NAL units that (erroneously) begin with a // 0x00000001 or 0x000001 'start code'. (Those start codes should only be in byte-stream data; // *not* data that consists of discrete NAL units.) // Once again, to be clear: The NAL units that you feed to a "H264or5VideoStreamDiscreteFramer" // MUST NOT include start codes. if (frameSize >= 4 && fTo[0] == 0 && fTo[1] == 0 && ((fTo[2] == 0 && fTo[3] == 1) || fTo[2] == 1)) { envir() << "H264or5VideoStreamDiscreteFramer error: MPEG 'start code' seen in the input\n"; } else if (isVPS(nal_unit_type)) { // Video parameter set (VPS) saveCopyOfVPS(fTo, frameSize); } else if (isSPS(nal_unit_type)) { // Sequence parameter set (SPS) saveCopyOfSPS(fTo, frameSize); } else if (isPPS(nal_unit_type)) { // Picture parameter set (PPS) saveCopyOfPPS(fTo, frameSize); } // Next, check whether this NAL unit ends the current 'access unit' (basically, a video frame). // Unfortunately, we can't do this reliably, because we don't yet know anything about the // *next* NAL unit that we'll see. So, we guess this as best as we can, by assuming that // if this NAL unit is a VCL NAL unit, then it ends the current 'access unit'. if (isVCL(nal_unit_type)) fPictureEndMarker = True; // Finally, complete delivery to the client: fFrameSize = frameSize; fNumTruncatedBytes = numTruncatedBytes; fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; afterGetting(this); } live/liveMedia/H265VideoFileServerMediaSubsession.cpp000444 001751 000000 00000011122 12265042432 022757 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
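A usage note on the preceding framer: if your encoder produces Annex-B byte-stream output (NAL units prefixed with 0x00000001 or 0x000001 start codes), the start code must be stripped before each NAL unit is delivered. A minimal sketch - the helper's name and the idea of adjusting a (pointer, size) pair in place are illustrative assumptions, not library API:

  // Hypothetical helper: skip a leading Annex-B start code, so that the
  // buffer can be fed to a "H264or5VideoStreamDiscreteFramer" subclass:
  static unsigned char* skipStartCode(unsigned char* nalData, unsigned& nalSize) {
    if (nalSize >= 3 && nalData[0] == 0 && nalData[1] == 0) {
      if (nalData[2] == 1) { nalData += 3; nalSize -= 3; } // 0x000001
      else if (nalSize >= 4 && nalData[2] == 0 && nalData[3] == 1) { nalData += 4; nalSize -= 4; } // 0x00000001
    }
    return nalData;
  }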
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a H265 video file. // Implementation #include "H265VideoFileServerMediaSubsession.hh" #include "H265VideoRTPSink.hh" #include "ByteStreamFileSource.hh" #include "H265VideoStreamFramer.hh" H265VideoFileServerMediaSubsession* H265VideoFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) { return new H265VideoFileServerMediaSubsession(env, fileName, reuseFirstSource); } H265VideoFileServerMediaSubsession::H265VideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) : FileServerMediaSubsession(env, fileName, reuseFirstSource), fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) { } H265VideoFileServerMediaSubsession::~H265VideoFileServerMediaSubsession() { delete[] fAuxSDPLine; } static void afterPlayingDummy(void* clientData) { H265VideoFileServerMediaSubsession* subsess = (H265VideoFileServerMediaSubsession*)clientData; subsess->afterPlayingDummy1(); } void H265VideoFileServerMediaSubsession::afterPlayingDummy1() { // Unschedule any pending 'checking' task: envir().taskScheduler().unscheduleDelayedTask(nextTask()); // Signal the event loop that we're done: setDoneFlag(); } static void checkForAuxSDPLine(void* clientData) { H265VideoFileServerMediaSubsession* subsess = (H265VideoFileServerMediaSubsession*)clientData; subsess->checkForAuxSDPLine1(); } void H265VideoFileServerMediaSubsession::checkForAuxSDPLine1() { char const* dasl; if (fAuxSDPLine != NULL) { // Signal the event loop that we're done: setDoneFlag(); } else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) { fAuxSDPLine = strDup(dasl); fDummyRTPSink = NULL; // Signal the event loop that we're done: setDoneFlag(); } else { // try again after a brief delay: int uSecsToDelay = 100000; // 100 ms nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)checkForAuxSDPLine, this); } } char const* H265VideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) { if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client) if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream // Note: For H265 video files, the 'config' information (used for several payload-format // specific parameters in the SDP description) isn't known until we start reading the file. // This means that "rtpSink"s "auxSDPLine()" will be NULL initially, // and we need to start reading data from our file until this changes. 
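// (The handshake implemented below: we 'play' the file into "rtpSink" purely
// to drive its parser; "checkForAuxSDPLine1()" above re-polls "auxSDPLine()"
// every 100 ms via a delayed task, and once it becomes non-NULL,
// "setDoneFlag()" makes the "doEventLoop(&fDoneFlag)" call below return,
// with the line cached in "fAuxSDPLine".)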
fDummyRTPSink = rtpSink; // Start reading the file: fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this); // Check whether the sink's 'auxSDPLine()' is ready: checkForAuxSDPLine(this); } envir().taskScheduler().doEventLoop(&fDoneFlag); return fAuxSDPLine; } FramedSource* H265VideoFileServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { estBitrate = 500; // kbps, estimate // Create the video source: ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; fFileSize = fileSource->fileSize(); // Create a framer for the Video Elementary Stream: return H265VideoStreamFramer::createNew(envir(), fileSource); } RTPSink* H265VideoFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { return H265VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } live/liveMedia/DeviceSource.cpp000444 001751 000000 00000015701 12265042432 016726 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A template for a MediaSource encapsulating an audio/video input device // // NOTE: Sections of this code labeled "%%% TO BE WRITTEN %%%" are incomplete, and need to be written by the programmer // (depending on the features of the particular device). // Implementation #include "DeviceSource.hh" #include // for "gettimeofday()" DeviceSource* DeviceSource::createNew(UsageEnvironment& env, DeviceParameters params) { return new DeviceSource(env, params); } EventTriggerId DeviceSource::eventTriggerId = 0; unsigned DeviceSource::referenceCount = 0; DeviceSource::DeviceSource(UsageEnvironment& env, DeviceParameters params) : FramedSource(env), fParams(params) { if (referenceCount == 0) { // Any global initialization of the device would be done here: //%%% TO BE WRITTEN %%% } ++referenceCount; // Any instance-specific initialization of the device would be done here: //%%% TO BE WRITTEN %%% // We arrange here for our "deliverFrame" member function to be called // whenever the next frame of data becomes available from the device. // // If the device can be accessed as a readable socket, then one easy way to do this is using a call to // envir().taskScheduler().turnOnBackgroundReadHandling( ... ) // (See examples of this call in the "liveMedia" directory.) 
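// (For instance - a hypothetical sketch, assuming the device exposes a
// readable descriptor "fDeviceFd" and a handler with the standard
// "BackgroundHandlerProc" signature:
//   envir().taskScheduler().turnOnBackgroundReadHandling(fDeviceFd,
//     (TaskScheduler::BackgroundHandlerProc*)&deviceReadHandler, this);
// where "deviceReadHandler(void* clientData, int mask)" would simply call
// "((DeviceSource*)clientData)->deliverFrame()".)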
// // If, however, the device *cannot* be accessed as a readable socket, then instead we can implement it using 'event triggers': // Create an 'event trigger' for this device (if it hasn't already been done): if (eventTriggerId == 0) { eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0); } } DeviceSource::~DeviceSource() { // Any instance-specific 'destruction' (i.e., resetting) of the device would be done here: //%%% TO BE WRITTEN %%% --referenceCount; if (referenceCount == 0) { // Any global 'destruction' (i.e., resetting) of the device would be done here: //%%% TO BE WRITTEN %%% // Reclaim our 'event trigger' envir().taskScheduler().deleteEventTrigger(eventTriggerId); eventTriggerId = 0; } } void DeviceSource::doGetNextFrame() { // This function is called (by our 'downstream' object) when it asks for new data. // Note: If, for some reason, the source device stops being readable (e.g., it gets closed), then you do the following: if (0 /* the source stops being readable */ /*%%% TO BE WRITTEN %%%*/) { handleClosure(this); return; } // If a new frame of data is immediately available to be delivered, then do this now: if (0 /* a new frame of data is immediately available to be delivered*/ /*%%% TO BE WRITTEN %%%*/) { deliverFrame(); } // No new data is immediately available to be delivered. We don't do anything more here. // Instead, our event trigger must be called (e.g., from a separate thread) when new data becomes available. } void DeviceSource::deliverFrame0(void* clientData) { ((DeviceSource*)clientData)->deliverFrame(); } void DeviceSource::deliverFrame() { // This function is called when new frame data is available from the device. // We deliver this data by copying it to the 'downstream' object, using the following parameters (class members): // 'in' parameters (these should *not* be modified by this function): // fTo: The frame data is copied to this address. // (Note that the variable "fTo" is *not* modified. Instead, // the frame data is copied to the address pointed to by "fTo".) // fMaxSize: This is the maximum number of bytes that can be copied // (If the actual frame is larger than this, then it should // be truncated, and "fNumTruncatedBytes" set accordingly.) // 'out' parameters (these are modified by this function): // fFrameSize: Should be set to the delivered frame size (<= fMaxSize). // fNumTruncatedBytes: Should be set iff the delivered frame would have been // bigger than "fMaxSize", in which case it's set to the number of bytes // that have been omitted. // fPresentationTime: Should be set to the frame's presentation time // (seconds, microseconds). This time must be aligned with 'wall-clock time' - i.e., the time that you would get // by calling "gettimeofday()". // fDurationInMicroseconds: Should be set to the frame's duration, if known. // If, however, the device is a 'live source' (e.g., encoded from a camera or microphone), then we probably don't need // to set this variable, because - in this case - data will never arrive 'early'. // Note the code below. if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet u_int8_t* newFrameDataStart = (u_int8_t*)0xDEADBEEF; //%%% TO BE WRITTEN %%% unsigned newFrameSize = 0; //%%% TO BE WRITTEN %%% // Deliver the data here: if (newFrameSize > fMaxSize) { fFrameSize = fMaxSize; fNumTruncatedBytes = newFrameSize - fMaxSize; } else { fFrameSize = newFrameSize; } gettimeofday(&fPresentationTime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead. 
// If the device is *not* a 'live source' (e.g., it comes instead from a file or buffer), then set "fDurationInMicroseconds" here. memmove(fTo, newFrameDataStart, fFrameSize); // After delivering the data, inform the reader that it is now available: FramedSource::afterGetting(this); } // The following code would be called to signal that a new frame of data has become available. // This (unlike other "LIVE555 Streaming Media" library code) may be called from a separate thread. // (Note, however, that "triggerEvent()" cannot be called with the same 'event trigger id' from different threads. // Also, if you want to have multiple device threads, each one using a different 'event trigger id', then you will need // to make "eventTriggerId" a non-static member variable of "DeviceSource".) void signalNewFrameData() { TaskScheduler* ourScheduler = NULL; //%%% TO BE WRITTEN %%% DeviceSource* ourDevice = NULL; //%%% TO BE WRITTEN %%% if (ourScheduler != NULL) { // sanity check ourScheduler->triggerEvent(DeviceSource::eventTriggerId, ourDevice); } } live/liveMedia/MediaSource.cpp000444 001751 000000 00000005052 12265042432 016544 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
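To show how the 'event trigger' in the preceding "DeviceSource" template is meant to be driven, here is a hedged sketch of a capture thread; "waitForNextDeviceFrame()" and "captureThreadFunc()" are assumed placeholders for whatever blocking read and threading setup the real device needs:

  void signalNewFrameData(); // defined in "DeviceSource.cpp" above

  static void waitForNextDeviceFrame() {
    // Assumption: blocks (e.g., in a poll()/read() on the device) until a
    // complete frame has been captured.
  }

  static void* captureThreadFunc(void* /*arg*/) {
    for (;;) {
      waitForNextDeviceFrame();
      signalNewFrameData(); // "triggerEvent()" is the one call that is safe
                            // to make from outside the event-loop thread;
                            // the loop then runs "deliverFrame()"
    }
    return NULL; // not reached
  }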
// Media Sources // Implementation #include "MediaSource.hh" ////////// MediaSource ////////// MediaSource::MediaSource(UsageEnvironment& env) : Medium(env) { } MediaSource::~MediaSource() { } Boolean MediaSource::isSource() const { return True; } char const* MediaSource::MIMEtype() const { return "application/OCTET-STREAM"; // default type } Boolean MediaSource::isFramedSource() const { return False; // default implementation } Boolean MediaSource::isRTPSource() const { return False; // default implementation } Boolean MediaSource::isMPEG1or2VideoStreamFramer() const { return False; // default implementation } Boolean MediaSource::isMPEG4VideoStreamFramer() const { return False; // default implementation } Boolean MediaSource::isH264VideoStreamFramer() const { return False; // default implementation } Boolean MediaSource::isH265VideoStreamFramer() const { return False; // default implementation } Boolean MediaSource::isDVVideoStreamFramer() const { return False; // default implementation } Boolean MediaSource::isJPEGVideoSource() const { return False; // default implementation } Boolean MediaSource::isAMRAudioSource() const { return False; // default implementation } Boolean MediaSource::lookupByName(UsageEnvironment& env, char const* sourceName, MediaSource*& resultSource) { resultSource = NULL; // unless we succeed Medium* medium; if (!Medium::lookupByName(env, sourceName, medium)) return False; if (!medium->isSource()) { env.setResultMsg(sourceName, " is not a media source"); return False; } resultSource = (MediaSource*)medium; return True; } void MediaSource::getAttributes() const { // Default implementation envir().setResultMsg(""); } live/liveMedia/COPYING000755 001751 000000 00000000000 12265042432 016132 2../COPYINGustar00rsfwheel000000 000000 live/liveMedia/MP3FileSource.cpp000444 001751 000000 00000012727 12265042432 016733 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// MP3 File Sources // Implementation #include "MP3FileSource.hh" #include "MP3StreamState.hh" #include "InputFile.hh" ////////// MP3FileSource ////////// MP3FileSource::MP3FileSource(UsageEnvironment& env, FILE* fid) : FramedFileSource(env, fid), fStreamState(new MP3StreamState(env)) { } MP3FileSource::~MP3FileSource() { delete fStreamState; } char const* MP3FileSource::MIMEtype() const { return "audio/MPEG"; } MP3FileSource* MP3FileSource::createNew(UsageEnvironment& env, char const* fileName) { MP3FileSource* newSource = NULL; do { FILE* fid; fid = OpenInputFile(env, fileName); if (fid == NULL) break; newSource = new MP3FileSource(env, fid); if (newSource == NULL) break; unsigned fileSize = (unsigned)GetFileSize(fileName, fid); newSource->assignStream(fid, fileSize); if (!newSource->initializeStream()) break; return newSource; } while (0); Medium::close(newSource); return NULL; } float MP3FileSource::filePlayTime() const { return fStreamState->filePlayTime(); } unsigned MP3FileSource::fileSize() const { return fStreamState->fileSize(); } void MP3FileSource::setPresentationTimeScale(unsigned scale) { fStreamState->setPresentationTimeScale(scale); } void MP3FileSource::seekWithinFile(double seekNPT, double streamDuration) { float fileDuration = filePlayTime(); // First, make sure that 0.0 <= seekNPT <= seekNPT + streamDuration <= fileDuration if (seekNPT < 0.0) { seekNPT = 0.0; } else if (seekNPT > fileDuration) { seekNPT = fileDuration; } if (streamDuration < 0.0) { streamDuration = 0.0; } else if (seekNPT + streamDuration > fileDuration) { streamDuration = fileDuration - seekNPT; } float seekFraction = (float)seekNPT/fileDuration; unsigned seekByteNumber = fStreamState->getByteNumberFromPositionFraction(seekFraction); fStreamState->seekWithinFile(seekByteNumber); fLimitNumBytesToStream = False; // by default if (streamDuration > 0.0) { float endFraction = (float)(seekNPT + streamDuration)/fileDuration; unsigned endByteNumber = fStreamState->getByteNumberFromPositionFraction(endFraction); if (endByteNumber > seekByteNumber) { // sanity check fNumBytesToStream = endByteNumber - seekByteNumber; fLimitNumBytesToStream = True; } } else { } } void MP3FileSource::getAttributes() const { char buffer[200]; fStreamState->getAttributes(buffer, sizeof buffer); envir().setResultMsg(buffer); } void MP3FileSource::doGetNextFrame() { if (!doGetNextFrame1()) { handleClosure(this); return; } // Switch to another task: #if defined(__WIN32__) || defined(_WIN32) // HACK: liveCaster/lc uses an implementation of scheduleDelayedTask() // that performs very badly (chewing up lots of CPU time, apparently polling) // on Windows. Until this is fixed, we just call our "afterGetting()" // function directly. This avoids infinite recursion, as long as our sink // is discontinuous, which is the case for the RTP sink that liveCaster/lc // uses. 
##### afterGetting(this); #else nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)afterGetting, this); #endif } Boolean MP3FileSource::doGetNextFrame1() { if (fLimitNumBytesToStream && fNumBytesToStream == 0) return False; // we've already streamed as much as we were asked for if (!fHaveJustInitialized) { if (fStreamState->findNextHeader(fPresentationTime) == 0) return False; } else { fPresentationTime = fFirstFramePresentationTime; fHaveJustInitialized = False; } if (!fStreamState->readFrame(fTo, fMaxSize, fFrameSize, fDurationInMicroseconds)) { char tmp[200]; sprintf(tmp, "Insufficient buffer size %d for reading MPEG audio frame (needed %d)\n", fMaxSize, fFrameSize); envir().setResultMsg(tmp); fFrameSize = fMaxSize; return False; } if (fNumBytesToStream > fFrameSize) fNumBytesToStream -= fFrameSize; else fNumBytesToStream = 0; return True; } void MP3FileSource::assignStream(FILE* fid, unsigned fileSize) { fStreamState->assignStream(fid, fileSize); } Boolean MP3FileSource::initializeStream() { // Make sure the file has an appropriate header near the start: if (fStreamState->findNextHeader(fFirstFramePresentationTime) == 0) { envir().setResultMsg("not an MPEG audio file"); return False; } fStreamState->checkForXingHeader(); // in case this is a VBR file fHaveJustInitialized = True; fLimitNumBytesToStream = False; fNumBytesToStream = 0; // Hack: It's possible that our environment's 'result message' has been // reset within this function, so set it again to our name now: envir().setResultMsg(name()); return True; } live/liveMedia/FileSink.cpp000444 001751 000000 00000012417 12265042432 016053 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
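Tying the preceding source class to the sink class that follows: a minimal sketch of copying an MP3 file frame-by-frame through the event loop. The file names are placeholders, and error handling is omitted:

  #include "liveMedia.hh"
  #include "BasicUsageEnvironment.hh"

  int main() {
    TaskScheduler* scheduler = BasicTaskScheduler::createNew();
    UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

    // Read MPEG audio frames from one file, and write each frame to another:
    MP3FileSource* source = MP3FileSource::createNew(*env, "in.mp3");
    FileSink* sink = FileSink::createNew(*env, "out.mp3",
                                         20000/*bufferSize*/, False/*oneFilePerFrame*/);
    sink->startPlaying(*source, NULL/*afterFunc*/, NULL/*afterClientData*/);
    env->taskScheduler().doEventLoop(); // does not return
    return 0;
  }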
// File sinks // Implementation #if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE) #include <io.h> #include <fcntl.h> #endif #include "FileSink.hh" #include "GroupsockHelper.hh" #include "OutputFile.hh" ////////// FileSink ////////// FileSink::FileSink(UsageEnvironment& env, FILE* fid, unsigned bufferSize, char const* perFrameFileNamePrefix) : MediaSink(env), fOutFid(fid), fBufferSize(bufferSize), fSamePresentationTimeCounter(0) { fBuffer = new unsigned char[bufferSize]; if (perFrameFileNamePrefix != NULL) { fPerFrameFileNamePrefix = strDup(perFrameFileNamePrefix); fPerFrameFileNameBuffer = new char[strlen(perFrameFileNamePrefix) + 100]; } else { fPerFrameFileNamePrefix = NULL; fPerFrameFileNameBuffer = NULL; } fPrevPresentationTime.tv_sec = ~0; fPrevPresentationTime.tv_usec = 0; } FileSink::~FileSink() { delete[] fPerFrameFileNameBuffer; delete[] fPerFrameFileNamePrefix; delete[] fBuffer; if (fOutFid != NULL) fclose(fOutFid); } FileSink* FileSink::createNew(UsageEnvironment& env, char const* fileName, unsigned bufferSize, Boolean oneFilePerFrame) { do { FILE* fid; char const* perFrameFileNamePrefix; if (oneFilePerFrame) { // Create the fid for each frame fid = NULL; perFrameFileNamePrefix = fileName; } else { // Normal case: create the fid once fid = OpenOutputFile(env, fileName); if (fid == NULL) break; perFrameFileNamePrefix = NULL; } return new FileSink(env, fid, bufferSize, perFrameFileNamePrefix); } while (0); return NULL; } Boolean FileSink::continuePlaying() { if (fSource == NULL) return False; fSource->getNextFrame(fBuffer, fBufferSize, afterGettingFrame, this, onSourceClosure, this); return True; } void FileSink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) { FileSink* sink = (FileSink*)clientData; sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime); } void FileSink::addData(unsigned char const* data, unsigned dataSize, struct timeval presentationTime) { if (fPerFrameFileNameBuffer != NULL && fOutFid == NULL) { // Special case: Open a new file on-the-fly for this frame if (presentationTime.tv_usec == fPrevPresentationTime.tv_usec && presentationTime.tv_sec == fPrevPresentationTime.tv_sec) { // The presentation time is unchanged from the previous frame, so we add a 'counter' // suffix to the file name, to distinguish them: sprintf(fPerFrameFileNameBuffer, "%s-%lu.%06lu-%u", fPerFrameFileNamePrefix, presentationTime.tv_sec, presentationTime.tv_usec, ++fSamePresentationTimeCounter); } else { sprintf(fPerFrameFileNameBuffer, "%s-%lu.%06lu", fPerFrameFileNamePrefix, presentationTime.tv_sec, presentationTime.tv_usec); fPrevPresentationTime = presentationTime; // for next time fSamePresentationTimeCounter = 0; // for next time } fOutFid = OpenOutputFile(envir(), fPerFrameFileNameBuffer); } // Write to our file: #ifdef TEST_LOSS static unsigned const framesPerPacket = 10; static unsigned frameCount = 0; static Boolean packetIsLost; if ((frameCount++)%framesPerPacket == 0) { packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss ##### } if (!packetIsLost) #endif if (fOutFid != NULL && data != NULL) { fwrite(data, 1, dataSize, fOutFid); } } void FileSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime) { if (numTruncatedBytes > 0) { envir() << "FileSink::afterGettingFrame(): The input frame data was too large for our buffer size (" << fBufferSize << ").
" << numTruncatedBytes << " bytes of trailing data was dropped! Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call to at least " << fBufferSize + numTruncatedBytes << "\n"; } addData(fBuffer, frameSize, presentationTime); if (fOutFid == NULL || fflush(fOutFid) == EOF) { // The output file has closed. Handle this the same way as if the input source had closed: if (fSource != NULL) fSource->stopGettingFrames(); onSourceClosure(); return; } if (fPerFrameFileNameBuffer != NULL) { if (fOutFid != NULL) { fclose(fOutFid); fOutFid = NULL; } } // Then try getting the next frame: continuePlaying(); } live/liveMedia/RTPInterface.cpp000444 001751 000000 00000054740 12265042432 016642 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // An abstraction of a network interface used for RTP (or RTCP). // (This allows the RTP-over-TCP hack (RFC 2326, section 10.12) to // be implemented transparently.) // Implementation #include "RTPInterface.hh" #include #include ////////// Helper Functions - Definition ////////// // Helper routines and data structures, used to implement // sending/receiving RTP/RTCP over a TCP socket: // Reading RTP-over-TCP is implemented using two levels of hash tables. // The top-level hash table maps TCP socket numbers to a // "SocketDescriptor" that contains a hash table for each of the // sub-channels that are reading from this socket. 
static HashTable* socketHashTable(UsageEnvironment& env, Boolean createIfNotPresent = True) { _Tables* ourTables = _Tables::getOurTables(env, createIfNotPresent); if (ourTables == NULL) return NULL; if (ourTables->socketTable == NULL) { // Create a new socket number -> SocketDescriptor mapping table: ourTables->socketTable = HashTable::create(ONE_WORD_HASH_KEYS); } return (HashTable*)(ourTables->socketTable); } class SocketDescriptor { public: SocketDescriptor(UsageEnvironment& env, int socketNum); virtual ~SocketDescriptor(); void registerRTPInterface(unsigned char streamChannelId, RTPInterface* rtpInterface); RTPInterface* lookupRTPInterface(unsigned char streamChannelId); void deregisterRTPInterface(unsigned char streamChannelId); void setServerRequestAlternativeByteHandler(ServerRequestAlternativeByteHandler* handler, void* clientData) { fServerRequestAlternativeByteHandler = handler; fServerRequestAlternativeByteHandlerClientData = clientData; } private: static void tcpReadHandler(SocketDescriptor*, int mask); Boolean tcpReadHandler1(int mask); private: UsageEnvironment& fEnv; int fOurSocketNum; HashTable* fSubChannelHashTable; ServerRequestAlternativeByteHandler* fServerRequestAlternativeByteHandler; void* fServerRequestAlternativeByteHandlerClientData; u_int8_t fStreamChannelId, fSizeByte1; Boolean fReadErrorOccurred, fDeleteMyselfNext, fAreInReadHandlerLoop; enum { AWAITING_DOLLAR, AWAITING_STREAM_CHANNEL_ID, AWAITING_SIZE1, AWAITING_SIZE2, AWAITING_PACKET_DATA } fTCPReadingState; }; static SocketDescriptor* lookupSocketDescriptor(UsageEnvironment& env, int sockNum, Boolean createIfNotFound = True) { HashTable* table = socketHashTable(env, createIfNotFound); if (table == NULL) return NULL; char const* key = (char const*)(long)sockNum; SocketDescriptor* socketDescriptor = (SocketDescriptor*)(table->Lookup(key)); if (socketDescriptor == NULL) { if (createIfNotFound) { socketDescriptor = new SocketDescriptor(env, sockNum); table->Add((char const*)(long)(sockNum), socketDescriptor); } else if (table->IsEmpty()) { // We can also delete the table (to reclaim space): _Tables* ourTables = _Tables::getOurTables(env); delete table; ourTables->socketTable = NULL; ourTables->reclaimIfPossible(); } } return socketDescriptor; } static void removeSocketDescription(UsageEnvironment& env, int sockNum) { char const* key = (char const*)(long)sockNum; HashTable* table = socketHashTable(env); table->Remove(key); if (table->IsEmpty()) { // We can also delete the table (to reclaim space): _Tables* ourTables = _Tables::getOurTables(env); delete table; ourTables->socketTable = NULL; ourTables->reclaimIfPossible(); } } ////////// RTPInterface - Implementation ////////// RTPInterface::RTPInterface(Medium* owner, Groupsock* gs) : fOwner(owner), fGS(gs), fTCPStreams(NULL), fNextTCPReadSize(0), fNextTCPReadStreamSocketNum(-1), fNextTCPReadStreamChannelId(0xFF), fReadHandlerProc(NULL), fAuxReadHandlerFunc(NULL), fAuxReadHandlerClientData(NULL) { // Make the socket non-blocking, even though it will be read from only asynchronously, when packets arrive. // The reason for this is that, in some OSs, reads on a blocking socket can (allegedly) sometimes block, // even if the socket was previously reported (e.g., by "select()") as having data available. // (This can supposedly happen if the UDP checksum fails, for example.) 
makeSocketNonBlocking(fGS->socketNum()); increaseSendBufferTo(envir(), fGS->socketNum(), 50*1024); } RTPInterface::~RTPInterface() { stopNetworkReading(); delete fTCPStreams; } void RTPInterface::setStreamSocket(int sockNum, unsigned char streamChannelId) { fGS->removeAllDestinations(); envir().taskScheduler().disableBackgroundHandling(fGS->socketNum()); // turn off any reading on our datagram socket fGS->reset(); // and close our datagram socket, because we won't be using it anymore addStreamSocket(sockNum, streamChannelId); } void RTPInterface::addStreamSocket(int sockNum, unsigned char streamChannelId) { if (sockNum < 0) return; for (tcpStreamRecord* streams = fTCPStreams; streams != NULL; streams = streams->fNext) { if (streams->fStreamSocketNum == sockNum && streams->fStreamChannelId == streamChannelId) { return; // we already have it } } fTCPStreams = new tcpStreamRecord(sockNum, streamChannelId, fTCPStreams); // Also, make sure this new socket is set up for receiving RTP/RTCP-over-TCP: SocketDescriptor* socketDescriptor = lookupSocketDescriptor(envir(), sockNum); socketDescriptor->registerRTPInterface(streamChannelId, this); } static void deregisterSocket(UsageEnvironment& env, int sockNum, unsigned char streamChannelId) { SocketDescriptor* socketDescriptor = lookupSocketDescriptor(env, sockNum, False); if (socketDescriptor != NULL) { socketDescriptor->deregisterRTPInterface(streamChannelId); // Note: This may delete "socketDescriptor", // if no more interfaces are using this socket } } void RTPInterface::removeStreamSocket(int sockNum, unsigned char streamChannelId) { for (tcpStreamRecord** streamsPtr = &fTCPStreams; *streamsPtr != NULL; streamsPtr = &((*streamsPtr)->fNext)) { if ((*streamsPtr)->fStreamSocketNum == sockNum && (*streamsPtr)->fStreamChannelId == streamChannelId) { deregisterSocket(envir(), sockNum, streamChannelId); // Then remove the record pointed to by *streamsPtr : tcpStreamRecord* next = (*streamsPtr)->fNext; (*streamsPtr)->fNext = NULL; delete (*streamsPtr); *streamsPtr = next; return; } } } void RTPInterface::setServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum, ServerRequestAlternativeByteHandler* handler, void* clientData) { SocketDescriptor* socketDescriptor = lookupSocketDescriptor(env, socketNum, False); if (socketDescriptor != NULL) socketDescriptor->setServerRequestAlternativeByteHandler(handler, clientData); } void RTPInterface::clearServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum) { setServerRequestAlternativeByteHandler(env, socketNum, NULL, NULL); } Boolean RTPInterface::sendPacket(unsigned char* packet, unsigned packetSize) { Boolean success = True; // we'll return False instead if any of the sends fail // Normal case: Send as a UDP packet: if (!fGS->output(envir(), fGS->ttl(), packet, packetSize)) success = False; // Also, send over each of our TCP sockets: for (tcpStreamRecord* streams = fTCPStreams; streams != NULL; streams = streams->fNext) { if (!sendRTPorRTCPPacketOverTCP(packet, packetSize, streams->fStreamSocketNum, streams->fStreamChannelId)) { success = False; } } return success; } void RTPInterface ::startNetworkReading(TaskScheduler::BackgroundHandlerProc* handlerProc) { // Normal case: Arrange to read UDP packets: envir().taskScheduler(). 
turnOnBackgroundReadHandling(fGS->socketNum(), handlerProc, fOwner); // Also, receive RTP over TCP, on each of our TCP connections: fReadHandlerProc = handlerProc; for (tcpStreamRecord* streams = fTCPStreams; streams != NULL; streams = streams->fNext) { // Get a socket descriptor for "streams->fStreamSocketNum": SocketDescriptor* socketDescriptor = lookupSocketDescriptor(envir(), streams->fStreamSocketNum); // Tell it about our subChannel: socketDescriptor->registerRTPInterface(streams->fStreamChannelId, this); } } Boolean RTPInterface::handleRead(unsigned char* buffer, unsigned bufferMaxSize, unsigned& bytesRead, struct sockaddr_in& fromAddress, Boolean& packetReadWasIncomplete) { packetReadWasIncomplete = False; // by default Boolean readSuccess; if (fNextTCPReadStreamSocketNum < 0) { // Normal case: read from the (datagram) 'groupsock': readSuccess = fGS->handleRead(buffer, bufferMaxSize, bytesRead, fromAddress); } else { // Read from the TCP connection: bytesRead = 0; unsigned totBytesToRead = fNextTCPReadSize; if (totBytesToRead > bufferMaxSize) totBytesToRead = bufferMaxSize; unsigned curBytesToRead = totBytesToRead; int curBytesRead; while ((curBytesRead = readSocket(envir(), fNextTCPReadStreamSocketNum, &buffer[bytesRead], curBytesToRead, fromAddress)) > 0) { bytesRead += curBytesRead; if (bytesRead >= totBytesToRead) break; curBytesToRead -= curBytesRead; } fNextTCPReadSize -= bytesRead; if (fNextTCPReadSize == 0) { // We've read all of the data that we asked for readSuccess = True; } else if (curBytesRead < 0) { // There was an error reading the socket bytesRead = 0; readSuccess = False; } else { // We need to read more bytes, and there was not an error reading the socket packetReadWasIncomplete = True; return True; } fNextTCPReadStreamSocketNum = -1; // default, for next time } if (readSuccess && fAuxReadHandlerFunc != NULL) { // Also pass the newly-read packet data to our auxilliary handler: (*fAuxReadHandlerFunc)(fAuxReadHandlerClientData, buffer, bytesRead); } return readSuccess; } void RTPInterface::stopNetworkReading() { // Normal case envir().taskScheduler().turnOffBackgroundReadHandling(fGS->socketNum()); // Also turn off read handling on each of our TCP connections: for (tcpStreamRecord* streams = fTCPStreams; streams != NULL; streams = streams->fNext) { deregisterSocket(envir(), streams->fStreamSocketNum, streams->fStreamChannelId); } } ////////// Helper Functions - Implementation ///////// Boolean RTPInterface::sendRTPorRTCPPacketOverTCP(u_int8_t* packet, unsigned packetSize, int socketNum, unsigned char streamChannelId) { #ifdef DEBUG_SEND fprintf(stderr, "sendRTPorRTCPPacketOverTCP: %d bytes over channel %d (socket %d)\n", packetSize, streamChannelId, socketNum); fflush(stderr); #endif // Send a RTP/RTCP packet over TCP, using the encoding defined in RFC 2326, section 10.12: // $ // (If the initial "send()" of '$' succeeds, then we force // the subsequent "send()" for the data to succeed, even if we have to do so with // a blocking "send()".) do { u_int8_t framingHeader[4]; framingHeader[0] = '$'; framingHeader[1] = streamChannelId; framingHeader[2] = (u_int8_t) ((packetSize&0xFF00)>>8); framingHeader[3] = (u_int8_t) (packetSize&0xFF); if (!sendDataOverTCP(socketNum, framingHeader, 4, False)) break; if (!sendDataOverTCP(socketNum, packet, packetSize, True)) break; #ifdef DEBUG_SEND fprintf(stderr, "sendRTPorRTCPPacketOverTCP: completed\n"); fflush(stderr); #endif return True; } while (0); #ifdef DEBUG_SEND fprintf(stderr, "sendRTPorRTCPPacketOverTCP: failed! 
(errno %d)\n", envir().getErrno()); fflush(stderr); #endif return False; } Boolean RTPInterface::sendDataOverTCP(int socketNum, u_int8_t const* data, unsigned dataSize, Boolean forceSendToSucceed) { int sendResult = send(socketNum, (char const*)data, dataSize, 0/*flags*/); if (sendResult < (int)dataSize) { // The TCP send() failed - at least partially. unsigned numBytesSentSoFar = sendResult < 0 ? 0 : (unsigned)sendResult; if (numBytesSentSoFar > 0 || (forceSendToSucceed && envir().getErrno() == EAGAIN)) { // The OS's TCP send buffer has filled up (because the stream's bitrate has exceeded // the capacity of the TCP connection!). // Force this data write to succeed, by blocking if necessary until it does: unsigned numBytesRemainingToSend = dataSize - numBytesSentSoFar; #ifdef DEBUG_SEND fprintf(stderr, "sendDataOverTCP: resending %d-byte send (blocking)\n", numBytesRemainingToSend); fflush(stderr); #endif makeSocketBlocking(socketNum); sendResult = send(socketNum, (char const*)(&data[numBytesSentSoFar]), numBytesRemainingToSend, 0/*flags*/); makeSocketNonBlocking(socketNum); return sendResult == (int)numBytesRemainingToSend; } return False; } return True; } SocketDescriptor::SocketDescriptor(UsageEnvironment& env, int socketNum) :fEnv(env), fOurSocketNum(socketNum), fSubChannelHashTable(HashTable::create(ONE_WORD_HASH_KEYS)), fServerRequestAlternativeByteHandler(NULL), fServerRequestAlternativeByteHandlerClientData(NULL), fReadErrorOccurred(False), fDeleteMyselfNext(False), fAreInReadHandlerLoop(False), fTCPReadingState(AWAITING_DOLLAR) { } SocketDescriptor::~SocketDescriptor() { fEnv.taskScheduler().turnOffBackgroundReadHandling(fOurSocketNum); removeSocketDescription(fEnv, fOurSocketNum); if (fSubChannelHashTable != NULL) { // Remove knowledge of this socket from any "RTPInterface"s that are using it: HashTable::Iterator* iter = HashTable::Iterator::create(*fSubChannelHashTable); RTPInterface* rtpInterface; char const* key; while ((rtpInterface = (RTPInterface*)(iter->next(key))) != NULL) { u_int64_t streamChannelIdLong = (u_int64_t)key; unsigned char streamChannelId = (unsigned char)streamChannelIdLong; rtpInterface->removeStreamSocket(fOurSocketNum, streamChannelId); } delete iter; // Then remove the hash table entries themselves, and then remove the hash table: while (fSubChannelHashTable->RemoveNext() != NULL) {} delete fSubChannelHashTable; } // Finally: if (fServerRequestAlternativeByteHandler != NULL) { // Hack: Pass a special character to our alternative byte handler, to tell it that either // - an error occurred when reading the TCP socket, or // - no error occurred, but it needs to take over control of the TCP socket once again. u_int8_t specialChar = fReadErrorOccurred ? 0xFF : 0xFE; (*fServerRequestAlternativeByteHandler)(fServerRequestAlternativeByteHandlerClientData, specialChar); } } void SocketDescriptor::registerRTPInterface(unsigned char streamChannelId, RTPInterface* rtpInterface) { Boolean isFirstRegistration = fSubChannelHashTable->IsEmpty(); #if defined(DEBUG_SEND)||defined(DEBUG_RECEIVE) fprintf(stderr, "SocketDescriptor(socket %d)::registerRTPInterface(channel %d): isFirstRegistration %d\n", fOurSocketNum, streamChannelId, isFirstRegistration); #endif fSubChannelHashTable->Add((char const*)(long)streamChannelId, rtpInterface); if (isFirstRegistration) { // Arrange to handle reads on this TCP socket: TaskScheduler::BackgroundHandlerProc* handler = (TaskScheduler::BackgroundHandlerProc*)&tcpReadHandler; fEnv.taskScheduler(). 
setBackgroundHandling(fOurSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION, handler, this); } } RTPInterface* SocketDescriptor ::lookupRTPInterface(unsigned char streamChannelId) { char const* lookupArg = (char const*)(long)streamChannelId; return (RTPInterface*)(fSubChannelHashTable->Lookup(lookupArg)); } void SocketDescriptor ::deregisterRTPInterface(unsigned char streamChannelId) { #if defined(DEBUG_SEND)||defined(DEBUG_RECEIVE) fprintf(stderr, "SocketDescriptor(socket %d)::deregisterRTPInterface(channel %d)\n", fOurSocketNum, streamChannelId); #endif fSubChannelHashTable->Remove((char const*)(long)streamChannelId); if (fSubChannelHashTable->IsEmpty()) { // No more interfaces are using us, so it's curtains for us now: if (fAreInReadHandlerLoop) { fDeleteMyselfNext = True; // we can't delete ourself yet, but we'll do so from "tcpReadHandler()" below } else { delete this; } } } void SocketDescriptor::tcpReadHandler(SocketDescriptor* socketDescriptor, int mask) { // Call the read handler until it returns false, with a limit to avoid starving other sockets unsigned count = 2000; socketDescriptor->fAreInReadHandlerLoop = True; while (!socketDescriptor->fDeleteMyselfNext && socketDescriptor->tcpReadHandler1(mask) && --count > 0) {} socketDescriptor->fAreInReadHandlerLoop = False; if (socketDescriptor->fDeleteMyselfNext) delete socketDescriptor; } Boolean SocketDescriptor::tcpReadHandler1(int mask) { // We expect the following data over the TCP channel: // optional RTSP command or response bytes (before the first '$' character) // a '$' character // a 1-byte channel id // a 2-byte packet size (in network byte order) // the packet data. // However, because the socket is being read asynchronously, this data might arrive in pieces. u_int8_t c; struct sockaddr_in fromAddress; if (fTCPReadingState != AWAITING_PACKET_DATA) { int result = readSocket(fEnv, fOurSocketNum, &c, 1, fromAddress); if (result == 0) { // There was no more data to read return False; } else if (result != 1) { // error reading TCP socket, so we will no longer handle it #ifdef DEBUG_RECEIVE fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): readSocket(1 byte) returned %d (error)\n", fOurSocketNum, result); #endif fReadErrorOccurred = True; fDeleteMyselfNext = True; return False; } } Boolean callAgain = True; switch (fTCPReadingState) { case AWAITING_DOLLAR: { if (c == '$') { #ifdef DEBUG_RECEIVE fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): Saw '$'\n", fOurSocketNum); #endif fTCPReadingState = AWAITING_STREAM_CHANNEL_ID; } else { // This character is part of a RTSP request or command, which is handled separately: if (fServerRequestAlternativeByteHandler != NULL && c != 0xFF && c != 0xFE) { // Hack: 0xFF and 0xFE are used as special signaling characters, so don't send them (*fServerRequestAlternativeByteHandler)(fServerRequestAlternativeByteHandlerClientData, c); } } break; } case AWAITING_STREAM_CHANNEL_ID: { // The byte that we read is the stream channel id. if (lookupRTPInterface(c) != NULL) { // sanity check fStreamChannelId = c; fTCPReadingState = AWAITING_SIZE1; } else { // This wasn't a stream channel id that we expected. We're (somehow) in a strange state. Try to recover: #ifdef DEBUG_RECEIVE fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): Saw nonexistent stream channel id: 0x%02x\n", fOurSocketNum, c); #endif fTCPReadingState = AWAITING_DOLLAR; } break; } case AWAITING_SIZE1: { // The byte that we read is the first (high) byte of the 16-bit RTP or RTCP packet 'size'. 
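// (Concretely, per RFC 2326 section 10.12, a 1480-byte RTP packet on
// channel 0 arrives as: '$' (0x24), 0x00, 0x05, 0xC8, then the 1480 bytes
// of packet data. So at this point 0x05 would be saved in "fSizeByte1",
// and the low-order byte 0xC8 is handled by the next state.)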
fSizeByte1 = c; fTCPReadingState = AWAITING_SIZE2; break; } case AWAITING_SIZE2: { // The byte that we read is the second (low) byte of the 16-bit RTP or RTCP packet 'size'. unsigned short size = (fSizeByte1<<8)|c; // Record the information about the packet data that will be read next: RTPInterface* rtpInterface = lookupRTPInterface(fStreamChannelId); if (rtpInterface != NULL) { rtpInterface->fNextTCPReadSize = size; rtpInterface->fNextTCPReadStreamSocketNum = fOurSocketNum; rtpInterface->fNextTCPReadStreamChannelId = fStreamChannelId; } fTCPReadingState = AWAITING_PACKET_DATA; break; } case AWAITING_PACKET_DATA: { callAgain = False; fTCPReadingState = AWAITING_DOLLAR; // the next state, unless we end up having to read more data in the current state // Call the appropriate read handler to get the packet data from the TCP stream: RTPInterface* rtpInterface = lookupRTPInterface(fStreamChannelId); if (rtpInterface != NULL) { if (rtpInterface->fNextTCPReadSize == 0) { // We've already read all the data for this packet. break; } if (rtpInterface->fReadHandlerProc != NULL) { #ifdef DEBUG_RECEIVE fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): reading %d bytes on channel %d\n", fOurSocketNum, rtpInterface->fNextTCPReadSize, rtpInterface->fNextTCPReadStreamChannelId); #endif fTCPReadingState = AWAITING_PACKET_DATA; rtpInterface->fReadHandlerProc(rtpInterface->fOwner, mask); } else { #ifdef DEBUG_RECEIVE fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): No handler proc for \"rtpInterface\" for channel %d; need to skip %d remaining bytes\n", fOurSocketNum, fStreamChannelId, rtpInterface->fNextTCPReadSize); #endif int result = readSocket(fEnv, fOurSocketNum, &c, 1, fromAddress); if (result < 0) { // error reading TCP socket, so we will no longer handle it #ifdef DEBUG_RECEIVE fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): readSocket(1 byte) returned %d (error)\n", fOurSocketNum, result); #endif fReadErrorOccurred = True; fDeleteMyselfNext = True; return False; } else { fTCPReadingState = AWAITING_PACKET_DATA; if (result == 1) { --rtpInterface->fNextTCPReadSize; callAgain = True; } } } } #ifdef DEBUG_RECEIVE else fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): No \"rtpInterface\" for channel %d\n", fOurSocketNum, fStreamChannelId); #endif } } return callAgain; } ////////// tcpStreamRecord implementation ////////// tcpStreamRecord ::tcpStreamRecord(int streamSocketNum, unsigned char streamChannelId, tcpStreamRecord* next) : fNext(next), fStreamSocketNum(streamSocketNum), fStreamChannelId(streamChannelId) { } tcpStreamRecord::~tcpStreamRecord() { delete fNext; } live/liveMedia/MediaSink.cpp000400 001751 000000 00000014513 12265042432 016202 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Media Sinks // Implementation #include "MediaSink.hh" #include "GroupsockHelper.hh" #include ////////// MediaSink ////////// MediaSink::MediaSink(UsageEnvironment& env) : Medium(env), fSource(NULL) { } MediaSink::~MediaSink() { stopPlaying(); } Boolean MediaSink::isSink() const { return True; } Boolean MediaSink::lookupByName(UsageEnvironment& env, char const* sinkName, MediaSink*& resultSink) { resultSink = NULL; // unless we succeed Medium* medium; if (!Medium::lookupByName(env, sinkName, medium)) return False; if (!medium->isSink()) { env.setResultMsg(sinkName, " is not a media sink"); return False; } resultSink = (MediaSink*)medium; return True; } Boolean MediaSink::sourceIsCompatibleWithUs(MediaSource& source) { // We currently support only framed sources. return source.isFramedSource(); } Boolean MediaSink::startPlaying(MediaSource& source, afterPlayingFunc* afterFunc, void* afterClientData) { // Make sure we're not already being played: if (fSource != NULL) { envir().setResultMsg("This sink is already being played"); return False; } // Make sure our source is compatible: if (!sourceIsCompatibleWithUs(source)) { envir().setResultMsg("MediaSink::startPlaying(): source is not compatible!"); return False; } fSource = (FramedSource*)&source; fAfterFunc = afterFunc; fAfterClientData = afterClientData; return continuePlaying(); } void MediaSink::stopPlaying() { // First, tell the source that we're no longer interested: if (fSource != NULL) fSource->stopGettingFrames(); // Cancel any pending tasks: envir().taskScheduler().unscheduleDelayedTask(nextTask()); fSource = NULL; // indicates that we can be played again fAfterFunc = NULL; } void MediaSink::onSourceClosure(void* clientData) { MediaSink* sink = (MediaSink*)clientData; sink->onSourceClosure(); } void MediaSink::onSourceClosure() { // Cancel any pending tasks: envir().taskScheduler().unscheduleDelayedTask(nextTask()); fSource = NULL; // indicates that we can be played again if (fAfterFunc != NULL) { (*fAfterFunc)(fAfterClientData); } } Boolean MediaSink::isRTPSink() const { return False; // default implementation } ////////// OutPacketBuffer ////////// unsigned OutPacketBuffer::maxSize = 60000; // by default OutPacketBuffer::OutPacketBuffer(unsigned preferredPacketSize, unsigned maxPacketSize) : fPreferred(preferredPacketSize), fMax(maxPacketSize), fOverflowDataSize(0) { unsigned maxNumPackets = (maxSize + (maxPacketSize-1))/maxPacketSize; fLimit = maxNumPackets*maxPacketSize; fBuf = new unsigned char[fLimit]; resetPacketStart(); resetOffset(); resetOverflowData(); } OutPacketBuffer::~OutPacketBuffer() { delete[] fBuf; } void OutPacketBuffer::enqueue(unsigned char const* from, unsigned numBytes) { if (numBytes > totalBytesAvailable()) { #ifdef DEBUG fprintf(stderr, "OutPacketBuffer::enqueue() warning: %d > %d\n", numBytes, totalBytesAvailable()); #endif numBytes = totalBytesAvailable(); } if (curPtr() != from) memmove(curPtr(), from, numBytes); increment(numBytes); } void OutPacketBuffer::enqueueWord(u_int32_t word) { u_int32_t nWord = htonl(word); enqueue((unsigned char*)&nWord, 4); } void OutPacketBuffer::insert(unsigned char const* from, unsigned numBytes, unsigned toPosition) { unsigned realToPosition = fPacketStart + toPosition; 
  if (realToPosition + numBytes > fLimit) {
    if (realToPosition > fLimit) return; // we can't do this
    numBytes = fLimit - realToPosition;
  }

  memmove(&fBuf[realToPosition], from, numBytes);
  if (toPosition + numBytes > fCurOffset) {
    fCurOffset = toPosition + numBytes;
  }
}

void OutPacketBuffer::insertWord(u_int32_t word, unsigned toPosition) {
  u_int32_t nWord = htonl(word);
  insert((unsigned char*)&nWord, 4, toPosition);
}

void OutPacketBuffer::extract(unsigned char* to, unsigned numBytes,
                              unsigned fromPosition) {
  unsigned realFromPosition = fPacketStart + fromPosition;
  if (realFromPosition + numBytes > fLimit) { // sanity check
    if (realFromPosition > fLimit) return; // we can't do this
    numBytes = fLimit - realFromPosition;
  }

  memmove(to, &fBuf[realFromPosition], numBytes);
}

u_int32_t OutPacketBuffer::extractWord(unsigned fromPosition) {
  u_int32_t nWord;
  extract((unsigned char*)&nWord, 4, fromPosition);
  return ntohl(nWord);
}

void OutPacketBuffer::skipBytes(unsigned numBytes) {
  if (numBytes > totalBytesAvailable()) {
    numBytes = totalBytesAvailable();
  }

  increment(numBytes);
}

void OutPacketBuffer
::setOverflowData(unsigned overflowDataOffset,
                  unsigned overflowDataSize,
                  struct timeval const& presentationTime,
                  unsigned durationInMicroseconds) {
  fOverflowDataOffset = overflowDataOffset;
  fOverflowDataSize = overflowDataSize;
  fOverflowPresentationTime = presentationTime;
  fOverflowDurationInMicroseconds = durationInMicroseconds;
}

void OutPacketBuffer::useOverflowData() {
  enqueue(&fBuf[fPacketStart + fOverflowDataOffset], fOverflowDataSize);
  fCurOffset -= fOverflowDataSize; // undoes increment performed by "enqueue"
  resetOverflowData();
}

void OutPacketBuffer::adjustPacketStart(unsigned numBytes) {
  fPacketStart += numBytes;
  if (fOverflowDataOffset >= numBytes) {
    fOverflowDataOffset -= numBytes;
  } else {
    fOverflowDataOffset = 0;
    fOverflowDataSize = 0; // an error otherwise
  }
}

void OutPacketBuffer::resetPacketStart() {
  if (fOverflowDataSize > 0) {
    fOverflowDataOffset += fPacketStart;
  }
  fPacketStart = 0;
}
live/liveMedia/Media.cpp
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// Media // Implementation #include "Media.hh" #include "HashTable.hh" ////////// Medium ////////// Medium::Medium(UsageEnvironment& env) : fEnviron(env), fNextTask(NULL) { // First generate a name for the new medium: MediaLookupTable::ourMedia(env)->generateNewName(fMediumName, mediumNameMaxLen); env.setResultMsg(fMediumName); // Then add it to our table: MediaLookupTable::ourMedia(env)->addNew(this, fMediumName); } Medium::~Medium() { // Remove any tasks that might be pending for us: fEnviron.taskScheduler().unscheduleDelayedTask(fNextTask); } Boolean Medium::lookupByName(UsageEnvironment& env, char const* mediumName, Medium*& resultMedium) { resultMedium = MediaLookupTable::ourMedia(env)->lookup(mediumName); if (resultMedium == NULL) { env.setResultMsg("Medium ", mediumName, " does not exist"); return False; } return True; } void Medium::close(UsageEnvironment& env, char const* name) { MediaLookupTable::ourMedia(env)->remove(name); } void Medium::close(Medium* medium) { if (medium == NULL) return; close(medium->envir(), medium->name()); } Boolean Medium::isSource() const { return False; // default implementation } Boolean Medium::isSink() const { return False; // default implementation } Boolean Medium::isRTCPInstance() const { return False; // default implementation } Boolean Medium::isRTSPClient() const { return False; // default implementation } Boolean Medium::isRTSPServer() const { return False; // default implementation } Boolean Medium::isMediaSession() const { return False; // default implementation } Boolean Medium::isServerMediaSession() const { return False; // default implementation } Boolean Medium::isDarwinInjector() const { return False; // default implementation } ////////// _Tables implementation ////////// _Tables* _Tables::getOurTables(UsageEnvironment& env, Boolean createIfNotPresent) { if (env.liveMediaPriv == NULL && createIfNotPresent) { env.liveMediaPriv = new _Tables(env); } return (_Tables*)(env.liveMediaPriv); } void _Tables::reclaimIfPossible() { if (mediaTable == NULL && socketTable == NULL) { fEnv.liveMediaPriv = NULL; delete this; } } _Tables::_Tables(UsageEnvironment& env) : mediaTable(NULL), socketTable(NULL), fEnv(env) { } _Tables::~_Tables() { } ////////// MediaLookupTable implementation ////////// MediaLookupTable* MediaLookupTable::ourMedia(UsageEnvironment& env) { _Tables* ourTables = _Tables::getOurTables(env); if (ourTables->mediaTable == NULL) { // Create a new table to record the media that are to be created in // this environment: ourTables->mediaTable = new MediaLookupTable(env); } return ourTables->mediaTable; } Medium* MediaLookupTable::lookup(char const* name) const { return (Medium*)(fTable->Lookup(name)); } void MediaLookupTable::addNew(Medium* medium, char* mediumName) { fTable->Add(mediumName, (void*)medium); } void MediaLookupTable::remove(char const* name) { Medium* medium = lookup(name); if (medium != NULL) { fTable->Remove(name); if (fTable->IsEmpty()) { // We can also delete ourselves (to reclaim space): _Tables* ourTables = _Tables::getOurTables(fEnv); delete this; ourTables->mediaTable = NULL; ourTables->reclaimIfPossible(); } delete medium; } } void MediaLookupTable::generateNewName(char* mediumName, unsigned /*maxLen*/) { // We should really use snprintf() here, but not all systems have it sprintf(mediumName, "liveMedia%d", fNameGenerator++); } MediaLookupTable::MediaLookupTable(UsageEnvironment& env) : fEnv(env), fTable(HashTable::create(STRING_HASH_KEYS)), fNameGenerator(0) { } MediaLookupTable::~MediaLookupTable() { delete 
fTable;
}
live/liveMedia/ServerMediaSession.cpp
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A data structure that represents a session that consists of
// potentially multiple (audio and/or video) sub-sessions
// (This data structure is used for media *streamers* - i.e., servers.
//  For media receivers, use "MediaSession" instead.)
// Implementation

#include "ServerMediaSession.hh"
#include <GroupsockHelper.hh>
#include <math.h>

////////// ServerMediaSession //////////

ServerMediaSession* ServerMediaSession
::createNew(UsageEnvironment& env,
            char const* streamName, char const* info,
            char const* description, Boolean isSSM, char const* miscSDPLines) {
  return new ServerMediaSession(env, streamName, info, description,
                                isSSM, miscSDPLines);
}

Boolean ServerMediaSession
::lookupByName(UsageEnvironment& env, char const* mediumName,
               ServerMediaSession*& resultSession) {
  resultSession = NULL; // unless we succeed

  Medium* medium;
  if (!Medium::lookupByName(env, mediumName, medium)) return False;

  if (!medium->isServerMediaSession()) {
    env.setResultMsg(mediumName, " is not a 'ServerMediaSession' object");
    return False;
  }

  resultSession = (ServerMediaSession*)medium;
  return True;
}

static char const* const libNameStr = "LIVE555 Streaming Media v";
char const* const libVersionStr = LIVEMEDIA_LIBRARY_VERSION_STRING;

ServerMediaSession::ServerMediaSession(UsageEnvironment& env,
                                       char const* streamName,
                                       char const* info,
                                       char const* description,
                                       Boolean isSSM,
                                       char const* miscSDPLines)
  : Medium(env), fIsSSM(isSSM), fSubsessionsHead(NULL),
    fSubsessionsTail(NULL), fSubsessionCounter(0),
    fReferenceCount(0), fDeleteWhenUnreferenced(False) {
  fStreamName = strDup(streamName == NULL ? "" : streamName);

  char* libNamePlusVersionStr = NULL; // by default
  if (info == NULL || description == NULL) {
    libNamePlusVersionStr = new char[strlen(libNameStr) + strlen(libVersionStr) + 1];
    sprintf(libNamePlusVersionStr, "%s%s", libNameStr, libVersionStr);
  }
  fInfoSDPString = strDup(info == NULL ? libNamePlusVersionStr : info);
  fDescriptionSDPString = strDup(description == NULL ? libNamePlusVersionStr : description);
  delete[] libNamePlusVersionStr;

  fMiscSDPLines = strDup(miscSDPLines == NULL ?
"" : miscSDPLines); gettimeofday(&fCreationTime, NULL); } ServerMediaSession::~ServerMediaSession() { deleteAllSubsessions(); delete[] fStreamName; delete[] fInfoSDPString; delete[] fDescriptionSDPString; delete[] fMiscSDPLines; } Boolean ServerMediaSession::addSubsession(ServerMediaSubsession* subsession) { if (subsession->fParentSession != NULL) return False; // it's already used if (fSubsessionsTail == NULL) { fSubsessionsHead = subsession; } else { fSubsessionsTail->fNext = subsession; } fSubsessionsTail = subsession; subsession->fParentSession = this; subsession->fTrackNumber = ++fSubsessionCounter; return True; } void ServerMediaSession::testScaleFactor(float& scale) { // First, try setting all subsessions to the desired scale. // If the subsessions' actual scales differ from each other, choose the // value that's closest to 1, and then try re-setting all subsessions to that // value. If the subsessions' actual scales still differ, re-set them all to 1. float minSSScale = 1.0; float maxSSScale = 1.0; float bestSSScale = 1.0; float bestDistanceTo1 = 0.0; ServerMediaSubsession* subsession; for (subsession = fSubsessionsHead; subsession != NULL; subsession = subsession->fNext) { float ssscale = scale; subsession->testScaleFactor(ssscale); if (subsession == fSubsessionsHead) { // this is the first subsession minSSScale = maxSSScale = bestSSScale = ssscale; bestDistanceTo1 = (float)fabs(ssscale - 1.0f); } else { if (ssscale < minSSScale) { minSSScale = ssscale; } else if (ssscale > maxSSScale) { maxSSScale = ssscale; } float distanceTo1 = (float)fabs(ssscale - 1.0f); if (distanceTo1 < bestDistanceTo1) { bestSSScale = ssscale; bestDistanceTo1 = distanceTo1; } } } if (minSSScale == maxSSScale) { // All subsessions are at the same scale: minSSScale == bestSSScale == maxSSScale scale = minSSScale; return; } // The scales for each subsession differ. Try to set each one to the value // that's closest to 1: for (subsession = fSubsessionsHead; subsession != NULL; subsession = subsession->fNext) { float ssscale = bestSSScale; subsession->testScaleFactor(ssscale); if (ssscale != bestSSScale) break; // no luck } if (subsession == NULL) { // All subsessions are at the same scale: bestSSScale scale = bestSSScale; return; } // Still no luck. 
Set each subsession's scale to 1: for (subsession = fSubsessionsHead; subsession != NULL; subsession = subsession->fNext) { float ssscale = 1; subsession->testScaleFactor(ssscale); } scale = 1; } float ServerMediaSession::duration() const { float minSubsessionDuration = 0.0; float maxSubsessionDuration = 0.0; for (ServerMediaSubsession* subsession = fSubsessionsHead; subsession != NULL; subsession = subsession->fNext) { // Hack: If any subsession supports seeking by 'absolute' time, then return a negative value, to indicate that only subsessions // will have a "a=range:" attribute: char* absStartTime = NULL; char* absEndTime = NULL; subsession->getAbsoluteTimeRange(absStartTime, absEndTime); if (absStartTime != NULL) return -1.0f; float ssduration = subsession->duration(); if (subsession == fSubsessionsHead) { // this is the first subsession minSubsessionDuration = maxSubsessionDuration = ssduration; } else if (ssduration < minSubsessionDuration) { minSubsessionDuration = ssduration; } else if (ssduration > maxSubsessionDuration) { maxSubsessionDuration = ssduration; } } if (maxSubsessionDuration != minSubsessionDuration) { return -maxSubsessionDuration; // because subsession durations differ } else { return maxSubsessionDuration; // all subsession durations are the same } } void ServerMediaSession::deleteAllSubsessions() { Medium::close(fSubsessionsHead); fSubsessionsHead = fSubsessionsTail = NULL; fSubsessionCounter = 0; } Boolean ServerMediaSession::isServerMediaSession() const { return True; } char* ServerMediaSession::generateSDPDescription() { AddressString ipAddressStr(ourIPAddress(envir())); unsigned ipAddressStrSize = strlen(ipAddressStr.val()); // For a SSM sessions, we need a "a=source-filter: incl ..." line also: char* sourceFilterLine; if (fIsSSM) { char const* const sourceFilterFmt = "a=source-filter: incl IN IP4 * %s\r\n" "a=rtcp-unicast: reflection\r\n"; unsigned const sourceFilterFmtSize = strlen(sourceFilterFmt) + ipAddressStrSize + 1; sourceFilterLine = new char[sourceFilterFmtSize]; sprintf(sourceFilterLine, sourceFilterFmt, ipAddressStr.val()); } else { sourceFilterLine = strDup(""); } char* rangeLine = NULL; // for now char* sdp = NULL; // for now do { // Count the lengths of each subsession's media-level SDP lines. // (We do this first, because the call to "subsession->sdpLines()" // causes correct subsession 'duration()'s to be calculated later.) 
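    // (This is the first of two passes over the subsessions: here we just
    //  total up the lengths; the media-level lines themselves get copied in
    //  below, after the session-level prefix has been written.)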
    unsigned sdpLength = 0;
    ServerMediaSubsession* subsession;
    for (subsession = fSubsessionsHead; subsession != NULL;
         subsession = subsession->fNext) {
      char const* sdpLines = subsession->sdpLines();
      if (sdpLines == NULL) continue; // the media's not available
      sdpLength += strlen(sdpLines);
    }
    if (sdpLength == 0) break; // the session has no usable subsessions

    // Unless subsessions have differing durations, we also have a "a=range:" line:
    float dur = duration();
    if (dur == 0.0) {
      rangeLine = strDup("a=range:npt=0-\r\n");
    } else if (dur > 0.0) {
      char buf[100];
      sprintf(buf, "a=range:npt=0-%.3f\r\n", dur);
      rangeLine = strDup(buf);
    } else { // subsessions have differing durations, so "a=range:" lines go there
      rangeLine = strDup("");
    }

    char const* const sdpPrefixFmt =
      "v=0\r\n"
      "o=- %ld%06ld %d IN IP4 %s\r\n"
      "s=%s\r\n"
      "i=%s\r\n"
      "t=0 0\r\n"
      "a=tool:%s%s\r\n"
      "a=type:broadcast\r\n"
      "a=control:*\r\n"
      "%s"
      "%s"
      "a=x-qt-text-nam:%s\r\n"
      "a=x-qt-text-inf:%s\r\n"
      "%s";
    sdpLength += strlen(sdpPrefixFmt)
      + 20 + 6 + 20 + ipAddressStrSize
      + strlen(fDescriptionSDPString) + strlen(fInfoSDPString)
      + strlen(libNameStr) + strlen(libVersionStr)
      + strlen(sourceFilterLine)
      + strlen(rangeLine)
      + strlen(fDescriptionSDPString)
      + strlen(fInfoSDPString)
      + strlen(fMiscSDPLines);
    sdp = new char[sdpLength];
    if (sdp == NULL) break;

    // Generate the SDP prefix (session-level lines):
    sprintf(sdp, sdpPrefixFmt,
            fCreationTime.tv_sec, fCreationTime.tv_usec, // o= <session id>
            1, // o= <version> // (needs to change if params are modified)
            ipAddressStr.val(), // o= <address>
            fDescriptionSDPString, // s= <description>
            fInfoSDPString, // i= <info>
            libNameStr, libVersionStr, // a=tool:
            sourceFilterLine, // a=source-filter: incl (if a SSM session)
            rangeLine, // a=range: line
            fDescriptionSDPString, // a=x-qt-text-nam: line
            fInfoSDPString, // a=x-qt-text-inf: line
            fMiscSDPLines); // miscellaneous session SDP lines (if any)

    // Then, add the (media-level) lines for each subsession:
    char* mediaSDP = sdp;
    for (subsession = fSubsessionsHead; subsession != NULL;
         subsession = subsession->fNext) {
      mediaSDP += strlen(mediaSDP);
      char const* sdpLines = subsession->sdpLines();
      if (sdpLines != NULL) sprintf(mediaSDP, "%s", sdpLines);
    }
  } while (0);

  delete[] rangeLine; delete[] sourceFilterLine;
  return sdp;
}

////////// ServerMediaSubsessionIterator //////////

ServerMediaSubsessionIterator
::ServerMediaSubsessionIterator(ServerMediaSession& session)
  : fOurSession(session) {
  reset();
}

ServerMediaSubsessionIterator::~ServerMediaSubsessionIterator() {
}

ServerMediaSubsession* ServerMediaSubsessionIterator::next() {
  ServerMediaSubsession* result = fNextPtr;
  if (fNextPtr != NULL) fNextPtr = fNextPtr->fNext;
  return result;
}

void ServerMediaSubsessionIterator::reset() {
  fNextPtr = fOurSession.fSubsessionsHead;
}

////////// ServerMediaSubsession //////////

ServerMediaSubsession::ServerMediaSubsession(UsageEnvironment& env)
  : Medium(env), fParentSession(NULL), fServerAddressForSDP(0),
    fPortNumForSDP(0), fNext(NULL), fTrackNumber(0), fTrackId(NULL) {
}

ServerMediaSubsession::~ServerMediaSubsession() {
  delete[] (char*)fTrackId;
  Medium::close(fNext);
}

char const* ServerMediaSubsession::trackId() {
  if (fTrackNumber == 0) return NULL; // not yet in a ServerMediaSession

  if (fTrackId == NULL) {
    char buf[100];
    sprintf(buf, "track%d", fTrackNumber);
    fTrackId = strDup(buf);
  }
  return fTrackId;
}

void ServerMediaSubsession::pauseStream(unsigned /*clientSessionId*/,
                                        void* /*streamToken*/) {
  // default implementation: do nothing
}

void ServerMediaSubsession::seekStream(unsigned /*clientSessionId*/,
                                       void* /*streamToken*/, double& /*seekNPT*/,
                                       double /*streamDuration*/, u_int64_t& numBytes) {
  // default implementation: do nothing
  numBytes = 0;
}

void ServerMediaSubsession::seekStream(unsigned /*clientSessionId*/,
                                       void* /*streamToken*/,
                                       char*& absStart, char*& absEnd) {
  // default implementation: do nothing (but delete[] and assign "absStart" and "absEnd" to NULL, to show that we don't handle this)
  delete[] absStart; absStart = NULL;
  delete[] absEnd; absEnd = NULL;
}

void ServerMediaSubsession::nullSeekStream(unsigned /*clientSessionId*/,
                                           void* /*streamToken*/) {
  // default implementation: do nothing
}

void ServerMediaSubsession::setStreamScale(unsigned /*clientSessionId*/,
                                           void* /*streamToken*/, float /*scale*/) {
  // default implementation: do nothing
}

float ServerMediaSubsession::getCurrentNPT(void* /*streamToken*/) {
  // default implementation: return 0.0
  return 0.0;
}

FramedSource* ServerMediaSubsession::getStreamSource(void* /*streamToken*/) {
  // default implementation: return NULL
  return NULL;
}

void ServerMediaSubsession::deleteStream(unsigned /*clientSessionId*/,
                                         void*& /*streamToken*/) {
  // default implementation: do nothing
}

void ServerMediaSubsession::testScaleFactor(float& scale) {
  // default implementation: Support scale = 1 only
  scale = 1;
}

float ServerMediaSubsession::duration() const {
  // default implementation: assume an unbounded session:
  return 0.0;
}

void ServerMediaSubsession::getAbsoluteTimeRange(char*& absStartTime,
                                                 char*& absEndTime) const {
  // default implementation: We don't support seeking by
  // 'absolute' time, so indicate this by setting both parameters to NULL:
  absStartTime = absEndTime = NULL;
}

void ServerMediaSubsession::setServerAddressAndPortForSDP(netAddressBits addressBits,
                                                          portNumBits portBits) {
  fServerAddressForSDP = addressBits;
  fPortNumForSDP = portBits;
}

char const* ServerMediaSubsession::rangeSDPLine() const {
  // First, check for the special case where we support seeking by 'absolute' time:
  char* absStart = NULL; char* absEnd = NULL;
  getAbsoluteTimeRange(absStart, absEnd);
  if (absStart != NULL) {
    char buf[100];
    if (absEnd != NULL) {
      sprintf(buf, "a=range:clock=%s-%s\r\n", absStart, absEnd);
    } else {
      sprintf(buf, "a=range:clock=%s-\r\n", absStart);
    }
    return strDup(buf);
  }

  if (fParentSession == NULL) return NULL;

  // If all of our parent's subsessions have the same duration
  // (as indicated by "fParentSession->duration() >= 0"), there's no "a=range:" line:
  if (fParentSession->duration() >= 0.0) return strDup("");

  // Use our own duration for a "a=range:" line:
  float ourDuration = duration();
  if (ourDuration == 0.0) {
    return strDup("a=range:npt=0-\r\n");
  } else {
    char buf[100];
    sprintf(buf, "a=range:npt=0-%.3f\r\n", ourDuration);
    return strDup(buf);
  }
}
live/liveMedia/ByteStreamFileSource.cpp
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A file source that is a plain byte stream (rather than frames) // Implementation #include "ByteStreamFileSource.hh" #include "InputFile.hh" #include "GroupsockHelper.hh" ////////// ByteStreamFileSource ////////// ByteStreamFileSource* ByteStreamFileSource::createNew(UsageEnvironment& env, char const* fileName, unsigned preferredFrameSize, unsigned playTimePerFrame) { FILE* fid = OpenInputFile(env, fileName); if (fid == NULL) return NULL; ByteStreamFileSource* newSource = new ByteStreamFileSource(env, fid, preferredFrameSize, playTimePerFrame); newSource->fFileSize = GetFileSize(fileName, fid); return newSource; } ByteStreamFileSource* ByteStreamFileSource::createNew(UsageEnvironment& env, FILE* fid, unsigned preferredFrameSize, unsigned playTimePerFrame) { if (fid == NULL) return NULL; ByteStreamFileSource* newSource = new ByteStreamFileSource(env, fid, preferredFrameSize, playTimePerFrame); newSource->fFileSize = GetFileSize(NULL, fid); return newSource; } void ByteStreamFileSource::seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream) { SeekFile64(fFid, (int64_t)byteNumber, SEEK_SET); fNumBytesToStream = numBytesToStream; fLimitNumBytesToStream = fNumBytesToStream > 0; } void ByteStreamFileSource::seekToByteRelative(int64_t offset) { SeekFile64(fFid, offset, SEEK_CUR); } void ByteStreamFileSource::seekToEnd() { SeekFile64(fFid, 0, SEEK_END); } ByteStreamFileSource::ByteStreamFileSource(UsageEnvironment& env, FILE* fid, unsigned preferredFrameSize, unsigned playTimePerFrame) : FramedFileSource(env, fid), fFileSize(0), fPreferredFrameSize(preferredFrameSize), fPlayTimePerFrame(playTimePerFrame), fLastPlayTime(0), fHaveStartedReading(False), fLimitNumBytesToStream(False), fNumBytesToStream(0) { #ifndef READ_FROM_FILES_SYNCHRONOUSLY makeSocketNonBlocking(fileno(fFid)); #endif // Test whether the file is seekable fFidIsSeekable = FileIsSeekable(fFid); } ByteStreamFileSource::~ByteStreamFileSource() { if (fFid == NULL) return; #ifndef READ_FROM_FILES_SYNCHRONOUSLY envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid)); #endif CloseInputFile(fFid); } void ByteStreamFileSource::doGetNextFrame() { if (feof(fFid) || ferror(fFid) || (fLimitNumBytesToStream && fNumBytesToStream == 0)) { handleClosure(this); return; } #ifdef READ_FROM_FILES_SYNCHRONOUSLY doReadFromFile(); #else if (!fHaveStartedReading) { // Await readable data from the file: envir().taskScheduler().turnOnBackgroundReadHandling(fileno(fFid), (TaskScheduler::BackgroundHandlerProc*)&fileReadableHandler, this); fHaveStartedReading = True; } #endif } void ByteStreamFileSource::doStopGettingFrames() { envir().taskScheduler().unscheduleDelayedTask(nextTask()); #ifndef READ_FROM_FILES_SYNCHRONOUSLY envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid)); fHaveStartedReading = False; #endif } void ByteStreamFileSource::fileReadableHandler(ByteStreamFileSource* source, int /*mask*/) { if (!source->isCurrentlyAwaitingData()) { source->doStopGettingFrames(); // we're not ready for the data yet return; } source->doReadFromFile(); } void ByteStreamFileSource::doReadFromFile() { // Try to read as many bytes as will fit in the buffer provided (or "fPreferredFrameSize" if less) if (fLimitNumBytesToStream && fNumBytesToStream < (u_int64_t)fMaxSize) { fMaxSize = (unsigned)fNumBytesToStream; } if (fPreferredFrameSize > 0 && fPreferredFrameSize < fMaxSize) { fMaxSize = fPreferredFrameSize; } #ifdef READ_FROM_FILES_SYNCHRONOUSLY fFrameSize = fread(fTo, 1, fMaxSize, fFid); #else if (fFidIsSeekable) { fFrameSize = 
fread(fTo, 1, fMaxSize, fFid);
  } else {
    // For non-seekable files (e.g., pipes), call "read()" rather than "fread()", to ensure that the read doesn't block:
    fFrameSize = read(fileno(fFid), fTo, fMaxSize);
  }
#endif
  if (fFrameSize == 0) {
    handleClosure(this);
    return;
  }
  fNumBytesToStream -= fFrameSize;

  // Set the 'presentation time':
  if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) {
    if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
      // This is the first frame, so use the current time:
      gettimeofday(&fPresentationTime, NULL);
    } else {
      // Increment by the play time of the previous data:
      unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
      fPresentationTime.tv_sec += uSeconds/1000000;
      fPresentationTime.tv_usec = uSeconds%1000000;
    }

    // Remember the play time of this data:
    fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
    fDurationInMicroseconds = fLastPlayTime;
  } else {
    // We don't know a specific play time duration for this data,
    // so just record the current time as being the 'presentation time':
    gettimeofday(&fPresentationTime, NULL);
  }

  // Inform the reader that he has data:
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
  // To avoid possible infinite recursion, we need to return to the event loop to do this:
  nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
                 (TaskFunc*)FramedSource::afterGetting, this);
#else
  // Because the file read was done from the event loop, we can call the
  // 'after getting' function directly, without risk of infinite recursion:
  FramedSource::afterGetting(this);
#endif
}
live/liveMedia/QuickTimeFileSink.cpp
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A sink that generates a QuickTime file from a composite media session
// Implementation

#include "QuickTimeFileSink.hh"
#include "QuickTimeGenericRTPSource.hh"
#include "GroupsockHelper.hh"
#include "InputFile.hh"
#include "OutputFile.hh"
#include "H263plusVideoRTPSource.hh" // for the special header
#include "MPEG4GenericRTPSource.hh" //for "samplingFrequencyFromAudioSpecificConfig()"
#include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()"
#include "Base64.hh"
#include <ctype.h>

#define fourChar(x,y,z,w) ( ((x)<<24)|((y)<<16)|((z)<<8)|(w) )

#define H264_IDR_FRAME 0x65 //bit 8 == 0, bits 7-6 (ref) == 3, bits 5-0 (type) == 5

////////// SubsessionIOState, ChunkDescriptor ///////////
// A structure used to represent the I/O state of each input 'subsession':

class ChunkDescriptor {
public:
  ChunkDescriptor(int64_t offsetInFile, unsigned size,
                  unsigned frameSize, unsigned frameDuration,
                  struct timeval presentationTime);

  ChunkDescriptor* extendChunk(int64_t newOffsetInFile, unsigned newSize,
                               unsigned newFrameSize, unsigned newFrameDuration,
                               struct timeval newPresentationTime);
      // this may end up allocating a new chunk instead

public:
  ChunkDescriptor* fNextChunk;
  int64_t fOffsetInFile;
  unsigned fNumFrames;
  unsigned fFrameSize;
  unsigned fFrameDuration;
  struct timeval fPresentationTime; // of the start of the data
};

class SubsessionBuffer {
public:
  SubsessionBuffer(unsigned bufferSize)
    : fBufferSize(bufferSize) {
    reset();
    fData = new unsigned char[bufferSize];
  }
  virtual ~SubsessionBuffer() { delete[] fData; }
  void reset() { fBytesInUse = 0; }
  void addBytes(unsigned numBytes) { fBytesInUse += numBytes; }

  unsigned char* dataStart() { return &fData[0]; }
  unsigned char* dataEnd() { return &fData[fBytesInUse]; }
  unsigned bytesInUse() const { return fBytesInUse; }
  unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; }

  void setPresentationTime(struct timeval const& presentationTime) {
    fPresentationTime = presentationTime;
  }
  struct timeval const& presentationTime() const {return fPresentationTime;}

private:
  unsigned fBufferSize;
  struct timeval fPresentationTime;
  unsigned char* fData;
  unsigned fBytesInUse;
};

class SyncFrame {
public:
  SyncFrame(unsigned frameNum);

public:
  class SyncFrame *nextSyncFrame;
  unsigned sfFrameNum;
};

// A 64-bit counter, used below:
class Count64 {
public:
  Count64()
    : hi(0), lo(0) {
  }

  void operator+=(unsigned arg);

  u_int32_t hi, lo;
};

class SubsessionIOState {
public:
  SubsessionIOState(QuickTimeFileSink& sink, MediaSubsession& subsession);
  virtual ~SubsessionIOState();

  Boolean setQTstate();
  void setFinalQTstate();

  void afterGettingFrame(unsigned packetDataSize,
                         struct timeval presentationTime);
  void onSourceClosure();

  Boolean syncOK(struct timeval presentationTime);
      // returns true iff data is usable despite a sync check

  static void setHintTrack(SubsessionIOState* hintedTrack,
                           SubsessionIOState* hintTrack);
  Boolean isHintTrack() const { return fTrackHintedByUs != NULL; }
  Boolean hasHintTrack() const { return fHintTrackForUs != NULL; }

  UsageEnvironment& envir() const { return fOurSink.envir(); }

public:
  static unsigned fCurrentTrackNumber;
  unsigned fTrackID;
  SubsessionIOState* fHintTrackForUs; SubsessionIOState* fTrackHintedByUs;

  SubsessionBuffer *fBuffer, *fPrevBuffer;
  QuickTimeFileSink& fOurSink;
  MediaSubsession& fOurSubsession;

  unsigned short fLastPacketRTPSeqNum;
  Boolean fOurSourceIsActive;

  Boolean fHaveBeenSynced; // used in synchronizing with other streams
  struct timeval fSyncTime;

  Boolean fQTEnableTrack;
  unsigned fQTcomponentSubtype;
  char const*
fQTcomponentName; typedef unsigned (QuickTimeFileSink::*atomCreationFunc)(); atomCreationFunc fQTMediaInformationAtomCreator; atomCreationFunc fQTMediaDataAtomCreator; char const* fQTAudioDataType; unsigned short fQTSoundSampleVersion; unsigned fQTTimeScale; unsigned fQTTimeUnitsPerSample; unsigned fQTBytesPerFrame; unsigned fQTSamplesPerFrame; // These next fields are derived from the ones above, // plus the information from each chunk: unsigned fQTTotNumSamples; unsigned fQTDurationM; // in media time units unsigned fQTDurationT; // in track time units int64_t fTKHD_durationPosn; // position of the duration in the output 'tkhd' atom unsigned fQTInitialOffsetDuration; // if there's a pause at the beginning ChunkDescriptor *fHeadChunk, *fTailChunk; unsigned fNumChunks; SyncFrame *fHeadSyncFrame, *fTailSyncFrame; // Counters to be used in the hint track's 'udta'/'hinf' atom; struct hinf { Count64 trpy; Count64 nump; Count64 tpyl; // Is 'maxr' needed? Computing this would be a PITA. ##### Count64 dmed; Count64 dimm; // 'drep' is always 0 // 'tmin' and 'tmax' are always 0 unsigned pmax; unsigned dmax; } fHINF; private: void useFrame(SubsessionBuffer& buffer); void useFrameForHinting(unsigned frameSize, struct timeval presentationTime, unsigned startSampleNumber); // used by the above two routines: unsigned useFrame1(unsigned sourceDataSize, struct timeval presentationTime, unsigned frameDuration, int64_t destFileOffset); // returns the number of samples in this data private: // A structure used for temporarily storing frame state: struct { unsigned frameSize; struct timeval presentationTime; int64_t destFileOffset; // used for non-hint tracks only // The remaining fields are used for hint tracks only: unsigned startSampleNumber; unsigned short seqNum; unsigned rtpHeader; unsigned char numSpecialHeaders; // used when our RTP source has special headers unsigned specialHeaderBytesLength; // ditto unsigned char specialHeaderBytes[SPECIAL_HEADER_BUFFER_SIZE]; // ditto unsigned packetSizes[256]; } fPrevFrameState; }; ////////// QuickTimeFileSink implementation ////////// QuickTimeFileSink::QuickTimeFileSink(UsageEnvironment& env, MediaSession& inputSession, char const* outputFileName, unsigned bufferSize, unsigned short movieWidth, unsigned short movieHeight, unsigned movieFPS, Boolean packetLossCompensate, Boolean syncStreams, Boolean generateHintTracks, Boolean generateMP4Format) : Medium(env), fInputSession(inputSession), fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate), fSyncStreams(syncStreams), fGenerateMP4Format(generateMP4Format), fAreCurrentlyBeingPlayed(False), fLargestRTPtimestampFrequency(0), fNumSubsessions(0), fNumSyncedSubsessions(0), fHaveCompletedOutputFile(False), fMovieWidth(movieWidth), fMovieHeight(movieHeight), fMovieFPS(movieFPS), fMaxTrackDurationM(0) { fOutFid = OpenOutputFile(env, outputFileName); if (fOutFid == NULL) return; fNewestSyncTime.tv_sec = fNewestSyncTime.tv_usec = 0; fFirstDataTime.tv_sec = fFirstDataTime.tv_usec = (unsigned)(~0); // Set up I/O state for each input subsession: MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { // Ignore subsessions without a data source: FramedSource* subsessionSource = subsession->readSource(); if (subsessionSource == NULL) continue; // If "subsession's" SDP description specified screen dimension // or frame rate parameters, then use these. (Note that this must // be done before the call to "setQTState()" below.) 
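    // (Each of these "MediaSubsession" accessors returns 0 if the
    //  corresponding parameter was absent from the SDP description.)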
if (subsession->videoWidth() != 0) { fMovieWidth = subsession->videoWidth(); } if (subsession->videoHeight() != 0) { fMovieHeight = subsession->videoHeight(); } if (subsession->videoFPS() != 0) { fMovieFPS = subsession->videoFPS(); } SubsessionIOState* ioState = new SubsessionIOState(*this, *subsession); if (ioState == NULL || !ioState->setQTstate()) { // We're not able to output a QuickTime track for this subsession delete ioState; ioState = NULL; continue; } subsession->miscPtr = (void*)ioState; if (generateHintTracks) { // Also create a hint track for this track: SubsessionIOState* hintTrack = new SubsessionIOState(*this, *subsession); SubsessionIOState::setHintTrack(ioState, hintTrack); if (!hintTrack->setQTstate()) { delete hintTrack; SubsessionIOState::setHintTrack(ioState, NULL); } } // Also set a 'BYE' handler for this subsession's RTCP instance: if (subsession->rtcpInstance() != NULL) { subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState); } unsigned rtpTimestampFrequency = subsession->rtpTimestampFrequency(); if (rtpTimestampFrequency > fLargestRTPtimestampFrequency) { fLargestRTPtimestampFrequency = rtpTimestampFrequency; } ++fNumSubsessions; } // Use the current time as the file's creation and modification // time. Use Apple's time format: seconds since January 1, 1904 gettimeofday(&fStartTime, NULL); fAppleCreationTime = fStartTime.tv_sec - 0x83dac000; // Begin by writing a "mdat" atom at the start of the file. // (Later, when we've finished copying data to the file, we'll come // back and fill in its size.) fMDATposition = TellFile64(fOutFid); addAtomHeader64("mdat"); // add 64Bit offset fMDATposition += 8; } QuickTimeFileSink::~QuickTimeFileSink() { completeOutputFile(); // Then, stop streaming and delete each active "SubsessionIOState": MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { subsession->readSource()->stopGettingFrames(); SubsessionIOState* ioState = (SubsessionIOState*)(subsession->miscPtr); if (ioState == NULL) continue; delete ioState->fHintTrackForUs; // if any delete ioState; } // Finally, close our output file: CloseOutputFile(fOutFid); } QuickTimeFileSink* QuickTimeFileSink::createNew(UsageEnvironment& env, MediaSession& inputSession, char const* outputFileName, unsigned bufferSize, unsigned short movieWidth, unsigned short movieHeight, unsigned movieFPS, Boolean packetLossCompensate, Boolean syncStreams, Boolean generateHintTracks, Boolean generateMP4Format) { QuickTimeFileSink* newSink = new QuickTimeFileSink(env, inputSession, outputFileName, bufferSize, movieWidth, movieHeight, movieFPS, packetLossCompensate, syncStreams, generateHintTracks, generateMP4Format); if (newSink == NULL || newSink->fOutFid == NULL) { Medium::close(newSink); return NULL; } return newSink; } Boolean QuickTimeFileSink::startPlaying(afterPlayingFunc* afterFunc, void* afterClientData) { // Make sure we're not already being played: if (fAreCurrentlyBeingPlayed) { envir().setResultMsg("This sink has already been played"); return False; } fAreCurrentlyBeingPlayed = True; fAfterFunc = afterFunc; fAfterClientData = afterClientData; return continuePlaying(); } Boolean QuickTimeFileSink::continuePlaying() { // Run through each of our input session's 'subsessions', // asking for a frame from each one: Boolean haveActiveSubsessions = False; MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { FramedSource* subsessionSource = 
subsession->readSource(); if (subsessionSource == NULL) continue; if (subsessionSource->isCurrentlyAwaitingData()) continue; SubsessionIOState* ioState = (SubsessionIOState*)(subsession->miscPtr); if (ioState == NULL) continue; haveActiveSubsessions = True; unsigned char* toPtr = ioState->fBuffer->dataEnd(); unsigned toSize = ioState->fBuffer->bytesAvailable(); subsessionSource->getNextFrame(toPtr, toSize, afterGettingFrame, ioState, onSourceClosure, ioState); } if (!haveActiveSubsessions) { envir().setResultMsg("No subsessions are currently active"); return False; } return True; } void QuickTimeFileSink ::afterGettingFrame(void* clientData, unsigned packetDataSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) { SubsessionIOState* ioState = (SubsessionIOState*)clientData; if (!ioState->syncOK(presentationTime)) { // Ignore this data: ioState->fOurSink.continuePlaying(); return; } if (numTruncatedBytes > 0) { ioState->envir() << "QuickTimeFileSink::afterGettingFrame(): The input frame data was too large for our buffer. " << numTruncatedBytes << " bytes of trailing data was dropped! Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call.\n"; } ioState->afterGettingFrame(packetDataSize, presentationTime); } void QuickTimeFileSink::onSourceClosure(void* clientData) { SubsessionIOState* ioState = (SubsessionIOState*)clientData; ioState->onSourceClosure(); } void QuickTimeFileSink::onSourceClosure1() { // Check whether *all* of the subsession sources have closed. // If not, do nothing for now: MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { SubsessionIOState* ioState = (SubsessionIOState*)(subsession->miscPtr); if (ioState == NULL) continue; if (ioState->fOurSourceIsActive) return; // this source hasn't closed } completeOutputFile(); // Call our specified 'after' function: if (fAfterFunc != NULL) { (*fAfterFunc)(fAfterClientData); } } void QuickTimeFileSink::onRTCPBye(void* clientData) { SubsessionIOState* ioState = (SubsessionIOState*)clientData; struct timeval timeNow; gettimeofday(&timeNow, NULL); unsigned secsDiff = timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec; MediaSubsession& subsession = ioState->fOurSubsession; ioState->envir() << "Received RTCP \"BYE\" on \"" << subsession.mediumName() << "/" << subsession.codecName() << "\" subsession (after " << secsDiff << " seconds)\n"; // Handle the reception of a RTCP "BYE" as if the source had closed: ioState->onSourceClosure(); } static Boolean timevalGE(struct timeval const& tv1, struct timeval const& tv2) { return (unsigned)tv1.tv_sec > (unsigned)tv2.tv_sec || (tv1.tv_sec == tv2.tv_sec && (unsigned)tv1.tv_usec >= (unsigned)tv2.tv_usec); } void QuickTimeFileSink::completeOutputFile() { if (fHaveCompletedOutputFile || fOutFid == NULL) return; // Begin by filling in the initial "mdat" atom with the current // file size: int64_t curFileSize = TellFile64(fOutFid); setWord64(fMDATposition, (u_int64_t)curFileSize); // Then, note the time of the first received data: MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { SubsessionIOState* ioState = (SubsessionIOState*)(subsession->miscPtr); if (ioState == NULL) continue; ChunkDescriptor* const headChunk = ioState->fHeadChunk; if (headChunk != NULL && timevalGE(fFirstDataTime, headChunk->fPresentationTime)) { fFirstDataTime = headChunk->fPresentationTime; } } // Then, update the 
QuickTime-specific state for each active track: iter.reset(); while ((subsession = iter.next()) != NULL) { SubsessionIOState* ioState = (SubsessionIOState*)(subsession->miscPtr); if (ioState == NULL) continue; ioState->setFinalQTstate(); // Do the same for a hint track (if any): if (ioState->hasHintTrack()) { ioState->fHintTrackForUs->setFinalQTstate(); } } if (fGenerateMP4Format) { // Begin with a "ftyp" atom: addAtom_ftyp(); } // Then, add a "moov" atom for the file metadata: addAtom_moov(); // We're done: fHaveCompletedOutputFile = True; } ////////// SubsessionIOState, ChunkDescriptor implementation /////////// unsigned SubsessionIOState::fCurrentTrackNumber = 0; SubsessionIOState::SubsessionIOState(QuickTimeFileSink& sink, MediaSubsession& subsession) : fHintTrackForUs(NULL), fTrackHintedByUs(NULL), fOurSink(sink), fOurSubsession(subsession), fLastPacketRTPSeqNum(0), fHaveBeenSynced(False), fQTTotNumSamples(0), fHeadChunk(NULL), fTailChunk(NULL), fNumChunks(0), fHeadSyncFrame(NULL), fTailSyncFrame(NULL) { fTrackID = ++fCurrentTrackNumber; fBuffer = new SubsessionBuffer(fOurSink.fBufferSize); fPrevBuffer = sink.fPacketLossCompensate ? new SubsessionBuffer(fOurSink.fBufferSize) : NULL; FramedSource* subsessionSource = subsession.readSource(); fOurSourceIsActive = subsessionSource != NULL; fPrevFrameState.presentationTime.tv_sec = 0; fPrevFrameState.presentationTime.tv_usec = 0; fPrevFrameState.seqNum = 0; } SubsessionIOState::~SubsessionIOState() { delete fBuffer; delete fPrevBuffer; // Delete the list of chunk descriptors: ChunkDescriptor* chunk = fHeadChunk; while (chunk != NULL) { ChunkDescriptor* next = chunk->fNextChunk; delete chunk; chunk = next; } // Delete the list of sync frames: SyncFrame* syncFrame = fHeadSyncFrame; while (syncFrame != NULL) { SyncFrame* next = syncFrame->nextSyncFrame; delete syncFrame; syncFrame = next; } } Boolean SubsessionIOState::setQTstate() { char const* noCodecWarning1 = "Warning: We don't implement a QuickTime "; char const* noCodecWarning2 = " Media Data Type for the \""; char const* noCodecWarning3 = "\" track, so we'll insert a dummy \"????\" Media Data Atom instead. 
A separate, codec-specific editing pass will be needed before this track can be played.\n"; do { fQTEnableTrack = True; // enable this track in the movie by default fQTTimeScale = fOurSubsession.rtpTimestampFrequency(); // by default fQTTimeUnitsPerSample = 1; // by default fQTBytesPerFrame = 0; // by default - indicates that the whole packet data is a frame fQTSamplesPerFrame = 1; // by default // Make sure our subsession's medium is one that we know how to // represent in a QuickTime file: if (isHintTrack()) { // Hint tracks are treated specially fQTEnableTrack = False; // hint tracks are marked as inactive fQTcomponentSubtype = fourChar('h','i','n','t'); fQTcomponentName = "hint media handler"; fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_gmhd; fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_rtp; } else if (strcmp(fOurSubsession.mediumName(), "audio") == 0) { fQTcomponentSubtype = fourChar('s','o','u','n'); fQTcomponentName = "Apple Sound Media Handler"; fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_smhd; fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_soundMediaGeneral; // by default fQTSoundSampleVersion = 0; // by default // Make sure that our subsession's codec is one that we can handle: if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 || strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) { fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia; } else if (strcmp(fOurSubsession.codecName(), "PCMU") == 0) { fQTAudioDataType = "ulaw"; fQTBytesPerFrame = 1; } else if (strcmp(fOurSubsession.codecName(), "GSM") == 0) { fQTAudioDataType = "agsm"; fQTBytesPerFrame = 33; fQTSamplesPerFrame = 160; } else if (strcmp(fOurSubsession.codecName(), "PCMA") == 0) { fQTAudioDataType = "alaw"; fQTBytesPerFrame = 1; } else if (strcmp(fOurSubsession.codecName(), "QCELP") == 0) { fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_Qclp; fQTSamplesPerFrame = 160; } else if (strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0 || strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0) { fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4a; fQTTimeUnitsPerSample = 1024; // QT considers each frame to be a 'sample' // The time scale (frequency) comes from the 'config' information. // It might be different from the RTP timestamp frequency (e.g., aacPlus). 
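	// (The 'config' string is the hexadecimal "AudioSpecificConfig" from
	//  the subsession's "a=fmtp:" SDP line; a return value of 0 means
	//  that no sampling frequency could be extracted from it.)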
unsigned frequencyFromConfig = samplingFrequencyFromAudioSpecificConfig(fOurSubsession.fmtp_config()); if (frequencyFromConfig != 0) fQTTimeScale = frequencyFromConfig; } else { envir() << noCodecWarning1 << "Audio" << noCodecWarning2 << fOurSubsession.codecName() << noCodecWarning3; fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy; fQTEnableTrack = False; // disable this track in the movie } } else if (strcmp(fOurSubsession.mediumName(), "video") == 0) { fQTcomponentSubtype = fourChar('v','i','d','e'); fQTcomponentName = "Apple Video Media Handler"; fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_vmhd; // Make sure that our subsession's codec is one that we can handle: if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 || strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) { fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia; } else if (strcmp(fOurSubsession.codecName(), "H263-1998") == 0 || strcmp(fOurSubsession.codecName(), "H263-2000") == 0) { fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_h263; fQTTimeScale = 600; fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS; } else if (strcmp(fOurSubsession.codecName(), "H264") == 0) { fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_avc1; fQTTimeScale = 600; fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS; } else if (strcmp(fOurSubsession.codecName(), "MP4V-ES") == 0) { fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4v; fQTTimeScale = 600; fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS; } else { envir() << noCodecWarning1 << "Video" << noCodecWarning2 << fOurSubsession.codecName() << noCodecWarning3; fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy; fQTEnableTrack = False; // disable this track in the movie } } else { envir() << "Warning: We don't implement a QuickTime Media Handler for media type \"" << fOurSubsession.mediumName() << "\""; break; } #ifdef QT_SUPPORT_PARTIALLY_ONLY envir() << "Warning: We don't have sufficient codec-specific information (e.g., sample sizes) to fully generate the \"" << fOurSubsession.mediumName() << "/" << fOurSubsession.codecName() << "\" track, so we'll disable this track in the movie. A separate, codec-specific editing pass will be needed before this track can be played\n"; fQTEnableTrack = False; // disable this track in the movie #endif return True; } while (0); envir() << ", so a track for the \"" << fOurSubsession.mediumName() << "/" << fOurSubsession.codecName() << "\" subsession will not be included in the output QuickTime file\n"; return False; } void SubsessionIOState::setFinalQTstate() { // Compute derived parameters, by running through the list of chunks: fQTDurationT = 0; ChunkDescriptor* chunk = fHeadChunk; while (chunk != NULL) { unsigned const numFrames = chunk->fNumFrames; unsigned const dur = numFrames*chunk->fFrameDuration; fQTDurationT += dur; chunk = chunk->fNextChunk; } // Convert this duration from track to movie time scale: double scaleFactor = fOurSink.movieTimeScale()/(double)fQTTimeScale; fQTDurationM = (unsigned)(fQTDurationT*scaleFactor); if (fQTDurationM > fOurSink.fMaxTrackDurationM) { fOurSink.fMaxTrackDurationM = fQTDurationM; } } void SubsessionIOState::afterGettingFrame(unsigned packetDataSize, struct timeval presentationTime) { // Begin by checking whether there was a gap in the RTP stream. 
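  // (A gap shows up as a jump in the packets' RTP sequence numbers.)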
// If so, try to compensate for this (if desired): unsigned short rtpSeqNum = fOurSubsession.rtpSource()->curPacketRTPSeqNum(); if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) { short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum; for (short i = 1; i < seqNumGap; ++i) { // Insert a copy of the previous frame, to compensate for the loss: useFrame(*fPrevBuffer); } } fLastPacketRTPSeqNum = rtpSeqNum; // Now, continue working with the frame that we just got if (fBuffer->bytesInUse() == 0) { fBuffer->setPresentationTime(presentationTime); } fBuffer->addBytes(packetDataSize); // If our RTP source is a "QuickTimeGenericRTPSource", then // use its 'qtState' to set some parameters that we need: if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_genericMedia){ QuickTimeGenericRTPSource* rtpSource = (QuickTimeGenericRTPSource*)fOurSubsession.rtpSource(); QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState; fQTTimeScale = qtState.timescale; if (qtState.width != 0) { fOurSink.fMovieWidth = qtState.width; } if (qtState.height != 0) { fOurSink.fMovieHeight = qtState.height; } // Also, if the media type in the "sdAtom" is one that we recognize // to have a special parameters, then fix this here: if (qtState.sdAtomSize >= 8) { char const* atom = qtState.sdAtom; unsigned mediaType = fourChar(atom[4],atom[5],atom[6],atom[7]); switch (mediaType) { case fourChar('a','g','s','m'): { fQTBytesPerFrame = 33; fQTSamplesPerFrame = 160; break; } case fourChar('Q','c','l','p'): { fQTBytesPerFrame = 35; fQTSamplesPerFrame = 160; break; } case fourChar('H','c','l','p'): { fQTBytesPerFrame = 17; fQTSamplesPerFrame = 160; break; } case fourChar('h','2','6','3'): { fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS; break; } } } } else if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_Qclp) { // For QCELP data, make a note of the frame size (even though it's the // same as the packet data size), because it varies depending on the // 'rate' of the stream, and this size gets used later when setting up // the 'Qclp' QuickTime atom: fQTBytesPerFrame = packetDataSize; } useFrame(*fBuffer); if (fOurSink.fPacketLossCompensate) { // Save this frame, in case we need it for recovery: SubsessionBuffer* tmp = fPrevBuffer; // assert: != NULL fPrevBuffer = fBuffer; fBuffer = tmp; } fBuffer->reset(); // for the next input // Now, try getting more frames: fOurSink.continuePlaying(); } void SubsessionIOState::useFrame(SubsessionBuffer& buffer) { unsigned char* const frameSource = buffer.dataStart(); unsigned const frameSize = buffer.bytesInUse(); struct timeval const& presentationTime = buffer.presentationTime(); int64_t const destFileOffset = TellFile64(fOurSink.fOutFid); unsigned sampleNumberOfFrameStart = fQTTotNumSamples + 1; Boolean avcHack = fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_avc1; // If we're not syncing streams, or this subsession is not video, then // just give this frame a fixed duration: if (!fOurSink.fSyncStreams || fQTcomponentSubtype != fourChar('v','i','d','e')) { unsigned const frameDuration = fQTTimeUnitsPerSample*fQTSamplesPerFrame; unsigned frameSizeToUse = frameSize; if (avcHack) frameSizeToUse += 4; // H.264/AVC gets the frame size prefix fQTTotNumSamples += useFrame1(frameSizeToUse, presentationTime, frameDuration, destFileOffset); } else { // For synced video streams, we use the difference between successive // frames' presentation times as the 'frame duration'. 
So, record // information about the *previous* frame: struct timeval const& ppt = fPrevFrameState.presentationTime; //abbrev if (ppt.tv_sec != 0 || ppt.tv_usec != 0) { // There has been a previous frame. double duration = (presentationTime.tv_sec - ppt.tv_sec) + (presentationTime.tv_usec - ppt.tv_usec)/1000000.0; if (duration < 0.0) duration = 0.0; unsigned frameDuration = (unsigned)((2*duration*fQTTimeScale+1)/2); // round unsigned frameSizeToUse = fPrevFrameState.frameSize; if (avcHack) frameSizeToUse += 4; // H.264/AVC gets the frame size prefix unsigned numSamples = useFrame1(frameSizeToUse, ppt, frameDuration, fPrevFrameState.destFileOffset); fQTTotNumSamples += numSamples; sampleNumberOfFrameStart = fQTTotNumSamples + 1; } if (avcHack && (*frameSource == H264_IDR_FRAME)) { SyncFrame* newSyncFrame = new SyncFrame(fQTTotNumSamples + 1); if (fTailSyncFrame == NULL) { fHeadSyncFrame = newSyncFrame; } else { fTailSyncFrame->nextSyncFrame = newSyncFrame; } fTailSyncFrame = newSyncFrame; } // Remember the current frame for next time: fPrevFrameState.frameSize = frameSize; fPrevFrameState.presentationTime = presentationTime; fPrevFrameState.destFileOffset = destFileOffset; } if (avcHack) fOurSink.addWord(frameSize); // Write the data into the file: fwrite(frameSource, 1, frameSize, fOurSink.fOutFid); // If we have a hint track, then write to it also: if (hasHintTrack()) { // Because presentation times are used for RTP packet timestamps, // we don't starting writing to the hint track until we've been synced: if (!fHaveBeenSynced) { fHaveBeenSynced = fOurSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP(); } if (fHaveBeenSynced) { fHintTrackForUs->useFrameForHinting(frameSize, presentationTime, sampleNumberOfFrameStart); } } } void SubsessionIOState::useFrameForHinting(unsigned frameSize, struct timeval presentationTime, unsigned startSampleNumber) { // At this point, we have a single, combined frame - not individual packets. // For the hint track, we need to split the frame back up into separate packets. // However, for some RTP sources, then we also need to reuse the special // header bytes that were at the start of each of the RTP packets. Boolean hack263 = strcmp(fOurSubsession.codecName(), "H263-1998") == 0; Boolean hackm4a_generic = strcmp(fOurSubsession.mediumName(), "audio") == 0 && strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0; Boolean hackm4a_latm = strcmp(fOurSubsession.mediumName(), "audio") == 0 && strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0; Boolean hackm4a = hackm4a_generic || hackm4a_latm; Boolean haveSpecialHeaders = (hack263 || hackm4a_generic); // If there has been a previous frame, then output a 'hint sample' for it. // (We use the current frame's presentation time to compute the previous // hint sample's duration.) RTPSource* const rs = fOurSubsession.rtpSource(); // abbrev struct timeval const& ppt = fPrevFrameState.presentationTime; //abbrev if (ppt.tv_sec != 0 || ppt.tv_usec != 0) { double duration = (presentationTime.tv_sec - ppt.tv_sec) + (presentationTime.tv_usec - ppt.tv_usec)/1000000.0; if (duration < 0.0) duration = 0.0; unsigned msDuration = (unsigned)(duration*1000); // milliseconds if (msDuration > fHINF.dmax) fHINF.dmax = msDuration; unsigned hintSampleDuration = (unsigned)((2*duration*fQTTimeScale+1)/2); // round if (hackm4a) { // Because multiple AAC frames can appear in a RTP packet, the presentation // times of the second and subsequent frames will not be accurate. 
// So, use the known "hintSampleDuration" instead: hintSampleDuration = fTrackHintedByUs->fQTTimeUnitsPerSample; // Also, if the 'time scale' was different from the RTP timestamp frequency, // (as can happen with aacPlus), then we need to scale "hintSampleDuration" // accordingly: if (fTrackHintedByUs->fQTTimeScale != fOurSubsession.rtpTimestampFrequency()) { unsigned const scalingFactor = fOurSubsession.rtpTimestampFrequency()/fTrackHintedByUs->fQTTimeScale ; hintSampleDuration *= scalingFactor; } } int64_t const hintSampleDestFileOffset = TellFile64(fOurSink.fOutFid); unsigned const maxPacketSize = 1450; unsigned short numPTEntries = (fPrevFrameState.frameSize + (maxPacketSize-1))/maxPacketSize; // normal case unsigned char* immediateDataPtr = NULL; unsigned immediateDataBytesRemaining = 0; if (haveSpecialHeaders) { // special case numPTEntries = fPrevFrameState.numSpecialHeaders; immediateDataPtr = fPrevFrameState.specialHeaderBytes; immediateDataBytesRemaining = fPrevFrameState.specialHeaderBytesLength; } unsigned hintSampleSize = fOurSink.addHalfWord(numPTEntries);// Entry count hintSampleSize += fOurSink.addHalfWord(0x0000); // Reserved unsigned offsetWithinSample = 0; for (unsigned i = 0; i < numPTEntries; ++i) { // Output a Packet Table entry (representing a single RTP packet): unsigned short numDTEntries = 1; unsigned short seqNum = fPrevFrameState.seqNum++; // Note: This assumes that the input stream had no packets lost ##### unsigned rtpHeader = fPrevFrameState.rtpHeader; if (i+1 < numPTEntries) { // This is not the last RTP packet, so clear the marker bit: rtpHeader &=~ (1<<23); } unsigned dataFrameSize = (i+1 < numPTEntries) ? maxPacketSize : fPrevFrameState.frameSize - i*maxPacketSize; // normal case unsigned sampleNumber = fPrevFrameState.startSampleNumber; unsigned char immediateDataLen = 0; if (haveSpecialHeaders) { // special case ++numDTEntries; // to include a Data Table entry for the special hdr if (immediateDataBytesRemaining > 0) { if (hack263) { immediateDataLen = *immediateDataPtr++; --immediateDataBytesRemaining; if (immediateDataLen > immediateDataBytesRemaining) { // shouldn't happen (length byte was bad) immediateDataLen = immediateDataBytesRemaining; } } else { immediateDataLen = fPrevFrameState.specialHeaderBytesLength; } } dataFrameSize = fPrevFrameState.packetSizes[i] - immediateDataLen; if (hack263) { Boolean PbitSet = immediateDataLen >= 1 && (immediateDataPtr[0]&0x4) != 0; if (PbitSet) { offsetWithinSample += 2; // to omit the two leading 0 bytes } } } // Output the Packet Table: hintSampleSize += fOurSink.addWord(0); // Relative transmission time hintSampleSize += fOurSink.addWord(rtpHeader|seqNum); // RTP header info + RTP sequence number hintSampleSize += fOurSink.addHalfWord(0x0000); // Flags hintSampleSize += fOurSink.addHalfWord(numDTEntries); // Entry count unsigned totalPacketSize = 0; // Output the Data Table: if (haveSpecialHeaders) { // use the "Immediate Data" format (1): hintSampleSize += fOurSink.addByte(1); // Source unsigned char len = immediateDataLen > 14 ? 
14 : immediateDataLen; hintSampleSize += fOurSink.addByte(len); // Length totalPacketSize += len; fHINF.dimm += len; unsigned char j; for (j = 0; j < len; ++j) { hintSampleSize += fOurSink.addByte(immediateDataPtr[j]); // Data } for (j = len; j < 14; ++j) { hintSampleSize += fOurSink.addByte(0); // Data (padding) } immediateDataPtr += immediateDataLen; immediateDataBytesRemaining -= immediateDataLen; } // use the "Sample Data" format (2): hintSampleSize += fOurSink.addByte(2); // Source hintSampleSize += fOurSink.addByte(0); // Track ref index hintSampleSize += fOurSink.addHalfWord(dataFrameSize); // Length totalPacketSize += dataFrameSize; fHINF.dmed += dataFrameSize; hintSampleSize += fOurSink.addWord(sampleNumber); // Sample number hintSampleSize += fOurSink.addWord(offsetWithinSample); // Offset // Get "bytes|samples per compression block" from the hinted track: unsigned short const bytesPerCompressionBlock = fTrackHintedByUs->fQTBytesPerFrame; unsigned short const samplesPerCompressionBlock = fTrackHintedByUs->fQTSamplesPerFrame; hintSampleSize += fOurSink.addHalfWord(bytesPerCompressionBlock); hintSampleSize += fOurSink.addHalfWord(samplesPerCompressionBlock); offsetWithinSample += dataFrameSize;// for the next iteration (if any) // Tally statistics for this packet: fHINF.nump += 1; fHINF.tpyl += totalPacketSize; totalPacketSize += 12; // add in the size of the RTP header fHINF.trpy += totalPacketSize; if (totalPacketSize > fHINF.pmax) fHINF.pmax = totalPacketSize; } // Make note of this completed hint sample frame: fQTTotNumSamples += useFrame1(hintSampleSize, ppt, hintSampleDuration, hintSampleDestFileOffset); } // Remember this frame for next time: fPrevFrameState.frameSize = frameSize; fPrevFrameState.presentationTime = presentationTime; fPrevFrameState.startSampleNumber = startSampleNumber; fPrevFrameState.rtpHeader = rs->curPacketMarkerBit()<<23 | (rs->rtpPayloadFormat()&0x7F)<<16; if (hack263) { H263plusVideoRTPSource* rs_263 = (H263plusVideoRTPSource*)rs; fPrevFrameState.numSpecialHeaders = rs_263->fNumSpecialHeaders; fPrevFrameState.specialHeaderBytesLength = rs_263->fSpecialHeaderBytesLength; unsigned i; for (i = 0; i < rs_263->fSpecialHeaderBytesLength; ++i) { fPrevFrameState.specialHeaderBytes[i] = rs_263->fSpecialHeaderBytes[i]; } for (i = 0; i < rs_263->fNumSpecialHeaders; ++i) { fPrevFrameState.packetSizes[i] = rs_263->fPacketSizes[i]; } } else if (hackm4a_generic) { // Synthesize a special header, so that this frame can be in its own RTP packet. 
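// For reference, the 4-byte header synthesized below follows the RFC 3640
// "AU Header" layout, sketched here for the common AAC-hbr case of
// sizeLength=13 and indexLength=3 (the 16-bit total that the sanity check
// below expects):
//   AU-headers-length (16 bits) == 16
//   AU-size           (13 bits) == frameSize
//   AU-index          ( 3 bits) == 0
// e.g. for frameSize == 0x123: 0x123<<3 == 0x918, giving the four header
// bytes 0x00 0x10 0x09 0x18.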
unsigned const sizeLength = fOurSubsession.fmtp_sizelength();
unsigned const indexLength = fOurSubsession.fmtp_indexlength();
if (sizeLength + indexLength != 16) {
  envir() << "Warning: unexpected 'sizeLength' " << sizeLength
          << " and 'indexLength' " << indexLength
          << " seen when creating hint track\n";
}
fPrevFrameState.numSpecialHeaders = 1;
fPrevFrameState.specialHeaderBytesLength = 4;
fPrevFrameState.specialHeaderBytes[0] = 0; // AU_headers_length (high byte)
fPrevFrameState.specialHeaderBytes[1] = 16; // AU_headers_length (low byte)
fPrevFrameState.specialHeaderBytes[2] = ((frameSize<<indexLength)&0xFF00)>>8; // AU-size (high bits)
fPrevFrameState.specialHeaderBytes[3] = (frameSize<<indexLength); // AU-size (low bits) + AU-index (0)
fPrevFrameState.packetSizes[0]
  = fPrevFrameState.specialHeaderBytesLength + frameSize;
}
}

unsigned SubsessionIOState::useFrame1(unsigned sourceDataSize,
                                      struct timeval presentationTime,
                                      unsigned frameDuration,
                                      int64_t destFileOffset) {
  // Figure out the actual frame size for this data:
  unsigned frameSize = fQTBytesPerFrame;
  if (frameSize == 0) {
    // The entire data is one frame:
    frameSize = sourceDataSize;
  }
  unsigned const numFrames = sourceDataSize/frameSize;
  unsigned const numSamples = numFrames*fQTSamplesPerFrame;

  // Record the information about which 'chunk' this data belongs to:
  ChunkDescriptor* newTailChunk;
  if (fTailChunk == NULL) {
    newTailChunk = fHeadChunk
      = new ChunkDescriptor(destFileOffset, sourceDataSize,
                            frameSize, frameDuration, presentationTime);
  } else {
    newTailChunk = fTailChunk->extendChunk(destFileOffset, sourceDataSize, frameSize, frameDuration, presentationTime);
  }
  if (newTailChunk != fTailChunk) {
    // This data created a new chunk, rather than extending the old one
    ++fNumChunks;
    fTailChunk = newTailChunk;
  }

  return numSamples;
}

void SubsessionIOState::onSourceClosure() {
  fOurSourceIsActive = False;
  fOurSink.onSourceClosure1();
}

Boolean SubsessionIOState::syncOK(struct timeval presentationTime) {
  QuickTimeFileSink& s = fOurSink; // abbreviation
  if (!s.fSyncStreams) return True; // we don't care

  if (s.fNumSyncedSubsessions < s.fNumSubsessions) {
    // Not all subsessions have yet been synced. Check whether ours was
    // one of the unsynced ones, and, if so, whether it is now synced:
    if (!fHaveBeenSynced) {
      // We weren't synchronized before
      if (fOurSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
        // H264 ?
        if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_avc1) {
          // special case: audio + H264 video: wait until audio is in sync
          if ((s.fNumSubsessions == 2) && (s.fNumSyncedSubsessions < (s.fNumSubsessions - 1))) return False;

          // if audio is in sync, wait for the next IDR frame to start
          unsigned char* const frameSource = fBuffer->dataStart();
          if (*frameSource != H264_IDR_FRAME) return False;
        }
        // But now we are
        fHaveBeenSynced = True;
        fSyncTime = presentationTime;
        ++s.fNumSyncedSubsessions;

        if (timevalGE(fSyncTime, s.fNewestSyncTime)) {
          s.fNewestSyncTime = fSyncTime;
        }
      }
    }
  }

  // Check again whether all subsessions have been synced:
  if (s.fNumSyncedSubsessions < s.fNumSubsessions) return False;

  // Allow this data if it is more recent than the newest sync time:
  return timevalGE(presentationTime, s.fNewestSyncTime);
}

void SubsessionIOState::setHintTrack(SubsessionIOState* hintedTrack,
                                     SubsessionIOState* hintTrack) {
  if (hintedTrack != NULL) hintedTrack->fHintTrackForUs = hintTrack;
  if (hintTrack != NULL) hintTrack->fTrackHintedByUs = hintedTrack;
}

SyncFrame::SyncFrame(unsigned frameNum)
  : nextSyncFrame(NULL), sfFrameNum(frameNum) {
}

void Count64::operator+=(unsigned arg) {
  unsigned newLo = lo + arg;
  if (newLo < lo) { // lo has overflowed
    ++hi;
  }
  lo = newLo;
}

ChunkDescriptor
::ChunkDescriptor(int64_t offsetInFile, unsigned size,
                  unsigned frameSize, unsigned frameDuration,
                  struct timeval presentationTime)
  : fNextChunk(NULL), fOffsetInFile(offsetInFile),
    fNumFrames(size/frameSize), fFrameSize(frameSize),
    fFrameDuration(frameDuration), fPresentationTime(presentationTime) {
}

ChunkDescriptor* ChunkDescriptor
::extendChunk(int64_t newOffsetInFile, unsigned newSize,
              unsigned newFrameSize, unsigned newFrameDuration,
              struct timeval newPresentationTime) {
  // First, check whether the new space is just at the end of this
  // existing chunk:
  if (newOffsetInFile == fOffsetInFile + fNumFrames*fFrameSize) {
    // We can extend this existing chunk, provided that the frame size
    // and frame duration have not changed:
    if (newFrameSize ==
fFrameSize && newFrameDuration == fFrameDuration) {
      fNumFrames += newSize/fFrameSize;
      return this;
    }
  }

  // We'll allocate a new ChunkDescriptor, and link it to the end of us:
  ChunkDescriptor* newDescriptor
    = new ChunkDescriptor(newOffsetInFile, newSize,
                          newFrameSize, newFrameDuration,
                          newPresentationTime);
  fNextChunk = newDescriptor;
  return newDescriptor;
}

////////// QuickTime-specific implementation //////////

unsigned QuickTimeFileSink::addWord64(u_int64_t word) {
  addByte((unsigned char)(word>>56)); addByte((unsigned char)(word>>48));
  addByte((unsigned char)(word>>40)); addByte((unsigned char)(word>>32));
  addByte((unsigned char)(word>>24)); addByte((unsigned char)(word>>16));
  addByte((unsigned char)(word>>8)); addByte((unsigned char)(word));

  return 8;
}

unsigned QuickTimeFileSink::addWord(unsigned word) {
  addByte(word>>24); addByte(word>>16);
  addByte(word>>8); addByte(word);

  return 4;
}

unsigned QuickTimeFileSink::addHalfWord(unsigned short halfWord) {
  addByte((unsigned char)(halfWord>>8)); addByte((unsigned char)halfWord);

  return 2;
}

unsigned QuickTimeFileSink::addZeroWords(unsigned numWords) {
  for (unsigned i = 0; i < numWords; ++i) {
    addWord(0);
  }

  return numWords*4;
}

unsigned QuickTimeFileSink::add4ByteString(char const* str) {
  addByte(str[0]); addByte(str[1]); addByte(str[2]); addByte(str[3]);

  return 4;
}

unsigned QuickTimeFileSink::addArbitraryString(char const* str, Boolean oneByteLength) {
  unsigned size = 0;
  if (oneByteLength) {
    // Begin with a byte containing the string length:
    unsigned strLength = strlen(str);
    if (strLength >= 256) {
      envir() << "QuickTimeFileSink::addArbitraryString(\"" << str
              << "\") saw string longer than we know how to handle ("
              << strLength << ")\n";
    }
    size += addByte((unsigned char)strLength);
  }

  while (*str != '\0') {
    size += addByte(*str++);
  }

  return size;
}

unsigned QuickTimeFileSink::addAtomHeader(char const* atomName) {
  // Output a placeholder for the 4-byte size:
  addWord(0);

  // Output the 4-byte atom name:
  add4ByteString(atomName);

  return 8;
}

unsigned QuickTimeFileSink::addAtomHeader64(char const* atomName) {
  // Output a 64-bit size marker:
  addWord(1);

  // Output the 4-byte atom name:
  add4ByteString(atomName);
  addWord64(0);

  return 16;
}

void QuickTimeFileSink::setWord(int64_t filePosn, unsigned size) {
  do {
    if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
    addWord(size);
    if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were

    return;
  } while (0);

  // One of the SeekFile64()s failed, probably because we're not writing to a seekable file
  envir() << "QuickTimeFileSink::setWord(): SeekFile64 failed (err "
          << envir().getErrno() << ")\n";
}

void QuickTimeFileSink::setWord64(int64_t filePosn, u_int64_t size) {
  do {
    if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
    addWord64(size);
    if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were

    return;
  } while (0);

  // One of the SeekFile64()s failed, probably because we're not writing to a seekable file
  envir() << "QuickTimeFileSink::setWord64(): SeekFile64 failed (err "
          << envir().getErrno() << ")\n";
}

// Methods for writing particular atoms.
Note the following macros: #define addAtom(name) \ unsigned QuickTimeFileSink::addAtom_##name() { \ int64_t initFilePosn = TellFile64(fOutFid); \ unsigned size = addAtomHeader("" #name "") #define addAtomEnd \ setWord(initFilePosn, size); \ return size; \ } addAtom(ftyp); size += add4ByteString("mp42"); size += addWord(0x00000000); size += add4ByteString("mp42"); size += add4ByteString("isom"); addAtomEnd; addAtom(moov); size += addAtom_mvhd(); if (fGenerateMP4Format) { size += addAtom_iods(); } // Add a 'trak' atom for each subsession: // (For some unknown reason, QuickTime Player (5.0 at least) // doesn't display the movie correctly unless the audio track // (if present) appears before the video track. So ensure this here.) MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { fCurrentIOState = (SubsessionIOState*)(subsession->miscPtr); if (fCurrentIOState == NULL) continue; if (strcmp(subsession->mediumName(), "audio") != 0) continue; size += addAtom_trak(); if (fCurrentIOState->hasHintTrack()) { // This track has a hint track; output it also: fCurrentIOState = fCurrentIOState->fHintTrackForUs; size += addAtom_trak(); } } iter.reset(); while ((subsession = iter.next()) != NULL) { fCurrentIOState = (SubsessionIOState*)(subsession->miscPtr); if (fCurrentIOState == NULL) continue; if (strcmp(subsession->mediumName(), "audio") == 0) continue; size += addAtom_trak(); if (fCurrentIOState->hasHintTrack()) { // This track has a hint track; output it also: fCurrentIOState = fCurrentIOState->fHintTrackForUs; size += addAtom_trak(); } } addAtomEnd; addAtom(mvhd); size += addWord(0x00000000); // Version + Flags size += addWord(fAppleCreationTime); // Creation time size += addWord(fAppleCreationTime); // Modification time // For the "Time scale" field, use the largest RTP timestamp frequency // that we saw in any of the subsessions. 
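// (Worked example: if the session contains 8000 Hz audio and 90000 Hz video,
// then movieTimeScale() - per the comment above - returns 90000, so a
// 2-second movie gets a "Duration" of 2*90000 == 180000 movie time units.)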
size += addWord(movieTimeScale()); // Time scale unsigned const duration = fMaxTrackDurationM; fMVHD_durationPosn = TellFile64(fOutFid); size += addWord(duration); // Duration size += addWord(0x00010000); // Preferred rate size += addWord(0x01000000); // Preferred volume + Reserved[0] size += addZeroWords(2); // Reserved[1-2] size += addWord(0x00010000); // matrix top left corner size += addZeroWords(3); // matrix size += addWord(0x00010000); // matrix center size += addZeroWords(3); // matrix size += addWord(0x40000000); // matrix bottom right corner size += addZeroWords(6); // various time fields size += addWord(SubsessionIOState::fCurrentTrackNumber+1);// Next track ID addAtomEnd; addAtom(iods); size += addWord(0x00000000); // Version + Flags size += addWord(0x10808080); size += addWord(0x07004FFF); size += addWord(0xFF0FFFFF); addAtomEnd; addAtom(trak); size += addAtom_tkhd(); // If we're synchronizing the media streams (or are a hint track), // add an edit list that helps do this: if (fCurrentIOState->fHeadChunk != NULL && (fSyncStreams || fCurrentIOState->isHintTrack())) { size += addAtom_edts(); } // If we're generating a hint track, add a 'tref' atom: if (fCurrentIOState->isHintTrack()) size += addAtom_tref(); size += addAtom_mdia(); // If we're generating a hint track, add a 'udta' atom: if (fCurrentIOState->isHintTrack()) size += addAtom_udta(); addAtomEnd; addAtom(tkhd); if (fCurrentIOState->fQTEnableTrack) { size += addWord(0x0000000F); // Version + Flags } else { // Disable this track in the movie: size += addWord(0x00000000); // Version + Flags } size += addWord(fAppleCreationTime); // Creation time size += addWord(fAppleCreationTime); // Modification time size += addWord(fCurrentIOState->fTrackID); // Track ID size += addWord(0x00000000); // Reserved unsigned const duration = fCurrentIOState->fQTDurationM; // movie units fCurrentIOState->fTKHD_durationPosn = TellFile64(fOutFid); size += addWord(duration); // Duration size += addZeroWords(3); // Reserved+Layer+Alternate grp size += addWord(0x01000000); // Volume + Reserved size += addWord(0x00010000); // matrix top left corner size += addZeroWords(3); // matrix size += addWord(0x00010000); // matrix center size += addZeroWords(3); // matrix size += addWord(0x40000000); // matrix bottom right corner if (strcmp(fCurrentIOState->fOurSubsession.mediumName(), "video") == 0) { size += addWord(fMovieWidth<<16); // Track width size += addWord(fMovieHeight<<16); // Track height } else { size += addZeroWords(2); // not video: leave width and height fields zero } addAtomEnd; addAtom(edts); size += addAtom_elst(); addAtomEnd; #define addEdit1(duration,trackPosition) do { \ unsigned trackDuration \ = (unsigned) ((2*(duration)*movieTimeScale()+1)/2); \ /* in movie time units */ \ size += addWord(trackDuration); /* Track duration */ \ totalDurationOfEdits += trackDuration; \ size += addWord(trackPosition); /* Media time */ \ size += addWord(0x00010000); /* Media rate (1x) */ \ ++numEdits; \ } while (0) #define addEdit(duration) addEdit1((duration),editTrackPosition) #define addEmptyEdit(duration) addEdit1((duration),(~0)) addAtom(elst); size += addWord(0x00000000); // Version + Flags // Add a dummy "Number of entries" field // (and remember its position). We'll fill this field in later: int64_t numEntriesPosition = TellFile64(fOutFid); size += addWord(0); // dummy for "Number of entries" unsigned numEdits = 0; unsigned totalDurationOfEdits = 0; // in movie time units // Run through our chunks, looking at their presentation times. 
// From these, figure out the edits that need to be made to keep // the track media data in sync with the presentation times. double const syncThreshold = 0.1; // 100 ms // don't allow the track to get out of sync by more than this struct timeval editStartTime = fFirstDataTime; unsigned editTrackPosition = 0; unsigned currentTrackPosition = 0; double trackDurationOfEdit = 0.0; unsigned chunkDuration = 0; ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk; while (chunk != NULL) { struct timeval const& chunkStartTime = chunk->fPresentationTime; double movieDurationOfEdit = (chunkStartTime.tv_sec - editStartTime.tv_sec) + (chunkStartTime.tv_usec - editStartTime.tv_usec)/1000000.0; trackDurationOfEdit = (currentTrackPosition-editTrackPosition) / (double)(fCurrentIOState->fQTTimeScale); double outOfSync = movieDurationOfEdit - trackDurationOfEdit; if (outOfSync > syncThreshold) { // The track's data is too short, so end this edit, add a new // 'empty' edit after it, and start a new edit // (at the current track posn.): if (trackDurationOfEdit > 0.0) addEdit(trackDurationOfEdit); addEmptyEdit(outOfSync); editStartTime = chunkStartTime; editTrackPosition = currentTrackPosition; } else if (outOfSync < -syncThreshold) { // The track's data is too long, so end this edit, and start // a new edit (pointing at the current track posn.): if (movieDurationOfEdit > 0.0) addEdit(movieDurationOfEdit); editStartTime = chunkStartTime; editTrackPosition = currentTrackPosition; } // Note the duration of this chunk: unsigned numChannels = fCurrentIOState->fOurSubsession.numChannels(); chunkDuration = chunk->fNumFrames*chunk->fFrameDuration/numChannels; currentTrackPosition += chunkDuration; chunk = chunk->fNextChunk; } // Write out the final edit trackDurationOfEdit += (double)chunkDuration/fCurrentIOState->fQTTimeScale; if (trackDurationOfEdit > 0.0) addEdit(trackDurationOfEdit); // Now go back and fill in the "Number of entries" field: setWord(numEntriesPosition, numEdits); // Also, if the sum of all of the edit durations exceeds the // track duration that we already computed (from sample durations), // then reset the track duration to this new value: if (totalDurationOfEdits > fCurrentIOState->fQTDurationM) { fCurrentIOState->fQTDurationM = totalDurationOfEdits; setWord(fCurrentIOState->fTKHD_durationPosn, totalDurationOfEdits); // Also, check whether the overall movie duration needs to change: if (totalDurationOfEdits > fMaxTrackDurationM) { fMaxTrackDurationM = totalDurationOfEdits; setWord(fMVHD_durationPosn, totalDurationOfEdits); } // Also, convert to track time scale: double scaleFactor = fCurrentIOState->fQTTimeScale/(double)movieTimeScale(); fCurrentIOState->fQTDurationT = (unsigned)(totalDurationOfEdits*scaleFactor); } addAtomEnd; addAtom(tref); size += addAtom_hint(); addAtomEnd; addAtom(hint); SubsessionIOState* hintedTrack = fCurrentIOState->fTrackHintedByUs; // Assert: hintedTrack != NULL size += addWord(hintedTrack->fTrackID); addAtomEnd; addAtom(mdia); size += addAtom_mdhd(); size += addAtom_hdlr(); size += addAtom_minf(); addAtomEnd; addAtom(mdhd); size += addWord(0x00000000); // Version + Flags size += addWord(fAppleCreationTime); // Creation time size += addWord(fAppleCreationTime); // Modification time unsigned const timeScale = fCurrentIOState->fQTTimeScale; size += addWord(timeScale); // Time scale unsigned const duration = fCurrentIOState->fQTDurationT; // track units size += addWord(duration); // Duration size += addWord(0x00000000); // Language+Quality addAtomEnd; addAtom(hdlr); size 
+= addWord(0x00000000); // Version + Flags
size += add4ByteString("mhlr"); // Component type
size += addWord(fCurrentIOState->fQTcomponentSubtype); // Component subtype
size += add4ByteString("appl"); // Component manufacturer
size += addWord(0x00000000); // Component flags
size += addWord(0x00000000); // Component flags mask
size += addArbitraryString(fCurrentIOState->fQTcomponentName); // Component name
addAtomEnd;

addAtom(minf);
  SubsessionIOState::atomCreationFunc mediaInformationAtomCreator
    = fCurrentIOState->fQTMediaInformationAtomCreator;
  size += (this->*mediaInformationAtomCreator)();
  size += addAtom_hdlr2();
  size += addAtom_dinf();
  size += addAtom_stbl();
addAtomEnd;

addAtom(smhd);
  size += addZeroWords(2); // Version+Flags+Balance+Reserved
addAtomEnd;

addAtom(vmhd);
  size += addWord(0x00000001); // Version + Flags
  size += addWord(0x00408000); // Graphics mode + Opcolor[red]
  size += addWord(0x80008000); // Opcolor[green] + Opcolor[blue]
addAtomEnd;

addAtom(gmhd);
  size += addAtom_gmin();
addAtomEnd;

addAtom(gmin);
  size += addWord(0x00000000); // Version + Flags
  // The following fields probably aren't used for hint tracks, so just
  // use values that I've seen in other files:
  size += addWord(0x00408000); // Graphics mode + Opcolor (1st 2 bytes)
  size += addWord(0x80008000); // Opcolor (last 4 bytes)
  size += addWord(0x00000000); // Balance + Reserved
addAtomEnd;

unsigned QuickTimeFileSink::addAtom_hdlr2() {
  int64_t initFilePosn = TellFile64(fOutFid);
  unsigned size = addAtomHeader("hdlr");
  size += addWord(0x00000000); // Version + Flags
  size += add4ByteString("dhlr"); // Component type
  size += add4ByteString("alis"); // Component subtype
  size += add4ByteString("appl"); // Component manufacturer
  size += addZeroWords(2); // Component flags+Component flags mask
  size += addArbitraryString("Apple Alias Data Handler"); // Component name
addAtomEnd;

addAtom(dinf);
  size += addAtom_dref();
addAtomEnd;

addAtom(dref);
  size += addWord(0x00000000); // Version + Flags
  size += addWord(0x00000001); // Number of entries
  size += addAtom_alis();
addAtomEnd;

addAtom(alis);
  size += addWord(0x00000001); // Version + Flags
addAtomEnd;

addAtom(stbl);
  size += addAtom_stsd();
  size += addAtom_stts();
  if (fCurrentIOState->fQTcomponentSubtype == fourChar('v','i','d','e')) {
    size += addAtom_stss(); // only for video streams
  }
  size += addAtom_stsc();
  size += addAtom_stsz();
  size += addAtom_co64();
addAtomEnd;

addAtom(stsd);
  size += addWord(0x00000000); // Version+Flags
  size += addWord(0x00000001); // Number of entries
  SubsessionIOState::atomCreationFunc mediaDataAtomCreator
    = fCurrentIOState->fQTMediaDataAtomCreator;
  size += (this->*mediaDataAtomCreator)();
addAtomEnd;

unsigned QuickTimeFileSink::addAtom_genericMedia() {
  int64_t initFilePosn = TellFile64(fOutFid);

  // Our source is assumed to be a "QuickTimeGenericRTPSource"
  // Use its "sdAtom" state for our contents:
  QuickTimeGenericRTPSource* rtpSource
    = (QuickTimeGenericRTPSource*)fCurrentIOState->fOurSubsession.rtpSource();
  QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState;
  char const* from = qtState.sdAtom;
  unsigned size = qtState.sdAtomSize;
  for (unsigned i = 0; i < size; ++i) addByte(from[i]);
addAtomEnd;

unsigned QuickTimeFileSink::addAtom_soundMediaGeneral() {
  int64_t initFilePosn = TellFile64(fOutFid);
  unsigned size = addAtomHeader(fCurrentIOState->fQTAudioDataType);

  // General sample description fields:
  size += addWord(0x00000000); // Reserved
  size += addWord(0x00000001); // Reserved+Data reference index
  // Sound sample description fields:
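  // (For context: a QuickTime sound sample description with version 0 ends
  // after the "Sample rate" field below, while version 1 - as used by "Qclp",
  // and by "mp4a" when not generating an MP4 file - is followed by four extra
  // 32-bit fields, "samples per packet" through "bytes per sample"; see
  // addAtom_Qclp() and addAtom_mp4a() below.)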
unsigned short const version = fCurrentIOState->fQTSoundSampleVersion; size += addWord(version<<16); // Version+Revision level size += addWord(0x00000000); // Vendor unsigned short numChannels = (unsigned short)(fCurrentIOState->fOurSubsession.numChannels()); size += addHalfWord(numChannels); // Number of channels size += addHalfWord(0x0010); // Sample size // size += addWord(0x00000000); // Compression ID+Packet size size += addWord(0xfffe0000); // Compression ID+Packet size ##### unsigned const sampleRateFixedPoint = fCurrentIOState->fQTTimeScale << 16; size += addWord(sampleRateFixedPoint); // Sample rate addAtomEnd; unsigned QuickTimeFileSink::addAtom_Qclp() { // The beginning of this atom looks just like a general Sound Media atom, // except with a version field of 1: int64_t initFilePosn = TellFile64(fOutFid); fCurrentIOState->fQTAudioDataType = "Qclp"; fCurrentIOState->fQTSoundSampleVersion = 1; unsigned size = addAtom_soundMediaGeneral(); // Next, add the four fields that are particular to version 1: // (Later, parameterize these #####) size += addWord(0x000000a0); // samples per packet size += addWord(0x00000000); // ??? size += addWord(0x00000000); // ??? size += addWord(0x00000002); // bytes per sample (uncompressed) // Other special fields are in a 'wave' atom that follows: size += addAtom_wave(); addAtomEnd; addAtom(wave); size += addAtom_frma(); if (strcmp(fCurrentIOState->fQTAudioDataType, "Qclp") == 0) { size += addWord(0x00000014); // ??? size += add4ByteString("Qclp"); // ??? if (fCurrentIOState->fQTBytesPerFrame == 35) { size += addAtom_Fclp(); // full-rate QCELP } else { size += addAtom_Hclp(); // half-rate QCELP } // what about other QCELP 'rates'??? ##### size += addWord(0x00000008); // ??? size += addWord(0x00000000); // ??? size += addWord(0x00000000); // ??? size += addWord(0x00000008); // ??? } else if (strcmp(fCurrentIOState->fQTAudioDataType, "mp4a") == 0) { size += addWord(0x0000000c); // ??? size += add4ByteString("mp4a"); // ??? size += addWord(0x00000000); // ??? size += addAtom_esds(); // ESDescriptor size += addWord(0x00000008); // ??? size += addWord(0x00000000); // ??? } addAtomEnd; addAtom(frma); size += add4ByteString(fCurrentIOState->fQTAudioDataType); // ??? addAtomEnd; addAtom(Fclp); size += addWord(0x00000000); // ??? addAtomEnd; addAtom(Hclp); size += addWord(0x00000000); // ??? addAtomEnd; unsigned QuickTimeFileSink::addAtom_mp4a() { unsigned size = 0; // The beginning of this atom looks just like a general Sound Media atom, // except with a version field of 1: int64_t initFilePosn = TellFile64(fOutFid); fCurrentIOState->fQTAudioDataType = "mp4a"; if (fGenerateMP4Format) { fCurrentIOState->fQTSoundSampleVersion = 0; size = addAtom_soundMediaGeneral(); size += addAtom_esds(); } else { fCurrentIOState->fQTSoundSampleVersion = 1; size = addAtom_soundMediaGeneral(); // Next, add the four fields that are particular to version 1: // (Later, parameterize these #####) size += addWord(fCurrentIOState->fQTTimeUnitsPerSample); size += addWord(0x00000001); // ??? size += addWord(0x00000001); // ??? size += addWord(0x00000002); // bytes per sample (uncompressed) // Other special fields are in a 'wave' atom that follows: size += addAtom_wave(); } addAtomEnd; addAtom(esds); //##### MediaSubsession& subsession = fCurrentIOState->fOurSubsession; if (strcmp(subsession.mediumName(), "audio") == 0) { // MPEG-4 audio size += addWord(0x00000000); // ??? size += addWord(0x03808080); // ??? size += addWord(0x2a000000); // ??? size += addWord(0x04808080); // ??? 
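// (The hard-coded words above and below appear to spell out an MPEG-4
// ES_Descriptor by hand: 0x03 is the ES_DescrTag, 0x04 the
// DecoderConfigDescrTag, 0x05 (further below) the DecSpecificInfoTag that
// introduces the codec 'config' bytes, and 0x06 the SLConfigDescrTag; the
// 0x80 0x80 0x80 sequences are multi-byte length encodings.)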
size += addWord(0x1c401500); // ??? size += addWord(0x18000000); // ??? size += addWord(0x6d600000); // ??? size += addWord(0x6d600580); // ??? size += addByte(0x80); size += addByte(0x80); // ??? } else if (strcmp(subsession.mediumName(), "video") == 0) { // MPEG-4 video size += addWord(0x00000000); // ??? size += addWord(0x03330000); // ??? size += addWord(0x1f042b20); // ??? size += addWord(0x1104fd46); // ??? size += addWord(0x000d4e10); // ??? size += addWord(0x000d4e10); // ??? size += addByte(0x05); // ??? } // Add the source's 'config' information: unsigned configSize; unsigned char* config = parseGeneralConfigStr(subsession.fmtp_config(), configSize); size += addByte(configSize); for (unsigned i = 0; i < configSize; ++i) { size += addByte(config[i]); } delete[] config; if (strcmp(subsession.mediumName(), "audio") == 0) { // MPEG-4 audio size += addWord(0x06808080); // ??? size += addHalfWord(0x0102); // ??? } else { // MPEG-4 video size += addHalfWord(0x0601); // ??? size += addByte(0x02); // ??? } //##### addAtomEnd; addAtom(srcq); //##### size += addWord(0x00000040); // ??? //##### addAtomEnd; addAtom(h263); // General sample description fields: size += addWord(0x00000000); // Reserved size += addWord(0x00000001); // Reserved+Data reference index // Video sample description fields: size += addWord(0x00020001); // Version+Revision level size += add4ByteString("appl"); // Vendor size += addWord(0x00000000); // Temporal quality size += addWord(0x000002fc); // Spatial quality unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight; size += addWord(widthAndHeight); // Width+height size += addWord(0x00480000); // Horizontal resolution size += addWord(0x00480000); // Vertical resolution size += addWord(0x00000000); // Data size size += addWord(0x00010548); // Frame count+Compressor name (start) // "H.263" size += addWord(0x2e323633); // Compressor name (continued) size += addZeroWords(6); // Compressor name (continued - zero) size += addWord(0x00000018); // Compressor name (final)+Depth size += addHalfWord(0xffff); // Color table id addAtomEnd; addAtom(avc1); // General sample description fields: size += addWord(0x00000000); // Reserved size += addWord(0x00000001); // Reserved+Data reference index // Video sample description fields: size += addWord(0x00000000); // Version+Revision level size += add4ByteString("appl"); // Vendor size += addWord(0x00000000); // Temporal quality size += addWord(0x00000000); // Spatial quality unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight; size += addWord(widthAndHeight); // Width+height size += addWord(0x00480000); // Horizontal resolution size += addWord(0x00480000); // Vertical resolution size += addWord(0x00000000); // Data size size += addWord(0x00010548); // Frame count+Compressor name (start) // "H.264" size += addWord(0x2e323634); // Compressor name (continued) size += addZeroWords(6); // Compressor name (continued - zero) size += addWord(0x00000018); // Compressor name (final)+Depth size += addHalfWord(0xffff); // Color table id size += addAtom_avcC(); addAtomEnd; addAtom(avcC); // Begin by Base-64 decoding the "sprop" parameter sets strings: char* psets = strDup(fCurrentIOState->fOurSubsession.fmtp_spropparametersets()); if (psets == NULL) return 0; size_t comma_pos = strcspn(psets, ","); psets[comma_pos] = '\0'; char const* sps_b64 = psets; char const* pps_b64 = &psets[comma_pos+1]; unsigned sps_count; unsigned char* sps_data = base64Decode(sps_b64, sps_count, false); unsigned pps_count; unsigned char* pps_data = 
base64Decode(pps_b64, pps_count, false); // Then add the decoded data: size += addByte(0x01); // configuration version size += addByte(sps_data[1]); // profile size += addByte(sps_data[2]); // profile compat size += addByte(sps_data[3]); // level size += addByte(0xff); /* 0b11111100 | lengthsize = 0x11 */ size += addByte(0xe0 | (sps_count > 0 ? 1 : 0) ); if (sps_count > 0) { size += addHalfWord(sps_count); for (unsigned i = 0; i < sps_count; i++) { size += addByte(sps_data[i]); } } size += addByte(pps_count > 0 ? 1 : 0); if (pps_count > 0) { size += addHalfWord(pps_count); for (unsigned i = 0; i < pps_count; i++) { size += addByte(pps_data[i]); } } // Finally, delete the data that we allocated: delete[] pps_data; delete[] sps_data; delete[] psets; addAtomEnd; addAtom(mp4v); // General sample description fields: size += addWord(0x00000000); // Reserved size += addWord(0x00000001); // Reserved+Data reference index // Video sample description fields: size += addWord(0x00020001); // Version+Revision level size += add4ByteString("appl"); // Vendor size += addWord(0x00000200); // Temporal quality size += addWord(0x00000400); // Spatial quality unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight; size += addWord(widthAndHeight); // Width+height size += addWord(0x00480000); // Horizontal resolution size += addWord(0x00480000); // Vertical resolution size += addWord(0x00000000); // Data size size += addWord(0x00010c4d); // Frame count+Compressor name (start) // "MPEG-4 Video" size += addWord(0x5045472d); // Compressor name (continued) size += addWord(0x34205669); // Compressor name (continued) size += addWord(0x64656f00); // Compressor name (continued) size += addZeroWords(4); // Compressor name (continued - zero) size += addWord(0x00000018); // Compressor name (final)+Depth size += addHalfWord(0xffff); // Color table id size += addAtom_esds(); // ESDescriptor size += addWord(0x00000000); // ??? addAtomEnd; unsigned QuickTimeFileSink::addAtom_rtp() { int64_t initFilePosn = TellFile64(fOutFid); unsigned size = addAtomHeader("rtp "); size += addWord(0x00000000); // Reserved (1st 4 bytes) size += addWord(0x00000001); // Reserved (last 2 bytes) + Data ref index size += addWord(0x00010001); // Hint track version + Last compat htv size += addWord(1450); // Max packet size size += addAtom_tims(); addAtomEnd; addAtom(tims); size += addWord(fCurrentIOState->fOurSubsession.rtpTimestampFrequency()); addAtomEnd; addAtom(stts); // Time-to-Sample size += addWord(0x00000000); // Version+flags // First, add a dummy "Number of entries" field // (and remember its position). 
We'll fill this field in later:
  int64_t numEntriesPosition = TellFile64(fOutFid);
  size += addWord(0); // dummy for "Number of entries"

  // Then, run through the chunk descriptors, and enter the entries
  // in this (compressed) Time-to-Sample table:
  unsigned numEntries = 0, numSamplesSoFar = 0;
  unsigned prevSampleDuration = 0;
  unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
  ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
  while (chunk != NULL) {
    unsigned const sampleDuration = chunk->fFrameDuration/samplesPerFrame;
    if (sampleDuration != prevSampleDuration) {
      // This chunk will start a new table entry,
      // so write out the old one (if any):
      if (chunk != fCurrentIOState->fHeadChunk) {
        ++numEntries;
        size += addWord(numSamplesSoFar); // Sample count
        size += addWord(prevSampleDuration); // Sample duration
        numSamplesSoFar = 0;
      }
    }

    unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
    numSamplesSoFar += numSamples;
    prevSampleDuration = sampleDuration;
    chunk = chunk->fNextChunk;
  }

  // Then, write out the last entry:
  ++numEntries;
  size += addWord(numSamplesSoFar); // Sample count
  size += addWord(prevSampleDuration); // Sample duration

  // Now go back and fill in the "Number of entries" field:
  setWord(numEntriesPosition, numEntries);
addAtomEnd;

addAtom(stss); // Sync-Sample
  size += addWord(0x00000000); // Version+flags

  // First, add a dummy "Number of entries" field
  // (and remember its position). We'll fill this field in later:
  int64_t numEntriesPosition = TellFile64(fOutFid);
  size += addWord(0); // dummy for "Number of entries"

  unsigned numEntries = 0, numSamplesSoFar = 0;
  if (fCurrentIOState->fHeadSyncFrame != NULL) {
    SyncFrame* currentSyncFrame = fCurrentIOState->fHeadSyncFrame;
    while (currentSyncFrame != NULL) {
      ++numEntries;
      size += addWord(currentSyncFrame->sfFrameNum);
      currentSyncFrame = currentSyncFrame->nextSyncFrame;
    }
  } else {
    // Then, run through the chunk descriptors, counting up the total number of samples:
    unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
    ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
    while (chunk != NULL) {
      unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
      numSamplesSoFar += numSamples;
      chunk = chunk->fNextChunk;
    }

    // Then, write out the sample numbers that we deem correspond to 'sync samples':
    unsigned i;
    for (i = 0; i < numSamplesSoFar; i += 12) {
      // For an explanation of the constant "12", see
      // http://lists.live555.com/pipermail/live-devel/2009-July/010969.html
      // (Perhaps we should really try to keep track of which 'samples'
      // ('frames' for video) really are 'key frames'?)
      size += addWord(i+1);
      ++numEntries;
    }

    // Then, write out the last entry (if we haven't already done so):
    if (i != (numSamplesSoFar - 1)) {
      size += addWord(numSamplesSoFar);
      ++numEntries;
    }
  }

  // Now go back and fill in the "Number of entries" field:
  setWord(numEntriesPosition, numEntries);
addAtomEnd;

addAtom(stsc); // Sample-to-Chunk
  size += addWord(0x00000000); // Version+flags

  // First, add a dummy "Number of entries" field
  // (and remember its position).
We'll fill this field in later: int64_t numEntriesPosition = TellFile64(fOutFid); size += addWord(0); // dummy for "Number of entries" // Then, run through the chunk descriptors, and enter the entries // in this (compressed) Sample-to-Chunk table: unsigned numEntries = 0, chunkNumber = 0; unsigned prevSamplesPerChunk = ~0; unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame; ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk; while (chunk != NULL) { ++chunkNumber; unsigned const samplesPerChunk = chunk->fNumFrames*samplesPerFrame; if (samplesPerChunk != prevSamplesPerChunk) { // This chunk will be a new table entry: ++numEntries; size += addWord(chunkNumber); // Chunk number size += addWord(samplesPerChunk); // Samples per chunk size += addWord(0x00000001); // Sample description ID prevSamplesPerChunk = samplesPerChunk; } chunk = chunk->fNextChunk; } // Now go back and fill in the "Number of entries" field: setWord(numEntriesPosition, numEntries); addAtomEnd; addAtom(stsz); // Sample Size size += addWord(0x00000000); // Version+flags // Begin by checking whether our chunks all have the same // 'bytes-per-sample'. This determines whether this atom's table // has just a single entry, or multiple entries. Boolean haveSingleEntryTable = True; double firstBPS = 0.0; ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk; while (chunk != NULL) { double bps = (double)(chunk->fFrameSize)/(fCurrentIOState->fQTSamplesPerFrame); if (bps < 1.0) { // I don't think a multiple-entry table would make sense in // this case, so assume a single entry table ??? ##### break; } if (firstBPS == 0.0) { firstBPS = bps; } else if (bps != firstBPS) { haveSingleEntryTable = False; break; } chunk = chunk->fNextChunk; } unsigned sampleSize; if (haveSingleEntryTable) { if (fCurrentIOState->isHintTrack() && fCurrentIOState->fHeadChunk != NULL) { sampleSize = fCurrentIOState->fHeadChunk->fFrameSize / fCurrentIOState->fQTSamplesPerFrame; } else { // The following doesn't seem right, but seems to do the right thing: sampleSize = fCurrentIOState->fQTTimeUnitsPerSample; //??? 
} } else { sampleSize = 0; // indicates a multiple-entry table } size += addWord(sampleSize); // Sample size unsigned const totNumSamples = fCurrentIOState->fQTTotNumSamples; size += addWord(totNumSamples); // Number of entries if (!haveSingleEntryTable) { // Multiple-entry table: // Run through the chunk descriptors, entering the sample sizes: ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk; while (chunk != NULL) { unsigned numSamples = chunk->fNumFrames*(fCurrentIOState->fQTSamplesPerFrame); unsigned sampleSize = chunk->fFrameSize/(fCurrentIOState->fQTSamplesPerFrame); for (unsigned i = 0; i < numSamples; ++i) { size += addWord(sampleSize); } chunk = chunk->fNextChunk; } } addAtomEnd; addAtom(co64); // Chunk Offset size += addWord(0x00000000); // Version+flags size += addWord(fCurrentIOState->fNumChunks); // Number of entries // Run through the chunk descriptors, entering the file offsets: ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk; while (chunk != NULL) { size += addWord64(chunk->fOffsetInFile); chunk = chunk->fNextChunk; } addAtomEnd; addAtom(udta); size += addAtom_name(); size += addAtom_hnti(); size += addAtom_hinf(); addAtomEnd; addAtom(name); char description[100]; sprintf(description, "Hinted %s track", fCurrentIOState->fOurSubsession.mediumName()); size += addArbitraryString(description, False); // name of object addAtomEnd; addAtom(hnti); size += addAtom_sdp(); addAtomEnd; unsigned QuickTimeFileSink::addAtom_sdp() { int64_t initFilePosn = TellFile64(fOutFid); unsigned size = addAtomHeader("sdp "); // Add this subsession's SDP lines: char const* sdpLines = fCurrentIOState->fOurSubsession.savedSDPLines(); // We need to change any "a=control:trackID=" values to be this // track's actual track id: char* newSDPLines = new char[strlen(sdpLines)+100/*overkill*/]; char const* searchStr = "a=control:trackid="; Boolean foundSearchString = False; char const *p1, *p2, *p3; for (p1 = sdpLines; *p1 != '\0'; ++p1) { for (p2 = p1,p3 = searchStr; tolower(*p2) == *p3; ++p2,++p3) {} if (*p3 == '\0') { // We found the end of the search string, at p2. int beforeTrackNumPosn = p2-sdpLines; // Look for the subsequent track number, and skip over it: int trackNumLength; if (sscanf(p2, " %*d%n", &trackNumLength) < 0) break; int afterTrackNumPosn = beforeTrackNumPosn + trackNumLength; // Replace the old track number with the correct one: int i; for (i = 0; i < beforeTrackNumPosn; ++i) newSDPLines[i] = sdpLines[i]; sprintf(&newSDPLines[i], "%d", fCurrentIOState->fTrackID); i = afterTrackNumPosn; int j = i + strlen(&newSDPLines[i]); while (1) { if ((newSDPLines[j] = sdpLines[i]) == '\0') break; ++i; ++j; } foundSearchString = True; break; } } if (!foundSearchString) { // Because we didn't find a "a=control:trackID=" line, // add one of our own: sprintf(newSDPLines, "%s%s%d\r\n", sdpLines, searchStr, fCurrentIOState->fTrackID); } size += addArbitraryString(newSDPLines, False); delete[] newSDPLines; addAtomEnd; addAtom(hinf); size += addAtom_totl(); size += addAtom_npck(); size += addAtom_tpay(); size += addAtom_trpy(); size += addAtom_nump(); size += addAtom_tpyl(); // Is 'maxr' required? 
#####
size += addAtom_dmed();
size += addAtom_dimm();
size += addAtom_drep();
size += addAtom_tmin();
size += addAtom_tmax();
size += addAtom_pmax();
size += addAtom_dmax();
size += addAtom_payt();
addAtomEnd;

addAtom(totl);
  size += addWord(fCurrentIOState->fHINF.trpy.lo);
addAtomEnd;

addAtom(npck);
  size += addWord(fCurrentIOState->fHINF.nump.lo);
addAtomEnd;

addAtom(tpay);
  size += addWord(fCurrentIOState->fHINF.tpyl.lo);
addAtomEnd;

addAtom(trpy);
  size += addWord(fCurrentIOState->fHINF.trpy.hi);
  size += addWord(fCurrentIOState->fHINF.trpy.lo);
addAtomEnd;

addAtom(nump);
  size += addWord(fCurrentIOState->fHINF.nump.hi);
  size += addWord(fCurrentIOState->fHINF.nump.lo);
addAtomEnd;

addAtom(tpyl);
  size += addWord(fCurrentIOState->fHINF.tpyl.hi);
  size += addWord(fCurrentIOState->fHINF.tpyl.lo);
addAtomEnd;

addAtom(dmed);
  size += addWord(fCurrentIOState->fHINF.dmed.hi);
  size += addWord(fCurrentIOState->fHINF.dmed.lo);
addAtomEnd;

addAtom(dimm);
  size += addWord(fCurrentIOState->fHINF.dimm.hi);
  size += addWord(fCurrentIOState->fHINF.dimm.lo);
addAtomEnd;

addAtom(drep);
  size += addWord(0);
  size += addWord(0);
addAtomEnd;

addAtom(tmin);
  size += addWord(0);
addAtomEnd;

addAtom(tmax);
  size += addWord(0);
addAtomEnd;

addAtom(pmax);
  size += addWord(fCurrentIOState->fHINF.pmax);
addAtomEnd;

addAtom(dmax);
  size += addWord(fCurrentIOState->fHINF.dmax);
addAtomEnd;

addAtom(payt);
  MediaSubsession& ourSubsession = fCurrentIOState->fOurSubsession;
  RTPSource* rtpSource = ourSubsession.rtpSource();
  size += addWord(rtpSource->rtpPayloadFormat());

  // Also, add a 'rtpmap' string: <codec-name>/<timestamp-frequency>
  unsigned rtpmapStringLength = strlen(ourSubsession.codecName()) + 20;
  char* rtpmapString = new char[rtpmapStringLength];
  sprintf(rtpmapString, "%s/%d",
          ourSubsession.codecName(), rtpSource->timestampFrequency());
  size += addArbitraryString(rtpmapString);
  delete[] rtpmapString;
addAtomEnd;

// A dummy atom (with name "????"):
unsigned QuickTimeFileSink::addAtom_dummy() {
  int64_t initFilePosn = TellFile64(fOutFid);
  unsigned size = addAtomHeader("????");
addAtomEnd;
live/liveMedia/MPEG2IndexFromTransportStream.cpp000444 001751 000000 00000057162 12265042432 022074 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A filter that produces a sequence of I-frame indices from an MPEG-2 Transport Stream
// Implementation

#include "MPEG2IndexFromTransportStream.hh"

////////// IndexRecord definition //////////

enum RecordType {
  RECORD_UNPARSED = 0,
  RECORD_VSH = 1, // an MPEG Video Sequence Header
  RECORD_GOP = 2,
  RECORD_PIC_NON_IFRAME = 3, // includes slices
  RECORD_PIC_IFRAME = 4, // includes slices
  RECORD_NAL_H264_SPS = 5, // H.264
  RECORD_NAL_H264_PPS = 6, // H.264
  RECORD_NAL_H264_SEI = 7, // H.264
  RECORD_NAL_H264_NON_IFRAME = 8, // H.264
  RECORD_NAL_H264_IFRAME = 9, // H.264
  RECORD_NAL_H264_OTHER = 10, // H.264
  RECORD_NAL_H265_VPS = 11, // H.265
  RECORD_NAL_H265_SPS = 12, // H.265
  RECORD_NAL_H265_PPS = 13, // H.265
  RECORD_NAL_H265_NON_IFRAME = 14, // H.265
  RECORD_NAL_H265_IFRAME = 15, // H.265
  RECORD_NAL_H265_OTHER = 16, // H.265
  RECORD_JUNK
};

class IndexRecord {
public:
  IndexRecord(u_int8_t startOffset, u_int8_t size,
              unsigned long transportPacketNumber, float pcr);
  virtual ~IndexRecord();

  RecordType& recordType() { return fRecordType; }
  void setFirstFlag() { fRecordType = (RecordType)(((u_int8_t)fRecordType) | 0x80); }

  u_int8_t startOffset() const { return fStartOffset; }

  u_int8_t& size() { return fSize; }

  float pcr() const { return fPCR; }

  unsigned long transportPacketNumber() const { return fTransportPacketNumber; }

  IndexRecord* next() const { return fNext; }
  void addAfter(IndexRecord* prev);
  void unlink();

private:
  // Index records are maintained in a doubly-linked list:
  IndexRecord* fNext;
  IndexRecord* fPrev;

  RecordType fRecordType;
  u_int8_t fStartOffset; // within the Transport Stream packet
  u_int8_t fSize; // in bytes, following "fStartOffset".
  // Note: fStartOffset + fSize <= TRANSPORT_PACKET_SIZE
  float fPCR;
  unsigned long fTransportPacketNumber;
};

#ifdef DEBUG
static char const* recordTypeStr[] = {
  "UNPARSED",
  "VSH",
  "GOP",
  "PIC(non-I-frame)",
  "PIC(I-frame)",
  "SPS (H.264)",
  "PPS (H.264)",
  "SEI (H.264)",
  "H.264 non-I-frame",
  "H.264 I-frame",
  "other NAL unit (H.264)",
  "VPS (H.265)",
  "SPS (H.265)",
  "PPS (H.265)",
  "H.265 non-I-frame",
  "H.265 I-frame",
  "other NAL unit (H.265)",
  "JUNK"
};

UsageEnvironment& operator<<(UsageEnvironment& env, IndexRecord& r) {
  return env << "[" << ((r.recordType()&0x80) != 0 ?
"1" : "") << recordTypeStr[r.recordType()&0x7F] << ":" << (unsigned)r.transportPacketNumber() << ":" << r.startOffset() << "(" << r.size() << ")@" << r.pcr() << "]"; } #endif ////////// MPEG2IFrameIndexFromTransportStream implementation ////////// MPEG2IFrameIndexFromTransportStream* MPEG2IFrameIndexFromTransportStream::createNew(UsageEnvironment& env, FramedSource* inputSource) { return new MPEG2IFrameIndexFromTransportStream(env, inputSource); } // The largest expected frame size (in bytes): #define MAX_FRAME_SIZE 400000 // Make our parse buffer twice as large as this, to ensure that at least one // complete frame will fit inside it: #define PARSE_BUFFER_SIZE (2*MAX_FRAME_SIZE) // The PID used for the PAT (as defined in the MPEG Transport Stream standard): #define PAT_PID 0 MPEG2IFrameIndexFromTransportStream ::MPEG2IFrameIndexFromTransportStream(UsageEnvironment& env, FramedSource* inputSource) : FramedFilter(env, inputSource), fIsH264(False), fIsH265(False), fInputTransportPacketCounter((unsigned)-1), fClosureNumber(0), fLastContinuityCounter(~0), fFirstPCR(0.0), fLastPCR(0.0), fHaveSeenFirstPCR(False), fPMT_PID(0x10), fVideo_PID(0xE0), // default values fParseBufferSize(PARSE_BUFFER_SIZE), fParseBufferFrameStart(0), fParseBufferParseEnd(4), fParseBufferDataEnd(0), fHeadIndexRecord(NULL), fTailIndexRecord(NULL) { fParseBuffer = new unsigned char[fParseBufferSize]; } MPEG2IFrameIndexFromTransportStream::~MPEG2IFrameIndexFromTransportStream() { delete fHeadIndexRecord; delete[] fParseBuffer; } void MPEG2IFrameIndexFromTransportStream::doGetNextFrame() { // Begin by trying to deliver an index record (for an already-parsed frame) // to the client: if (deliverIndexRecord()) return; // No more index records are left to deliver, so try to parse a new frame: if (parseFrame()) { // success - try again doGetNextFrame(); return; } // We need to read some more Transport Stream packets. Check whether we have room: if (fParseBufferSize - fParseBufferDataEnd < TRANSPORT_PACKET_SIZE) { // There's no room left. 
Compact the buffer, and check again: compactParseBuffer(); if (fParseBufferSize - fParseBufferDataEnd < TRANSPORT_PACKET_SIZE) { envir() << "ERROR: parse buffer full; increase MAX_FRAME_SIZE\n"; // Treat this as if the input source ended: handleInputClosure1(); return; } } // Arrange to read a new Transport Stream packet: fInputSource->getNextFrame(fInputBuffer, sizeof fInputBuffer, afterGettingFrame, this, handleInputClosure, this); } void MPEG2IFrameIndexFromTransportStream ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { MPEG2IFrameIndexFromTransportStream* source = (MPEG2IFrameIndexFromTransportStream*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } #define TRANSPORT_SYNC_BYTE 0x47 void MPEG2IFrameIndexFromTransportStream ::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { if (frameSize < TRANSPORT_PACKET_SIZE || fInputBuffer[0] != TRANSPORT_SYNC_BYTE) { if (fInputBuffer[0] != TRANSPORT_SYNC_BYTE) { envir() << "Bad TS sync byte: 0x" << fInputBuffer[0] << "\n"; } // Handle this as if the source ended: handleInputClosure1(); return; } ++fInputTransportPacketCounter; // Figure out how much of this Transport Packet contains PES data: u_int8_t adaptation_field_control = (fInputBuffer[3]&0x30)>>4; u_int8_t totalHeaderSize = adaptation_field_control == 1 ? 4 : 5 + fInputBuffer[4]; // Check for a PCR: if (totalHeaderSize > 5 && (fInputBuffer[5]&0x10) != 0) { // There's a PCR: u_int32_t pcrBaseHigh = (fInputBuffer[6]<<24)|(fInputBuffer[7]<<16) |(fInputBuffer[8]<<8)|fInputBuffer[9]; float pcr = pcrBaseHigh/45000.0f; if ((fInputBuffer[10]&0x80) != 0) pcr += 1/90000.0f; // add in low-bit (if set) unsigned short pcrExt = ((fInputBuffer[10]&0x01)<<8) | fInputBuffer[11]; pcr += pcrExt/27000000.0f; if (!fHaveSeenFirstPCR) { fFirstPCR = pcr; fHaveSeenFirstPCR = True; } else if (pcr < fLastPCR) { // The PCR timestamp has gone backwards. Display a warning about this // (because it indicates buggy Transport Stream data), and compensate for it. 
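// (How the compensation below works: each index record is stamped with
// "fLastPCR - fFirstPCR", so subtracting the size of the backward jump from
// "fFirstPCR" keeps that difference continuous. For example, if the PCR jumps
// from 100.0 back to 40.0, "fFirstPCR" is reduced by 60.0, and subsequent
// records continue from ~100.0 seconds into the file.)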
envir() << "\nWarning: At about " << fLastPCR-fFirstPCR << " seconds into the file, the PCR timestamp decreased - from " << fLastPCR << " to " << pcr << "\n"; fFirstPCR -= (fLastPCR - pcr); } fLastPCR = pcr; } // Get the PID from the packet, and check for special tables: the PAT and PMT: u_int16_t PID = ((fInputBuffer[1]&0x1F)<<8) | fInputBuffer[2]; if (PID == PAT_PID) { analyzePAT(&fInputBuffer[totalHeaderSize], TRANSPORT_PACKET_SIZE-totalHeaderSize); } else if (PID == fPMT_PID) { analyzePMT(&fInputBuffer[totalHeaderSize], TRANSPORT_PACKET_SIZE-totalHeaderSize); } // Ignore transport packets for non-video programs, // or packets with no data, or packets that duplicate the previous packet: u_int8_t continuity_counter = fInputBuffer[3]&0x0F; if ((PID != fVideo_PID) || !(adaptation_field_control == 1 || adaptation_field_control == 3) || continuity_counter == fLastContinuityCounter) { doGetNextFrame(); return; } fLastContinuityCounter = continuity_counter; // Also, if this is the start of a PES packet, then skip over the PES header: Boolean payload_unit_start_indicator = (fInputBuffer[1]&0x40) != 0; if (payload_unit_start_indicator) { // Note: The following works only for MPEG-2 data ##### u_int8_t PES_header_data_length = fInputBuffer[totalHeaderSize+8]; totalHeaderSize += 9 + PES_header_data_length; if (totalHeaderSize >= TRANSPORT_PACKET_SIZE) { envir() << "Unexpectedly large PES header size: " << PES_header_data_length << "\n"; // Handle this as if the source ended: handleInputClosure1(); return; } } // The remaining data is Video Elementary Stream data. Add it to our parse buffer: unsigned vesSize = TRANSPORT_PACKET_SIZE - totalHeaderSize; memmove(&fParseBuffer[fParseBufferDataEnd], &fInputBuffer[totalHeaderSize], vesSize); fParseBufferDataEnd += vesSize; // And add a new index record noting where it came from: addToTail(new IndexRecord(totalHeaderSize, vesSize, fInputTransportPacketCounter, fLastPCR - fFirstPCR)); // Try again: doGetNextFrame(); } void MPEG2IFrameIndexFromTransportStream::handleInputClosure(void* clientData) { MPEG2IFrameIndexFromTransportStream* source = (MPEG2IFrameIndexFromTransportStream*)clientData; source->handleInputClosure1(); } #define VIDEO_SEQUENCE_START_CODE 0xB3 // MPEG-1 or 2 #define VISUAL_OBJECT_SEQUENCE_START_CODE 0xB0 // MPEG-4 #define GROUP_START_CODE 0xB8 // MPEG-1 or 2 #define GROUP_VOP_START_CODE 0xB3 // MPEG-4 #define PICTURE_START_CODE 0x00 // MPEG-1 or 2 #define VOP_START_CODE 0xB6 // MPEG-4 void MPEG2IFrameIndexFromTransportStream::handleInputClosure1() { if (++fClosureNumber == 1 && fParseBufferDataEnd > fParseBufferFrameStart && fParseBufferDataEnd <= fParseBufferSize - 4) { // This is the first time we saw EOF, and there's still data remaining to be // parsed. Hack: Append a Picture Header code to the end of the unparsed // data, and try again. This should use up all of the unparsed data. 
fParseBuffer[fParseBufferDataEnd++] = 0; fParseBuffer[fParseBufferDataEnd++] = 0; fParseBuffer[fParseBufferDataEnd++] = 1; fParseBuffer[fParseBufferDataEnd++] = PICTURE_START_CODE; // Try again: doGetNextFrame(); } else { // Handle closure in the regular way: FramedSource::handleClosure(this); } } void MPEG2IFrameIndexFromTransportStream ::analyzePAT(unsigned char* pkt, unsigned size) { // Get the PMT_PID: while (size >= 17) { // The table is large enough u_int16_t program_number = (pkt[9]<<8) | pkt[10]; if (program_number != 0) { fPMT_PID = ((pkt[11]&0x1F)<<8) | pkt[12]; return; } pkt += 4; size -= 4; } } void MPEG2IFrameIndexFromTransportStream ::analyzePMT(unsigned char* pkt, unsigned size) { // Scan the "elementary_PID"s in the map, until we see the first video stream. // First, get the "section_length", to get the table's size: u_int16_t section_length = ((pkt[2]&0x0F)<<8) | pkt[3]; if ((unsigned)(4+section_length) < size) size = (4+section_length); // Then, skip any descriptors following the "program_info_length": if (size < 22) return; // not enough data unsigned program_info_length = ((pkt[11]&0x0F)<<8) | pkt[12]; pkt += 13; size -= 13; if (size < program_info_length) return; // not enough data pkt += program_info_length; size -= program_info_length; // Look at each ("stream_type","elementary_PID") pair, looking for a video stream: while (size >= 9) { u_int8_t stream_type = pkt[0]; u_int16_t elementary_PID = ((pkt[1]&0x1F)<<8) | pkt[2]; if (stream_type == 1 || stream_type == 2 || stream_type == 0x1B/*H.264 video*/ || stream_type == 0x24/*H.265 video*/) { if (stream_type == 0x1B) fIsH264 = True; else if (stream_type == 0x24) fIsH265 = True; fVideo_PID = elementary_PID; return; } u_int16_t ES_info_length = ((pkt[3]&0x0F)<<8) | pkt[4]; pkt += 5; size -= 5; if (size < ES_info_length) return; // not enough data pkt += ES_info_length; size -= ES_info_length; } } Boolean MPEG2IFrameIndexFromTransportStream::deliverIndexRecord() { IndexRecord* head = fHeadIndexRecord; if (head == NULL) return False; // Check whether the head record has been parsed yet: if (head->recordType() == RECORD_UNPARSED) return False; // Remove the head record (the one whose data we'll be delivering): IndexRecord* next = head->next(); head->unlink(); if (next == head) { fHeadIndexRecord = fTailIndexRecord = NULL; } else { fHeadIndexRecord = next; } if (head->recordType() == RECORD_JUNK) { // Don't actually deliver the data to the client: delete head; // Try to deliver the next record instead: return deliverIndexRecord(); } // Deliver data from the head record: #ifdef DEBUG envir() << "delivering: " << *head << "\n"; #endif if (fMaxSize < 11) { fFrameSize = 0; } else { fTo[0] = (u_int8_t)(head->recordType()); fTo[1] = head->startOffset(); fTo[2] = head->size(); // Deliver the PCR, as 24 bits (integer part; little endian) + 8 bits (fractional part) float pcr = head->pcr(); unsigned pcr_int = (unsigned)pcr; u_int8_t pcr_frac = (u_int8_t)(256*(pcr-pcr_int)); fTo[3] = (unsigned char)(pcr_int); fTo[4] = (unsigned char)(pcr_int>>8); fTo[5] = (unsigned char)(pcr_int>>16); fTo[6] = (unsigned char)(pcr_frac); // Deliver the transport packet number (in little-endian order): unsigned long tpn = head->transportPacketNumber(); fTo[7] = (unsigned char)(tpn); fTo[8] = (unsigned char)(tpn>>8); fTo[9] = (unsigned char)(tpn>>16); fTo[10] = (unsigned char)(tpn>>24); fFrameSize = 11; } // Free the (former) head record (as we're now done with it): delete head; // Complete delivery to the client: afterGetting(this); return True; } Boolean 
MPEG2IFrameIndexFromTransportStream::parseFrame() { // At this point, we have a queue of >=0 (unparsed) index records, representing // the data in the parse buffer from "fParseBufferFrameStart" // to "fParseBufferDataEnd". We now parse through this data, looking for // a complete 'frame', where a 'frame', in this case, means: // for MPEG video: a Video Sequence Header, GOP Header, Picture Header, or Slice // for H.264 or H.265 video: a NAL unit // Inspect the frame's initial 4-byte code, to make sure it starts with a system code: if (fParseBufferDataEnd-fParseBufferFrameStart < 4) return False; // not enough data unsigned numInitialBadBytes = 0; unsigned char const* p = &fParseBuffer[fParseBufferFrameStart]; if (!(p[0] == 0 && p[1] == 0 && p[2] == 1)) { // There's no system code at the beginning. Parse until we find one: if (fParseBufferParseEnd == fParseBufferFrameStart + 4) { // Start parsing from the beginning of the frame data: fParseBufferParseEnd = fParseBufferFrameStart; } unsigned char nextCode; if (!parseToNextCode(nextCode)) return False; numInitialBadBytes = fParseBufferParseEnd - fParseBufferFrameStart; fParseBufferFrameStart = fParseBufferParseEnd; fParseBufferParseEnd += 4; // skip over the code that we just saw p = &fParseBuffer[fParseBufferFrameStart]; } unsigned char curCode = p[3]; if (fIsH264) curCode &= 0x1F; // nal_unit_type else if (fIsH265) curCode = (curCode&0x7E)>>1; RecordType curRecordType; unsigned char nextCode; if (fIsH264) { switch (curCode) { case 1: // Coded slice of a non-IDR picture curRecordType = RECORD_NAL_H264_NON_IFRAME; if (!parseToNextCode(nextCode)) return False; break; case 5: // Coded slice of an IDR picture curRecordType = RECORD_NAL_H264_IFRAME; if (!parseToNextCode(nextCode)) return False; break; case 6: // Supplemental enhancement information (SEI) curRecordType = RECORD_NAL_H264_SEI; if (!parseToNextCode(nextCode)) return False; break; case 7: // Sequence parameter set (SPS) curRecordType = RECORD_NAL_H264_SPS; if (!parseToNextCode(nextCode)) return False; break; case 8: // Picture parameter set (PPS) curRecordType = RECORD_NAL_H264_PPS; if (!parseToNextCode(nextCode)) return False; break; default: curRecordType = RECORD_NAL_H264_OTHER; if (!parseToNextCode(nextCode)) return False; break; } } else if (fIsH265) { switch (curCode) { case 19: // Coded slice segment of an IDR picture case 20: // Coded slice segment of an IDR picture curRecordType = RECORD_NAL_H265_IFRAME; if (!parseToNextCode(nextCode)) return False; break; case 32: // Video parameter set (VPS) curRecordType = RECORD_NAL_H265_VPS; if (!parseToNextCode(nextCode)) return False; break; case 33: // Sequence parameter set (SPS) curRecordType = RECORD_NAL_H265_SPS; if (!parseToNextCode(nextCode)) return False; break; case 34: // Picture parameter set (PPS) curRecordType = RECORD_NAL_H265_PPS; if (!parseToNextCode(nextCode)) return False; break; default: curRecordType = (curCode <= 31) ? 
RECORD_NAL_H265_NON_IFRAME : RECORD_NAL_H265_OTHER; if (!parseToNextCode(nextCode)) return False; break; } } else { // MPEG-1, 2, or 4 switch (curCode) { case VIDEO_SEQUENCE_START_CODE: case VISUAL_OBJECT_SEQUENCE_START_CODE: curRecordType = RECORD_VSH; while (1) { if (!parseToNextCode(nextCode)) return False; if (nextCode == GROUP_START_CODE || nextCode == PICTURE_START_CODE || nextCode == VOP_START_CODE) break; fParseBufferParseEnd += 4; // skip over the code that we just saw } break; case GROUP_START_CODE: curRecordType = RECORD_GOP; while (1) { if (!parseToNextCode(nextCode)) return False; if (nextCode == PICTURE_START_CODE || nextCode == VOP_START_CODE) break; fParseBufferParseEnd += 4; // skip over the code that we just saw } break; default: // picture curRecordType = RECORD_PIC_NON_IFRAME; // may get changed to IFRAME later while (1) { if (!parseToNextCode(nextCode)) return False; if (nextCode == VIDEO_SEQUENCE_START_CODE || nextCode == VISUAL_OBJECT_SEQUENCE_START_CODE || nextCode == GROUP_START_CODE || nextCode == GROUP_VOP_START_CODE || nextCode == PICTURE_START_CODE || nextCode == VOP_START_CODE) break; fParseBufferParseEnd += 4; // skip over the code that we just saw } break; } } if (curRecordType == RECORD_PIC_NON_IFRAME) { if (curCode == VOP_START_CODE) { // MPEG-4 if ((fParseBuffer[fParseBufferFrameStart+4]&0xC0) == 0) { // This is actually an I-frame. Note it as such: curRecordType = RECORD_PIC_IFRAME; } } else { // MPEG-1 or 2 if ((fParseBuffer[fParseBufferFrameStart+5]&0x38) == 0x08) { // This is actually an I-frame. Note it as such: curRecordType = RECORD_PIC_IFRAME; } } } // There is now a parsed 'frame', from "fParseBufferFrameStart" // to "fParseBufferParseEnd". Tag the corresponding index records to note this: unsigned frameSize = fParseBufferParseEnd - fParseBufferFrameStart + numInitialBadBytes; #ifdef DEBUG envir() << "parsed " << recordTypeStr[curRecordType] << "; length " << frameSize << "\n"; #endif for (IndexRecord* r = fHeadIndexRecord; ; r = r->next()) { if (numInitialBadBytes >= r->size()) { r->recordType() = RECORD_JUNK; numInitialBadBytes -= r->size(); } else { r->recordType() = curRecordType; } if (r == fHeadIndexRecord) r->setFirstFlag(); // indicates that this is the first record for this frame if (r->size() > frameSize) { // This record contains extra data that's not part of the frame. 
// Shorten this record, and move the extra data to a new record // that comes afterwards: u_int8_t newOffset = r->startOffset() + frameSize; u_int8_t newSize = r->size() - frameSize; r->size() = frameSize; #ifdef DEBUG envir() << "tagged record (modified): " << *r << "\n"; #endif IndexRecord* newRecord = new IndexRecord(newOffset, newSize, r->transportPacketNumber(), r->pcr()); newRecord->addAfter(r); if (fTailIndexRecord == r) fTailIndexRecord = newRecord; #ifdef DEBUG envir() << "added extra record: " << *newRecord << "\n"; #endif } else { #ifdef DEBUG envir() << "tagged record: " << *r << "\n"; #endif } frameSize -= r->size(); if (frameSize == 0) break; if (r == fTailIndexRecord) { // this shouldn't happen envir() << "!!!!!Internal consistency error!!!!!\n"; return False; } } // Finally, update our parse state (to skip over the now-parsed data): fParseBufferFrameStart = fParseBufferParseEnd; fParseBufferParseEnd += 4; // to skip over the next code (that we found) return True; } Boolean MPEG2IFrameIndexFromTransportStream ::parseToNextCode(unsigned char& nextCode) { unsigned char const* p = &fParseBuffer[fParseBufferParseEnd]; unsigned char const* end = &fParseBuffer[fParseBufferDataEnd]; while (p <= end-4) { if (p[2] > 1) p += 3; // common case (optimized) else if (p[2] == 0) ++p; else if (p[0] == 0 && p[1] == 0) { // && p[2] == 1 // We found a code here: nextCode = p[3]; fParseBufferParseEnd = p - &fParseBuffer[0]; // where we've gotten to return True; } else p += 3; } fParseBufferParseEnd = p - &fParseBuffer[0]; // where we've gotten to return False; // no luck this time } void MPEG2IFrameIndexFromTransportStream::compactParseBuffer() { #ifdef DEBUG envir() << "Compacting parse buffer: [" << fParseBufferFrameStart << "," << fParseBufferParseEnd << "," << fParseBufferDataEnd << "]"; #endif memmove(&fParseBuffer[0], &fParseBuffer[fParseBufferFrameStart], fParseBufferDataEnd - fParseBufferFrameStart); fParseBufferDataEnd -= fParseBufferFrameStart; fParseBufferParseEnd -= fParseBufferFrameStart; fParseBufferFrameStart = 0; #ifdef DEBUG envir() << "-> [" << fParseBufferFrameStart << "," << fParseBufferParseEnd << "," << fParseBufferDataEnd << "]\n"; #endif } void MPEG2IFrameIndexFromTransportStream::addToTail(IndexRecord* newIndexRecord) { #ifdef DEBUG envir() << "adding new: " << *newIndexRecord << "\n"; #endif if (fTailIndexRecord == NULL) { fHeadIndexRecord = fTailIndexRecord = newIndexRecord; } else { newIndexRecord->addAfter(fTailIndexRecord); fTailIndexRecord = newIndexRecord; } } ////////// IndexRecord implementation ////////// IndexRecord::IndexRecord(u_int8_t startOffset, u_int8_t size, unsigned long transportPacketNumber, float pcr) : fNext(this), fPrev(this), fRecordType(RECORD_UNPARSED), fStartOffset(startOffset), fSize(size), fPCR(pcr), fTransportPacketNumber(transportPacketNumber) { } IndexRecord::~IndexRecord() { IndexRecord* nextRecord = next(); unlink(); if (nextRecord != this) delete nextRecord; } void IndexRecord::addAfter(IndexRecord* prev) { fNext = prev->fNext; fPrev = prev; prev->fNext->fPrev = this; prev->fNext = this; } void IndexRecord::unlink() { fNext->fPrev = fPrev; fPrev->fNext = fNext; fNext = fPrev = this; } live/liveMedia/OnDemandServerMediaSubsession.cpp000444 001751 000000 00000050211 12265042432 022233 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or 
(at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand. // Implementation #include "OnDemandServerMediaSubsession.hh" #include <GroupsockHelper.hh> OnDemandServerMediaSubsession ::OnDemandServerMediaSubsession(UsageEnvironment& env, Boolean reuseFirstSource, portNumBits initialPortNum) : ServerMediaSubsession(env), fSDPLines(NULL), fReuseFirstSource(reuseFirstSource), fInitialPortNum(initialPortNum), fLastStreamToken(NULL) { fDestinationsHashTable = HashTable::create(ONE_WORD_HASH_KEYS); gethostname(fCNAME, sizeof fCNAME); fCNAME[sizeof fCNAME-1] = '\0'; // just in case } OnDemandServerMediaSubsession::~OnDemandServerMediaSubsession() { delete[] fSDPLines; // Clean out the destinations hash table: while (1) { Destinations* destinations = (Destinations*)(fDestinationsHashTable->RemoveNext()); if (destinations == NULL) break; delete destinations; } delete fDestinationsHashTable; } char const* OnDemandServerMediaSubsession::sdpLines() { if (fSDPLines == NULL) { // We need to construct a set of SDP lines that describe this // subsession (as a unicast stream). To do so, we first create // dummy (unused) source and "RTPSink" objects, // whose parameters we use for the SDP lines: unsigned estBitrate; FramedSource* inputSource = createNewStreamSource(0, estBitrate); if (inputSource == NULL) return NULL; // file not found struct in_addr dummyAddr; dummyAddr.s_addr = 0; Groupsock dummyGroupsock(envir(), dummyAddr, 0, 0); unsigned char rtpPayloadType = 96 + trackNumber()-1; // if dynamic RTPSink* dummyRTPSink = createNewRTPSink(&dummyGroupsock, rtpPayloadType, inputSource); setSDPLinesFromRTPSink(dummyRTPSink, inputSource, estBitrate); Medium::close(dummyRTPSink); closeStreamSource(inputSource); } return fSDPLines; } void OnDemandServerMediaSubsession ::getStreamParameters(unsigned clientSessionId, netAddressBits clientAddress, Port const& clientRTPPort, Port const& clientRTCPPort, int tcpSocketNum, unsigned char rtpChannelId, unsigned char rtcpChannelId, netAddressBits& destinationAddress, u_int8_t& /*destinationTTL*/, Boolean& isMulticast, Port& serverRTPPort, Port& serverRTCPPort, void*& streamToken) { if (destinationAddress == 0) destinationAddress = clientAddress; struct in_addr destinationAddr; destinationAddr.s_addr = destinationAddress; isMulticast = False; if (fLastStreamToken != NULL && fReuseFirstSource) { // Special case: Rather than creating a new 'StreamState', // we reuse the one that we've already created: serverRTPPort = ((StreamState*)fLastStreamToken)->serverRTPPort(); serverRTCPPort = ((StreamState*)fLastStreamToken)->serverRTCPPort(); ++((StreamState*)fLastStreamToken)->referenceCount(); streamToken = fLastStreamToken; } else { // Normal case: Create a new media source: unsigned streamBitrate; FramedSource* mediaSource = createNewStreamSource(clientSessionId, streamBitrate); // Create 'groupsock' and 'sink' objects for the destination, // using previously unused server port numbers: RTPSink*
rtpSink; BasicUDPSink* udpSink; Groupsock* rtpGroupsock; Groupsock* rtcpGroupsock; portNumBits serverPortNum; if (clientRTCPPort.num() == 0) { // We're streaming raw UDP (not RTP). Create a single groupsock: NoReuse dummy(envir()); // ensures that we skip over ports that are already in use for (serverPortNum = fInitialPortNum; ; ++serverPortNum) { struct in_addr dummyAddr; dummyAddr.s_addr = 0; serverRTPPort = serverPortNum; rtpGroupsock = new Groupsock(envir(), dummyAddr, serverRTPPort, 255); if (rtpGroupsock->socketNum() >= 0) break; // success } rtcpGroupsock = NULL; rtpSink = NULL; udpSink = BasicUDPSink::createNew(envir(), rtpGroupsock); } else { // Normal case: We're streaming RTP (over UDP or TCP). Create a pair of // groupsocks (RTP and RTCP), with adjacent port numbers (RTP port number even): NoReuse dummy(envir()); // ensures that we skip over ports that are already in use for (portNumBits serverPortNum = fInitialPortNum; ; serverPortNum += 2) { struct in_addr dummyAddr; dummyAddr.s_addr = 0; serverRTPPort = serverPortNum; rtpGroupsock = new Groupsock(envir(), dummyAddr, serverRTPPort, 255); if (rtpGroupsock->socketNum() < 0) { delete rtpGroupsock; continue; // try again } serverRTCPPort = serverPortNum+1; rtcpGroupsock = new Groupsock(envir(), dummyAddr, serverRTCPPort, 255); if (rtcpGroupsock->socketNum() < 0) { delete rtpGroupsock; delete rtcpGroupsock; continue; // try again } break; // success } unsigned char rtpPayloadType = 96 + trackNumber()-1; // if dynamic rtpSink = createNewRTPSink(rtpGroupsock, rtpPayloadType, mediaSource); udpSink = NULL; } // Turn off the destinations for each groupsock. They'll get set later // (unless TCP is used instead): if (rtpGroupsock != NULL) rtpGroupsock->removeAllDestinations(); if (rtcpGroupsock != NULL) rtcpGroupsock->removeAllDestinations(); if (rtpGroupsock != NULL) { // Try to use a big send buffer for RTP - at least 0.1 second of // specified bandwidth and at least 50 KB unsigned rtpBufSize = streamBitrate * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024; increaseSendBufferTo(envir(), rtpGroupsock->socketNum(), rtpBufSize); } // Set up the state of the stream. 
The stream will get started later: streamToken = fLastStreamToken = new StreamState(*this, serverRTPPort, serverRTCPPort, rtpSink, udpSink, streamBitrate, mediaSource, rtpGroupsock, rtcpGroupsock); } // Record these destinations as being for this client session id: Destinations* destinations; if (tcpSocketNum < 0) { // UDP destinations = new Destinations(destinationAddr, clientRTPPort, clientRTCPPort); } else { // TCP destinations = new Destinations(tcpSocketNum, rtpChannelId, rtcpChannelId); } fDestinationsHashTable->Add((char const*)clientSessionId, destinations); } void OnDemandServerMediaSubsession::startStream(unsigned clientSessionId, void* streamToken, TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData, unsigned short& rtpSeqNum, unsigned& rtpTimestamp, ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler, void* serverRequestAlternativeByteHandlerClientData) { StreamState* streamState = (StreamState*)streamToken; Destinations* destinations = (Destinations*)(fDestinationsHashTable->Lookup((char const*)clientSessionId)); if (streamState != NULL) { streamState->startPlaying(destinations, rtcpRRHandler, rtcpRRHandlerClientData, serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData); RTPSink* rtpSink = streamState->rtpSink(); // alias if (rtpSink != NULL) { rtpSeqNum = rtpSink->currentSeqNo(); rtpTimestamp = rtpSink->presetNextTimestamp(); } } } void OnDemandServerMediaSubsession::pauseStream(unsigned /*clientSessionId*/, void* streamToken) { // Pausing isn't allowed if multiple clients are receiving data from // the same source: if (fReuseFirstSource) return; StreamState* streamState = (StreamState*)streamToken; if (streamState != NULL) streamState->pause(); } void OnDemandServerMediaSubsession::seekStream(unsigned /*clientSessionId*/, void* streamToken, double& seekNPT, double streamDuration, u_int64_t& numBytes) { numBytes = 0; // by default: unknown // Seeking isn't allowed if multiple clients are receiving data from the same source: if (fReuseFirstSource) return; StreamState* streamState = (StreamState*)streamToken; if (streamState != NULL && streamState->mediaSource() != NULL) { seekStreamSource(streamState->mediaSource(), seekNPT, streamDuration, numBytes); streamState->startNPT() = (float)seekNPT; RTPSink* rtpSink = streamState->rtpSink(); // alias if (rtpSink != NULL) rtpSink->resetPresentationTimes(); } } void OnDemandServerMediaSubsession::seekStream(unsigned /*clientSessionId*/, void* streamToken, char*& absStart, char*& absEnd) { // Seeking isn't allowed if multiple clients are receiving data from the same source: if (fReuseFirstSource) return; StreamState* streamState = (StreamState*)streamToken; if (streamState != NULL && streamState->mediaSource() != NULL) { seekStreamSource(streamState->mediaSource(), absStart, absEnd); } } void OnDemandServerMediaSubsession::nullSeekStream(unsigned /*clientSessionId*/, void* streamToken) { StreamState* streamState = (StreamState*)streamToken; if (streamState != NULL && streamState->mediaSource() != NULL) { // Because we're not seeking here, get the current NPT, and remember it as the new 'start' NPT: streamState->startNPT() = getCurrentNPT(streamToken); RTPSink* rtpSink = streamState->rtpSink(); // alias if (rtpSink != NULL) rtpSink->resetPresentationTimes(); } } void OnDemandServerMediaSubsession::setStreamScale(unsigned /*clientSessionId*/, void* streamToken, float scale) { // Changing the scale factor isn't allowed if multiple clients are receiving data // from the same source: if 
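// NOTE (worked numbers for the allocation code above; assuming the declared default of 6970
// for "fInitialPortNum"): the RTP/RTCP loop tries the port pairs (6970,6971), (6972,6973), ...
// until both groupsocks open, keeping RTP on the even port and RTCP on the odd one, per the
// RFC 3550 convention. The send-buffer sizing is 0.1 s of the stream's bitrate:
//   b kbps * 1000 / 8 * 0.1 s = b*25/2 bytes
// so e.g. streamBitrate = 8000 requests a 100000-byte buffer, while anything below 4096 kbps
// falls back to the 50 KB floor.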
(fReuseFirstSource) return; StreamState* streamState = (StreamState*)streamToken; if (streamState != NULL && streamState->mediaSource() != NULL) { setStreamSourceScale(streamState->mediaSource(), scale); } } float OnDemandServerMediaSubsession::getCurrentNPT(void* streamToken) { do { if (streamToken == NULL) break; StreamState* streamState = (StreamState*)streamToken; RTPSink* rtpSink = streamState->rtpSink(); if (rtpSink == NULL) break; return streamState->startNPT() + (rtpSink->mostRecentPresentationTime().tv_sec - rtpSink->initialPresentationTime().tv_sec) + (rtpSink->mostRecentPresentationTime().tv_usec - rtpSink->initialPresentationTime().tv_usec)/1000000.0f; } while (0); return 0.0; } FramedSource* OnDemandServerMediaSubsession::getStreamSource(void* streamToken) { if (streamToken == NULL) return NULL; StreamState* streamState = (StreamState*)streamToken; return streamState->mediaSource(); } void OnDemandServerMediaSubsession::deleteStream(unsigned clientSessionId, void*& streamToken) { StreamState* streamState = (StreamState*)streamToken; // Look up (and remove) the destinations for this client session: Destinations* destinations = (Destinations*)(fDestinationsHashTable->Lookup((char const*)clientSessionId)); if (destinations != NULL) { fDestinationsHashTable->Remove((char const*)clientSessionId); // Stop streaming to these destinations: if (streamState != NULL) streamState->endPlaying(destinations); } // Delete the "StreamState" structure if it's no longer being used: if (streamState != NULL) { if (streamState->referenceCount() > 0) --streamState->referenceCount(); if (streamState->referenceCount() == 0) { delete streamState; streamToken = NULL; } } // Finally, delete the destinations themselves: delete destinations; } char const* OnDemandServerMediaSubsession ::getAuxSDPLine(RTPSink* rtpSink, FramedSource* /*inputSource*/) { // Default implementation: return rtpSink == NULL ?
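// NOTE (worked example of the computation in "getCurrentNPT()" above): with
// startNPT() == 10.0, an initial presentation time of {100 s, 250000 us}, and a most recent
// presentation time of {112 s, 750000 us}:
//   10.0 + (112 - 100) + (750000 - 250000)/1000000.0 = 22.5 seconds of normal play time.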
NULL : rtpSink->auxSDPLine(); } void OnDemandServerMediaSubsession::seekStreamSource(FramedSource* /*inputSource*/, double& /*seekNPT*/, double /*streamDuration*/, u_int64_t& numBytes) { // Default implementation: Do nothing } void OnDemandServerMediaSubsession::seekStreamSource(FramedSource* /*inputSource*/, char*& absStart, char*& absEnd) { // Default implementation: do nothing (but delete[] and assign "absStart" and "absEnd" to NULL, to show that we don't handle this) delete[] absStart; absStart = NULL; delete[] absEnd; absEnd = NULL; } void OnDemandServerMediaSubsession ::setStreamSourceScale(FramedSource* /*inputSource*/, float /*scale*/) { // Default implementation: Do nothing } void OnDemandServerMediaSubsession::closeStreamSource(FramedSource *inputSource) { Medium::close(inputSource); } void OnDemandServerMediaSubsession ::setSDPLinesFromRTPSink(RTPSink* rtpSink, FramedSource* inputSource, unsigned estBitrate) { if (rtpSink == NULL) return; char const* mediaType = rtpSink->sdpMediaType(); unsigned char rtpPayloadType = rtpSink->rtpPayloadType(); AddressString ipAddressStr(fServerAddressForSDP); char* rtpmapLine = rtpSink->rtpmapLine(); char const* rangeLine = rangeSDPLine(); char const* auxSDPLine = getAuxSDPLine(rtpSink, inputSource); if (auxSDPLine == NULL) auxSDPLine = ""; char const* const sdpFmt = "m=%s %u RTP/AVP %d\r\n" "c=IN IP4 %s\r\n" "b=AS:%u\r\n" "%s" "%s" "%s" "a=control:%s\r\n"; unsigned sdpFmtSize = strlen(sdpFmt) + strlen(mediaType) + 5 /* max short len */ + 3 /* max char len */ + strlen(ipAddressStr.val()) + 20 /* max int len */ + strlen(rtpmapLine) + strlen(rangeLine) + strlen(auxSDPLine) + strlen(trackId()); char* sdpLines = new char[sdpFmtSize]; sprintf(sdpLines, sdpFmt, mediaType, // m= fPortNumForSDP, // m= rtpPayloadType, // m= ipAddressStr.val(), // c= address estBitrate, // b=AS: rtpmapLine, // a=rtpmap:... (if present) rangeLine, // a=range:... (if present) auxSDPLine, // optional extra SDP line trackId()); // a=control: delete[] (char*)rangeLine; delete[] rtpmapLine; fSDPLines = strDup(sdpLines); delete[] sdpLines; } ////////// StreamState implementation ////////// static void afterPlayingStreamState(void* clientData) { StreamState* streamState = (StreamState*)clientData; if (streamState->streamDuration() == 0.0) { // When the input stream ends, tear it down. This will cause a RTCP "BYE" // to be sent to each client, telling it that the stream has ended. // (Because the stream didn't have a known duration, there was no other // way for clients to know when the stream ended.) streamState->reclaim(); } // Otherwise, keep the stream alive, in case a client wants to // subsequently re-play the stream starting from somewhere other than the end. // (This can be done only on streams that have a known duration.)
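// NOTE: whether the stream is reclaimed here is governed by the subsession's "duration()"
// virtual; "StreamState" copies it into "fStreamDuration" on construction (see below). So a
// subclass that can report a known duration, e.g. (hypothetical override):
//   virtual float duration() const { return 30.0f; } // a known 30-second stream
// gets the keep-alive behavior, allowing clients to re-PLAY from an arbitrary point.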
} StreamState::StreamState(OnDemandServerMediaSubsession& master, Port const& serverRTPPort, Port const& serverRTCPPort, RTPSink* rtpSink, BasicUDPSink* udpSink, unsigned totalBW, FramedSource* mediaSource, Groupsock* rtpGS, Groupsock* rtcpGS) : fMaster(master), fAreCurrentlyPlaying(False), fReferenceCount(1), fServerRTPPort(serverRTPPort), fServerRTCPPort(serverRTCPPort), fRTPSink(rtpSink), fUDPSink(udpSink), fStreamDuration(master.duration()), fTotalBW(totalBW), fRTCPInstance(NULL) /* created later */, fMediaSource(mediaSource), fStartNPT(0.0), fRTPgs(rtpGS), fRTCPgs(rtcpGS) { } StreamState::~StreamState() { reclaim(); } void StreamState ::startPlaying(Destinations* dests, TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData, ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler, void* serverRequestAlternativeByteHandlerClientData) { if (dests == NULL) return; if (fRTCPInstance == NULL && fRTPSink != NULL) { // Create (and start) a 'RTCP instance' for this RTP sink: fRTCPInstance = RTCPInstance::createNew(fRTPSink->envir(), fRTCPgs, fTotalBW, (unsigned char*)fMaster.fCNAME, fRTPSink, NULL /* we're a server */); // Note: This starts RTCP running automatically } if (dests->isTCP) { // Change RTP and RTCP to use the TCP socket instead of UDP: if (fRTPSink != NULL) { fRTPSink->addStreamSocket(dests->tcpSocketNum, dests->rtpChannelId); RTPInterface ::setServerRequestAlternativeByteHandler(fRTPSink->envir(), dests->tcpSocketNum, serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData); // So that we continue to handle RTSP commands from the client } if (fRTCPInstance != NULL) { fRTCPInstance->addStreamSocket(dests->tcpSocketNum, dests->rtcpChannelId); fRTCPInstance->setSpecificRRHandler(dests->tcpSocketNum, dests->rtcpChannelId, rtcpRRHandler, rtcpRRHandlerClientData); } } else { // Tell the RTP and RTCP 'groupsocks' about this destination // (in case they don't already have it): if (fRTPgs != NULL) fRTPgs->addDestination(dests->addr, dests->rtpPort); if (fRTCPgs != NULL) fRTCPgs->addDestination(dests->addr, dests->rtcpPort); if (fRTCPInstance != NULL) { fRTCPInstance->setSpecificRRHandler(dests->addr.s_addr, dests->rtcpPort, rtcpRRHandler, rtcpRRHandlerClientData); } } if (fRTCPInstance != NULL) { // Hack: Send an initial RTCP "SR" packet, before the initial RTP packet, so that receivers will (likely) be able to // get RTCP-synchronized presentation times immediately: fRTCPInstance->sendReport(); } if (!fAreCurrentlyPlaying && fMediaSource != NULL) { if (fRTPSink != NULL) { fRTPSink->startPlaying(*fMediaSource, afterPlayingStreamState, this); fAreCurrentlyPlaying = True; } else if (fUDPSink != NULL) { fUDPSink->startPlaying(*fMediaSource, afterPlayingStreamState, this); fAreCurrentlyPlaying = True; } } } void StreamState::pause() { if (fRTPSink != NULL) fRTPSink->stopPlaying(); if (fUDPSink != NULL) fUDPSink->stopPlaying(); fAreCurrentlyPlaying = False; } void StreamState::endPlaying(Destinations* dests) { #if 0 // The following code is temporarily disabled, because it erroneously sends RTCP "BYE"s to all clients if multiple // clients are streaming from the same data source (i.e., if "reuseFirstSource" is True), and we don't want that to happen // if we're being called as a result of a single one of these clients having sent a "TEARDOWN" (rather than the whole stream // having been closed, for all clients). // This will be fixed for real later. 
if (fRTCPInstance != NULL) { // Hack: Explicitly send a RTCP "BYE" packet now, because the code below will prevent that from happening later, // when "fRTCPInstance" gets deleted: fRTCPInstance->sendBYE(); } #endif if (dests->isTCP) { if (fRTPSink != NULL) { fRTPSink->removeStreamSocket(dests->tcpSocketNum, dests->rtpChannelId); } if (fRTCPInstance != NULL) { fRTCPInstance->removeStreamSocket(dests->tcpSocketNum, dests->rtcpChannelId); fRTCPInstance->unsetSpecificRRHandler(dests->tcpSocketNum, dests->rtcpChannelId); } } else { // Tell the RTP and RTCP 'groupsocks' to stop using these destinations: if (fRTPgs != NULL) fRTPgs->removeDestination(dests->addr, dests->rtpPort); if (fRTCPgs != NULL) fRTCPgs->removeDestination(dests->addr, dests->rtcpPort); if (fRTCPInstance != NULL) { fRTCPInstance->unsetSpecificRRHandler(dests->addr.s_addr, dests->rtcpPort); } } } void StreamState::reclaim() { // Delete allocated media objects Medium::close(fRTCPInstance) /* will send a RTCP BYE */; fRTCPInstance = NULL; Medium::close(fRTPSink); fRTPSink = NULL; Medium::close(fUDPSink); fUDPSink = NULL; fMaster.closeStreamSource(fMediaSource); fMediaSource = NULL; if (fMaster.fLastStreamToken == this) fMaster.fLastStreamToken = NULL; delete fRTPgs; fRTPgs = NULL; delete fRTCPgs; fRTCPgs = NULL; } live/liveMedia/AC3AudioFileServerMediaSubsession.cpp000444 001751 000000 00000004566 12265042432 022712 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an AC3 audio file. 
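// NOTE (illustrative; the class name below is hypothetical): the file that follows is a
// compact example of the "OnDemandServerMediaSubsession" subclassing pattern: a concrete,
// file-based subsession typically overrides only "createNewStreamSource()" (to build the
// framed-source chain) and "createNewRTPSink()" (to build the matching sink):
//
//   class MySubsession: public FileServerMediaSubsession {
//   protected:
//     virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
//                                                 unsigned& estBitrate);
//     virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
//                                       unsigned char rtpPayloadTypeIfDynamic,
//                                       FramedSource* inputSource);
//   };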
// Implementation #include "AC3AudioFileServerMediaSubsession.hh" #include "ByteStreamFileSource.hh" #include "AC3AudioStreamFramer.hh" #include "AC3AudioRTPSink.hh" AC3AudioFileServerMediaSubsession* AC3AudioFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) { return new AC3AudioFileServerMediaSubsession(env, fileName, reuseFirstSource); } AC3AudioFileServerMediaSubsession ::AC3AudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) : FileServerMediaSubsession(env, fileName, reuseFirstSource) { } AC3AudioFileServerMediaSubsession::~AC3AudioFileServerMediaSubsession() { } FramedSource* AC3AudioFileServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { estBitrate = 48; // kbps, estimate ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; return AC3AudioStreamFramer::createNew(envir(), fileSource); } RTPSink* AC3AudioFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource) { AC3AudioStreamFramer* audioSource = (AC3AudioStreamFramer*)inputSource; return AC3AudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, audioSource->samplingRate()); } live/liveMedia/RTSPClient.cpp000444 001751 000000 00000230164 12265042432 016277 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A generic RTSP client // Implementation #include "RTSPClient.hh" #include "RTSPCommon.hh" #include "Base64.hh" #include "Locale.hh" #include <GroupsockHelper.hh> #include "ourMD5.hh" ////////// RTSPClient implementation ////////// RTSPClient* RTSPClient::createNew(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum, int socketNumToServer) { return new RTSPClient(env, rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum, socketNumToServer); } unsigned RTSPClient::sendDescribeCommand(responseHandler* responseHandler, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; return sendRequest(new RequestRecord(++fCSeq, "DESCRIBE", responseHandler)); } unsigned RTSPClient::sendOptionsCommand(responseHandler* responseHandler, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; return sendRequest(new RequestRecord(++fCSeq, "OPTIONS", responseHandler)); } unsigned RTSPClient::sendAnnounceCommand(char const* sdpDescription, responseHandler* responseHandler, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; return sendRequest(new RequestRecord(++fCSeq, "ANNOUNCE", responseHandler, NULL, NULL, False, 0.0, 0.0, 0.0, sdpDescription)); } unsigned RTSPClient::sendSetupCommand(MediaSubsession& subsession, responseHandler* responseHandler, Boolean streamOutgoing, Boolean streamUsingTCP, Boolean forceMulticastOnUnspecified, Authenticator* authenticator) { if (fTunnelOverHTTPPortNum != 0) streamUsingTCP = True; // RTSP-over-HTTP tunneling uses TCP (by definition) if (authenticator != NULL) fCurrentAuthenticator = *authenticator; u_int32_t booleanFlags = 0; if (streamUsingTCP) booleanFlags |= 0x1; if (streamOutgoing) booleanFlags |= 0x2; if (forceMulticastOnUnspecified) booleanFlags |= 0x4; return sendRequest(new RequestRecord(++fCSeq, "SETUP", responseHandler, NULL, &subsession, booleanFlags)); } unsigned RTSPClient::sendPlayCommand(MediaSession& session, responseHandler* responseHandler, double start, double end, float scale, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; sendDummyUDPPackets(session); // hack to improve NAT traversal return sendRequest(new RequestRecord(++fCSeq, "PLAY", responseHandler, &session, NULL, 0, start, end, scale)); } unsigned RTSPClient::sendPlayCommand(MediaSubsession& subsession, responseHandler* responseHandler, double start, double end, float scale, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; sendDummyUDPPackets(subsession); // hack to improve NAT traversal return sendRequest(new RequestRecord(++fCSeq, "PLAY", responseHandler, NULL, &subsession, 0, start, end, scale)); } unsigned RTSPClient::sendPlayCommand(MediaSession& session, responseHandler* responseHandler, char const* absStartTime, char const* absEndTime, float scale, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; sendDummyUDPPackets(session); // hack to improve NAT traversal return sendRequest(new RequestRecord(++fCSeq, responseHandler, absStartTime, absEndTime, scale, &session, NULL)); } unsigned RTSPClient::sendPlayCommand(MediaSubsession& subsession, responseHandler* responseHandler, char const* absStartTime, char const* absEndTime, float scale, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
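// NOTE (illustrative sketch; the URL and handler name are hypothetical): the command methods
// above are asynchronous, and each takes a "responseHandler" with the signature
// "void handler(RTSPClient*, int resultCode, char* resultString)". Typical usage:
//
//   RTSPClient* client = RTSPClient::createNew(env, "rtsp://example.com/stream", 1, "myApp");
//   client->sendDescribeCommand(continueAfterDESCRIBE);
//   // ... event loop runs ...
//   void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString) {
//     if (resultCode == 0) { /* "resultString" is the session's SDP description */ }
//     delete[] resultString; // the handler is responsible for freeing the result string
//   }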
sendDummyUDPPackets(subsession); // hack to improve NAT traversal return sendRequest(new RequestRecord(++fCSeq, responseHandler, absStartTime, absEndTime, scale, NULL, &subsession)); } unsigned RTSPClient::sendPauseCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; return sendRequest(new RequestRecord(++fCSeq, "PAUSE", responseHandler, &session)); } unsigned RTSPClient::sendPauseCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; return sendRequest(new RequestRecord(++fCSeq, "PAUSE", responseHandler, NULL, &subsession)); } unsigned RTSPClient::sendRecordCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; return sendRequest(new RequestRecord(++fCSeq, "RECORD", responseHandler, &session)); } unsigned RTSPClient::sendRecordCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; return sendRequest(new RequestRecord(++fCSeq, "RECORD", responseHandler, NULL, &subsession)); } unsigned RTSPClient::sendTeardownCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; return sendRequest(new RequestRecord(++fCSeq, "TEARDOWN", responseHandler, &session)); } unsigned RTSPClient::sendTeardownCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; return sendRequest(new RequestRecord(++fCSeq, "TEARDOWN", responseHandler, NULL, &subsession)); } unsigned RTSPClient::sendSetParameterCommand(MediaSession& session, responseHandler* responseHandler, char const* parameterName, char const* parameterValue, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; char* paramString = new char[strlen(parameterName) + strlen(parameterValue) + 10]; sprintf(paramString, "%s: %s\r\n", parameterName, parameterValue); unsigned result = sendRequest(new RequestRecord(++fCSeq, "SET_PARAMETER", responseHandler, &session, NULL, False, 0.0, 0.0, 0.0, paramString)); delete[] paramString; return result; } unsigned RTSPClient::sendGetParameterCommand(MediaSession& session, responseHandler* responseHandler, char const* parameterName, Authenticator* authenticator) { if (authenticator != NULL) fCurrentAuthenticator = *authenticator; // We assume that: // parameterName is NULL means: Send no body in the request. // parameterName is "" means: Send only \r\n in the request body. // parameterName is non-empty means: Send "<parameterName>\r\n" as the request body. unsigned parameterNameLen = parameterName == NULL ?
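// NOTE (illustrating the three request-body cases documented just above; "handler" is a
// hypothetical response handler):
//   client->sendGetParameterCommand(session, handler, NULL);       // no request body at all
//   client->sendGetParameterCommand(session, handler, "");         // body is just "\r\n"
//                                                                  // (a common liveness ping)
//   client->sendGetParameterCommand(session, handler, "position"); // body is "position\r\n"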
0 : strlen(parameterName); char* paramString = new char[parameterNameLen + 3]; // the 3 is for \r\n + the '\0' byte if (parameterName == NULL) { paramString[0] = '\0'; } else { sprintf(paramString, "%s\r\n", parameterName); } unsigned result = sendRequest(new RequestRecord(++fCSeq, "GET_PARAMETER", responseHandler, &session, NULL, False, 0.0, 0.0, 0.0, paramString)); delete[] paramString; return result; } void RTSPClient::sendDummyUDPPackets(MediaSession& session, unsigned numDummyPackets) { MediaSubsessionIterator iter(session); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { sendDummyUDPPackets(*subsession, numDummyPackets); } } void RTSPClient::sendDummyUDPPackets(MediaSubsession& subsession, unsigned numDummyPackets) { // Hack: To increase the likelihood of UDP packets from the server reaching us, // if we're behind a NAT, send a few 'dummy' UDP packets to the server now. // (We do this on both our RTP port and our RTCP port.) Groupsock* gs1 = NULL; Groupsock* gs2 = NULL; if (subsession.rtpSource() != NULL) gs1 = subsession.rtpSource()->RTPgs(); if (subsession.rtcpInstance() != NULL) gs2 = subsession.rtcpInstance()->RTCPgs(); u_int32_t const dummy = 0xFEEDFACE; for (unsigned i = 0; i < numDummyPackets; ++i) { if (gs1 != NULL) gs1->output(envir(), 255, (unsigned char*)&dummy, sizeof dummy); if (gs2 != NULL) gs2->output(envir(), 255, (unsigned char*)&dummy, sizeof dummy); } } Boolean RTSPClient::changeResponseHandler(unsigned cseq, responseHandler* newResponseHandler) { // Look for the matching request record in each of our 'pending requests' queues: RequestRecord* request; if ((request = fRequestsAwaitingConnection.findByCSeq(cseq)) != NULL || (request = fRequestsAwaitingHTTPTunneling.findByCSeq(cseq)) != NULL || (request = fRequestsAwaitingResponse.findByCSeq(cseq)) != NULL) { request->handler() = newResponseHandler; return True; } return False; } Boolean RTSPClient::lookupByName(UsageEnvironment& env, char const* instanceName, RTSPClient*& resultClient) { resultClient = NULL; // unless we succeed Medium* medium; if (!Medium::lookupByName(env, instanceName, medium)) return False; if (!medium->isRTSPClient()) { env.setResultMsg(instanceName, " is not a RTSP client"); return False; } resultClient = (RTSPClient*)medium; return True; } Boolean RTSPClient::parseRTSPURL(UsageEnvironment& env, char const* url, char*& username, char*& password, NetAddress& address, portNumBits& portNum, char const** urlSuffix) { do { // Parse the URL as "rtsp://[<username>[:<password>]@]<server-address-or-name>[:<port>][/<stream-name>]" char const* prefix = "rtsp://"; unsigned const prefixLength = 7; if (_strncasecmp(url, prefix, prefixLength) != 0) { env.setResultMsg("URL is not of the form \"", prefix, "\""); break; } unsigned const parseBufferSize = 100; char parseBuffer[parseBufferSize]; char const* from = &url[prefixLength]; // Check whether "<username>[:<password>]@" occurs next. // We do this by checking whether '@' appears before the end of the URL, or before the first '/'. username = password = NULL; // default return values char const* colonPasswordStart = NULL; char const* p; for (p = from; *p != '\0' && *p != '/'; ++p) { if (*p == ':' && colonPasswordStart == NULL) { colonPasswordStart = p; } else if (*p == '@') { // We found <username> (and perhaps <password>).
Copy them into newly-allocated result strings: if (colonPasswordStart == NULL) colonPasswordStart = p; char const* usernameStart = from; unsigned usernameLen = colonPasswordStart - usernameStart; username = new char[usernameLen + 1]; // allow for the trailing '\0' for (unsigned i = 0; i < usernameLen; ++i) username[i] = usernameStart[i]; username[usernameLen] = '\0'; char const* passwordStart = colonPasswordStart; if (passwordStart < p) ++passwordStart; // skip over the ':' unsigned passwordLen = p - passwordStart; password = new char[passwordLen + 1]; // allow for the trailing '\0' for (unsigned j = 0; j < passwordLen; ++j) password[j] = passwordStart[j]; password[passwordLen] = '\0'; from = p + 1; // skip over the '@' break; } } // Next, parse <server-address-or-name> char* to = &parseBuffer[0]; unsigned i; for (i = 0; i < parseBufferSize; ++i) { if (*from == '\0' || *from == ':' || *from == '/') { // We've completed parsing the address *to = '\0'; break; } *to++ = *from++; } if (i == parseBufferSize) { env.setResultMsg("URL is too long"); break; } NetAddressList addresses(parseBuffer); if (addresses.numAddresses() == 0) { env.setResultMsg("Failed to find network address for \"", parseBuffer, "\""); break; } address = *(addresses.firstAddress()); portNum = 554; // default value char nextChar = *from; if (nextChar == ':') { int portNumInt; if (sscanf(++from, "%d", &portNumInt) != 1) { env.setResultMsg("No port number follows ':'"); break; } if (portNumInt < 1 || portNumInt > 65535) { env.setResultMsg("Bad port number"); break; } portNum = (portNumBits)portNumInt; while (*from >= '0' && *from <= '9') ++from; // skip over port number } // The remainder of the URL is the suffix: if (urlSuffix != NULL) *urlSuffix = from; return True; } while (0); return False; } void RTSPClient::setUserAgentString(char const* userAgentName) { if (userAgentName == NULL) return; // Change the existing user agent header string: char const* const formatStr = "User-Agent: %s\r\n"; unsigned const headerSize = strlen(formatStr) + strlen(userAgentName); delete[] fUserAgentHeaderStr; fUserAgentHeaderStr = new char[headerSize]; sprintf(fUserAgentHeaderStr, formatStr, userAgentName); fUserAgentHeaderStrLen = strlen(fUserAgentHeaderStr); } unsigned RTSPClient::responseBufferSize = 20000; // default value; you can reassign this in your application if you need to RTSPClient::RTSPClient(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum, int socketNumToServer) : Medium(env), fVerbosityLevel(verbosityLevel), fCSeq(1), fServerAddress(0), fTunnelOverHTTPPortNum(tunnelOverHTTPPortNum), fUserAgentHeaderStr(NULL), fUserAgentHeaderStrLen(0), fInputSocketNum(-1), fOutputSocketNum(-1), fBaseURL(NULL), fTCPStreamIdCount(0), fLastSessionId(NULL), fSessionTimeoutParameter(0), fSessionCookieCounter(0), fHTTPTunnelingConnectionIsPending(False) { setBaseURL(rtspURL); fResponseBuffer = new char[responseBufferSize+1]; resetResponseBuffer(); if (socketNumToServer >= 0) { // This socket number is (assumed to be) already connected to the server.
// Use it, and arrange to handle responses to requests sent on it: fInputSocketNum = fOutputSocketNum = socketNumToServer; envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION, (TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this); } // Set the "User-Agent:" header to use in each request: char const* const libName = "LIVE555 Streaming Media v"; char const* const libVersionStr = LIVEMEDIA_LIBRARY_VERSION_STRING; char const* libPrefix; char const* libSuffix; if (applicationName == NULL || applicationName[0] == '\0') { applicationName = libPrefix = libSuffix = ""; } else { libPrefix = " ("; libSuffix = ")"; } unsigned userAgentNameSize = strlen(applicationName) + strlen(libPrefix) + strlen(libName) + strlen(libVersionStr) + strlen(libSuffix) + 1; char* userAgentName = new char[userAgentNameSize]; sprintf(userAgentName, "%s%s%s%s%s", applicationName, libPrefix, libName, libVersionStr, libSuffix); setUserAgentString(userAgentName); delete[] userAgentName; } RTSPClient::~RTSPClient() { RTPInterface::clearServerRequestAlternativeByteHandler(envir(), fInputSocketNum); // in case we were receiving RTP-over-TCP reset(); delete[] fResponseBuffer; delete[] fUserAgentHeaderStr; } void RTSPClient::reset() { resetTCPSockets(); resetResponseBuffer(); fServerAddress = 0; setBaseURL(NULL); fCurrentAuthenticator.reset(); delete[] fLastSessionId; fLastSessionId = NULL; } void RTSPClient::setBaseURL(char const* url) { delete[] fBaseURL; fBaseURL = strDup(url); } int RTSPClient::grabSocket() { int inputSocket = fInputSocketNum; fInputSocketNum = -1; return inputSocket; } unsigned RTSPClient::sendRequest(RequestRecord* request) { char* cmd = NULL; do { Boolean connectionIsPending = False; if (!fRequestsAwaitingConnection.isEmpty()) { // A connection is currently pending (with at least one enqueued request). Enqueue this request also: connectionIsPending = True; } else if (fInputSocketNum < 0) { // we need to open a connection int connectResult = openConnection(); if (connectResult < 0) break; // an error occurred else if (connectResult == 0) { // A connection is pending connectionIsPending = True; } // else the connection succeeded. Continue sending the command. 
} if (connectionIsPending) { fRequestsAwaitingConnection.enqueue(request); return request->cseq(); } // If requested (and we're not already doing it, or have done it), set up the special protocol for tunneling RTSP-over-HTTP: if (fTunnelOverHTTPPortNum != 0 && strcmp(request->commandName(), "GET") != 0 && fOutputSocketNum == fInputSocketNum) { if (!setupHTTPTunneling1()) break; fRequestsAwaitingHTTPTunneling.enqueue(request); return request->cseq(); } // Construct and send the command: // First, construct command-specific headers that we need: char* cmdURL = fBaseURL; // by default Boolean cmdURLWasAllocated = False; char const* protocolStr = "RTSP/1.0"; // by default char* extraHeaders = (char*)""; // by default Boolean extraHeadersWereAllocated = False; char* contentLengthHeader = (char*)""; // by default Boolean contentLengthHeaderWasAllocated = False; if (!setRequestFields(request, cmdURL, cmdURLWasAllocated, protocolStr, extraHeaders, extraHeadersWereAllocated)) { break; } char const* contentStr = request->contentStr(); // by default if (contentStr == NULL) contentStr = ""; unsigned contentStrLen = strlen(contentStr); if (contentStrLen > 0) { char const* contentLengthHeaderFmt = "Content-Length: %d\r\n"; unsigned contentLengthHeaderSize = strlen(contentLengthHeaderFmt) + 20 /* max int len */; contentLengthHeader = new char[contentLengthHeaderSize]; sprintf(contentLengthHeader, contentLengthHeaderFmt, contentStrLen); contentLengthHeaderWasAllocated = True; } char* authenticatorStr = createAuthenticatorString(request->commandName(), fBaseURL); char const* const cmdFmt = "%s %s %s\r\n" "CSeq: %d\r\n" "%s" "%s" "%s" "%s" "\r\n" "%s"; unsigned cmdSize = strlen(cmdFmt) + strlen(request->commandName()) + strlen(cmdURL) + strlen(protocolStr) + 20 /* max int len */ + strlen(authenticatorStr) + fUserAgentHeaderStrLen + strlen(extraHeaders) + strlen(contentLengthHeader) + contentStrLen; cmd = new char[cmdSize]; sprintf(cmd, cmdFmt, request->commandName(), cmdURL, protocolStr, request->cseq(), authenticatorStr, fUserAgentHeaderStr, extraHeaders, contentLengthHeader, contentStr); delete[] authenticatorStr; if (cmdURLWasAllocated) delete[] cmdURL; if (extraHeadersWereAllocated) delete[] extraHeaders; if (contentLengthHeaderWasAllocated) delete[] contentLengthHeader; if (fVerbosityLevel >= 1) envir() << "Sending request: " << cmd << "\n"; if (fTunnelOverHTTPPortNum != 0 && strcmp(request->commandName(), "GET") != 0 && strcmp(request->commandName(), "POST") != 0) { // When we're tunneling RTSP-over-HTTP, we Base-64-encode the request before we send it. // (However, we don't do this for the HTTP "GET" and "POST" commands that we use to set up the tunnel.) char* origCmd = cmd; cmd = base64Encode(origCmd, strlen(cmd)); if (fVerbosityLevel >= 1) envir() << "\tThe request was base-64 encoded to: " << cmd << "\n\n"; delete[] origCmd; } if (send(fOutputSocketNum, cmd, strlen(cmd), 0) < 0) { char const* errFmt = "%s send() failed: "; unsigned const errLength = strlen(errFmt) + strlen(request->commandName()); char* err = new char[errLength]; sprintf(err, errFmt, request->commandName()); envir().setResultErrMsg(err); delete[] err; break; } // The command send succeeded, so enqueue the request record, so that its response (when it comes) can be handled. // However, note that we do not expect a response to a POST command with RTSP-over-HTTP, so don't enqueue that. 
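// NOTE (for reference; the URL and application name are hypothetical): given "cmdFmt" above,
// a first DESCRIBE on a fresh client goes out over the wire roughly as follows ("fCSeq"
// starts at 1, so the first command carries CSeq 2; no "Authorization:" header is sent until
// the server has issued a challenge):
//
//   DESCRIBE rtsp://example.com/stream RTSP/1.0\r\n
//   CSeq: 2\r\n
//   User-Agent: myApp (LIVE555 Streaming Media v<version>)\r\n
//   Accept: application/sdp\r\n
//   \r\n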
int cseq = request->cseq(); if (fTunnelOverHTTPPortNum == 0 || strcmp(request->commandName(), "POST") != 0) { fRequestsAwaitingResponse.enqueue(request); } else { delete request; } delete[] cmd; return cseq; } while (0); // An error occurred, so call the response handler immediately (indicating the error): delete[] cmd; handleRequestError(request); delete request; return 0; } static char* createSessionString(char const* sessionId) { char* sessionStr; if (sessionId != NULL) { sessionStr = new char[20+strlen(sessionId)]; sprintf(sessionStr, "Session: %s\r\n", sessionId); } else { sessionStr = strDup(""); } return sessionStr; } static char* createScaleString(float scale, float currentScale) { char buf[100]; if (scale == 1.0f && currentScale == 1.0f) { // This is the default value; we don't need a "Scale:" header: buf[0] = '\0'; } else { Locale l("C", Numeric); sprintf(buf, "Scale: %f\r\n", scale); } return strDup(buf); } static char* createRangeString(double start, double end, char const* absStartTime, char const* absEndTime) { char buf[100]; if (absStartTime != NULL) { // Create a "Range:" header that specifies 'absolute' time values: if (absEndTime == NULL) { // There's no end time: snprintf(buf, sizeof buf, "Range: clock=%s-\r\n", absStartTime); } else { // There's both a start and an end time; include them both in the "Range:" hdr snprintf(buf, sizeof buf, "Range: clock=%s-%s\r\n", absStartTime, absEndTime); } } else { // Create a "Range:" header that specifies relative (i.e., NPT) time values: if (start < 0) { // We're resuming from a PAUSE; there's no "Range:" header at all buf[0] = '\0'; } else if (end < 0) { // There's no end time: Locale l("C", Numeric); sprintf(buf, "Range: npt=%.3f-\r\n", start); } else { // There's both a start and an end time; include them both in the "Range:" hdr Locale l("C", Numeric); sprintf(buf, "Range: npt=%.3f-%.3f\r\n", start, end); } } return strDup(buf); } Boolean RTSPClient::setRequestFields(RequestRecord* request, char*& cmdURL, Boolean& cmdURLWasAllocated, char const*& protocolStr, char*& extraHeaders, Boolean& extraHeadersWereAllocated ) { // Set various fields that will appear in our outgoing request, depending upon the particular command that we are sending. 
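// NOTE (worked outputs of the header builders above):
//   createRangeString(0.0, -1.0, NULL, NULL)  -> "Range: npt=0.000-\r\n"
//   createRangeString(10.5, 20.0, NULL, NULL) -> "Range: npt=10.500-20.000\r\n"
//   createRangeString(-1.0, -1.0, NULL, NULL) -> ""  (resuming from a PAUSE: no header at all)
//   createRangeString(0.0, 0.0, "20140101T000000Z", NULL)
//                                             -> "Range: clock=20140101T000000Z-\r\n"
//   createScaleString(2.0f, 1.0f)             -> "Scale: 2.000000\r\n"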
if (strcmp(request->commandName(), "DESCRIBE") == 0) { extraHeaders = (char*)"Accept: application/sdp\r\n"; } else if (strcmp(request->commandName(), "OPTIONS") == 0) { // If we're currently part of a session, create a "Session:" header (in case the server wants this to indicate // client 'liveness); this makes up our 'extra headers': extraHeaders = createSessionString(fLastSessionId); extraHeadersWereAllocated = True; } else if (strcmp(request->commandName(), "ANNOUNCE") == 0) { extraHeaders = (char*)"Content-Type: application/sdp\r\n"; } else if (strcmp(request->commandName(), "SETUP") == 0) { MediaSubsession& subsession = *request->subsession(); Boolean streamUsingTCP = (request->booleanFlags()&0x1) != 0; Boolean streamOutgoing = (request->booleanFlags()&0x2) != 0; Boolean forceMulticastOnUnspecified = (request->booleanFlags()&0x4) != 0; char const *prefix, *separator, *suffix; constructSubsessionURL(subsession, prefix, separator, suffix); char const* transportFmt; if (strcmp(subsession.protocolName(), "UDP") == 0) { suffix = ""; transportFmt = "Transport: RAW/RAW/UDP%s%s%s=%d-%d\r\n"; } else { transportFmt = "Transport: RTP/AVP%s%s%s=%d-%d\r\n"; } cmdURL = new char[strlen(prefix) + strlen(separator) + strlen(suffix) + 1]; cmdURLWasAllocated = True; sprintf(cmdURL, "%s%s%s", prefix, separator, suffix); // Construct a "Transport:" header. char const* transportTypeStr; char const* modeStr = streamOutgoing ? ";mode=receive" : ""; // Note: I think the above is nonstandard, but DSS wants it this way char const* portTypeStr; portNumBits rtpNumber, rtcpNumber; if (streamUsingTCP) { // streaming over the RTSP connection transportTypeStr = "/TCP;unicast"; portTypeStr = ";interleaved"; rtpNumber = fTCPStreamIdCount++; rtcpNumber = fTCPStreamIdCount++; } else { // normal RTP streaming unsigned connectionAddress = subsession.connectionEndpointAddress(); Boolean requestMulticastStreaming = IsMulticastAddress(connectionAddress) || (connectionAddress == 0 && forceMulticastOnUnspecified); transportTypeStr = requestMulticastStreaming ? ";multicast" : ";unicast"; portTypeStr = ";client_port"; rtpNumber = subsession.clientPortNum(); if (rtpNumber == 0) { envir().setResultMsg("Client port number unknown\n"); delete[] cmdURL; return False; } rtcpNumber = rtpNumber + 1; } unsigned transportSize = strlen(transportFmt) + strlen(transportTypeStr) + strlen(modeStr) + strlen(portTypeStr) + 2*5 /* max port len */; char* transportStr = new char[transportSize]; sprintf(transportStr, transportFmt, transportTypeStr, modeStr, portTypeStr, rtpNumber, rtcpNumber); // When sending more than one "SETUP" request, include a "Session:" header in the 2nd and later commands: char* sessionStr = createSessionString(fLastSessionId); // The "Transport:" and "Session:" (if present) headers make up the 'extra headers': extraHeaders = new char[transportSize + strlen(sessionStr)]; extraHeadersWereAllocated = True; sprintf(extraHeaders, "%s%s", transportStr, sessionStr); delete[] transportStr; delete[] sessionStr; } else if (strcmp(request->commandName(), "GET") == 0 || strcmp(request->commandName(), "POST") == 0) { // We will be sending a HTTP (not a RTSP) request. 
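// NOTE (resulting "Transport:" request headers from the SETUP branch above; the port and
// channel numbers are illustrative):
//   RTP over UDP, unicast:  Transport: RTP/AVP;unicast;client_port=6970-6971\r\n
//   RTP over TCP:           Transport: RTP/AVP/TCP;unicast;interleaved=0-1\r\n
//   raw UDP:                Transport: RAW/RAW/UDP;unicast;client_port=6970-6971\r\n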
// Begin by re-parsing our RTSP URL, to get the stream name (which we'll use as our 'cmdURL' // in the subsequent request), and the server address (which we'll use in a "Host:" header): char* username; char* password; NetAddress destAddress; portNumBits urlPortNum; if (!parseRTSPURL(envir(), fBaseURL, username, password, destAddress, urlPortNum, (char const**)&cmdURL)) return False; if (cmdURL[0] == '\0') cmdURL = (char*)"/"; delete[] username; delete[] password; netAddressBits serverAddress = *(netAddressBits*)(destAddress.data()); AddressString serverAddressString(serverAddress); protocolStr = "HTTP/1.1"; if (strcmp(request->commandName(), "GET") == 0) { // Create a 'session cookie' string, using MD5: struct { struct timeval timestamp; unsigned counter; } seedData; gettimeofday(&seedData.timestamp, NULL); seedData.counter = ++fSessionCookieCounter; our_MD5Data((unsigned char*)(&seedData), sizeof seedData, fSessionCookie); // DSS seems to require that the 'session cookie' string be 22 bytes long: fSessionCookie[23] = '\0'; char const* const extraHeadersFmt = "Host: %s\r\n" "x-sessioncookie: %s\r\n" "Accept: application/x-rtsp-tunnelled\r\n" "Pragma: no-cache\r\n" "Cache-Control: no-cache\r\n"; unsigned extraHeadersSize = strlen(extraHeadersFmt) + strlen(serverAddressString.val()) + strlen(fSessionCookie); extraHeaders = new char[extraHeadersSize]; extraHeadersWereAllocated = True; sprintf(extraHeaders, extraHeadersFmt, serverAddressString.val(), fSessionCookie); } else { // "POST" char const* const extraHeadersFmt = "Host: %s\r\n" "x-sessioncookie: %s\r\n" "Content-Type: application/x-rtsp-tunnelled\r\n" "Pragma: no-cache\r\n" "Cache-Control: no-cache\r\n" "Content-Length: 32767\r\n" "Expires: Sun, 9 Jan 1972 00:00:00 GMT\r\n"; unsigned extraHeadersSize = strlen(extraHeadersFmt) + strlen(serverAddressString.val()) + strlen(fSessionCookie); extraHeaders = new char[extraHeadersSize]; extraHeadersWereAllocated = True; sprintf(extraHeaders, extraHeadersFmt, serverAddressString.val(), fSessionCookie); } } else { // "PLAY", "PAUSE", "TEARDOWN", "RECORD", "SET_PARAMETER", "GET_PARAMETER" // First, make sure that we have a RTSP session in progress if (fLastSessionId == NULL) { envir().setResultMsg("No RTSP session is currently in progress\n"); return False; } char const* sessionId; float originalScale; if (request->session() != NULL) { // Session-level operation cmdURL = (char*)sessionURL(*request->session()); sessionId = fLastSessionId; originalScale = request->session()->scale(); } else { // Media-level operation char const *prefix, *separator, *suffix; constructSubsessionURL(*request->subsession(), prefix, separator, suffix); cmdURL = new char[strlen(prefix) + strlen(separator) + strlen(suffix) + 1]; cmdURLWasAllocated = True; sprintf(cmdURL, "%s%s%s", prefix, separator, suffix); sessionId = request->subsession()->sessionId(); originalScale = request->subsession()->scale(); } if (strcmp(request->commandName(), "PLAY") == 0) { // Create "Session:", "Scale:", and "Range:" headers; these make up the 'extra headers': char* sessionStr = createSessionString(sessionId); char* scaleStr = createScaleString(request->scale(), originalScale); char* rangeStr = createRangeString(request->start(), request->end(), request->absStartTime(), request->absEndTime()); extraHeaders = new char[strlen(sessionStr) + strlen(scaleStr) + strlen(rangeStr) + 1]; extraHeadersWereAllocated = True; sprintf(extraHeaders, "%s%s%s", sessionStr, scaleStr, rangeStr); delete[] sessionStr; delete[] scaleStr; delete[] rangeStr; } else 
{ // Create a "Session:" header; this makes up our 'extra headers': extraHeaders = createSessionString(sessionId); extraHeadersWereAllocated = True; } } return True; } Boolean RTSPClient::isRTSPClient() const { return True; } void RTSPClient::resetTCPSockets() { if (fInputSocketNum >= 0) { envir().taskScheduler().disableBackgroundHandling(fInputSocketNum); ::closeSocket(fInputSocketNum); if (fOutputSocketNum != fInputSocketNum) { envir().taskScheduler().disableBackgroundHandling(fOutputSocketNum); ::closeSocket(fOutputSocketNum); } } fInputSocketNum = fOutputSocketNum = -1; } void RTSPClient::resetResponseBuffer() { fResponseBytesAlreadySeen = 0; fResponseBufferBytesLeft = responseBufferSize; } int RTSPClient::openConnection() { do { // Set up a connection to the server. Begin by parsing the URL: char* username; char* password; NetAddress destAddress; portNumBits urlPortNum; char const* urlSuffix; if (!parseRTSPURL(envir(), fBaseURL, username, password, destAddress, urlPortNum, &urlSuffix)) break; portNumBits destPortNum = fTunnelOverHTTPPortNum == 0 ? urlPortNum : fTunnelOverHTTPPortNum; if (username != NULL || password != NULL) { fCurrentAuthenticator.setUsernameAndPassword(username, password); delete[] username; delete[] password; } // We don't yet have a TCP socket (or we used to have one, but it got closed). Set it up now. fInputSocketNum = fOutputSocketNum = setupStreamSocket(envir(), 0); if (fInputSocketNum < 0) break; ignoreSigPipeOnSocket(fInputSocketNum); // so that servers on the same host that get killed don't also kill us // Connect to the remote endpoint: fServerAddress = *(netAddressBits*)(destAddress.data()); int connectResult = connectToServer(fInputSocketNum, destPortNum); if (connectResult < 0) break; else if (connectResult > 0) { // The connection succeeded. Arrange to handle responses to requests sent on it: envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION, (TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this); } return connectResult; } while (0); resetTCPSockets(); return -1; } int RTSPClient::connectToServer(int socketNum, portNumBits remotePortNum) { MAKE_SOCKADDR_IN(remoteName, fServerAddress, htons(remotePortNum)); if (fVerbosityLevel >= 1) { envir() << "Opening connection to " << AddressString(remoteName).val() << ", port " << remotePortNum << "...\n"; } if (connect(socketNum, (struct sockaddr*) &remoteName, sizeof remoteName) != 0) { int const err = envir().getErrno(); if (err == EINPROGRESS || err == EWOULDBLOCK) { // The connection is pending; we'll need to handle it later. Wait for our socket to be 'writable', or have an exception. envir().taskScheduler().setBackgroundHandling(socketNum, SOCKET_WRITABLE|SOCKET_EXCEPTION, (TaskScheduler::BackgroundHandlerProc*)&connectionHandler, this); return 0; } envir().setResultErrMsg("connect() failed: "); if (fVerbosityLevel >= 1) envir() << "..." 
<< envir().getResultMsg() << "\n"; return -1; } if (fVerbosityLevel >= 1) envir() << "...local connection opened\n"; return 1; } char* RTSPClient::createAuthenticatorString(char const* cmd, char const* url) { Authenticator& auth = fCurrentAuthenticator; // alias, for brevity if (auth.realm() != NULL && auth.username() != NULL && auth.password() != NULL) { // We have a filled-in authenticator, so use it: char* authenticatorStr; if (auth.nonce() != NULL) { // Digest authentication char const* const authFmt = "Authorization: Digest username=\"%s\", realm=\"%s\", " "nonce=\"%s\", uri=\"%s\", response=\"%s\"\r\n"; char const* response = auth.computeDigestResponse(cmd, url); unsigned authBufSize = strlen(authFmt) + strlen(auth.username()) + strlen(auth.realm()) + strlen(auth.nonce()) + strlen(url) + strlen(response); authenticatorStr = new char[authBufSize]; sprintf(authenticatorStr, authFmt, auth.username(), auth.realm(), auth.nonce(), url, response); auth.reclaimDigestResponse(response); } else { // Basic authentication char const* const authFmt = "Authorization: Basic %s\r\n"; unsigned usernamePasswordLength = strlen(auth.username()) + 1 + strlen(auth.password()); char* usernamePassword = new char[usernamePasswordLength+1]; sprintf(usernamePassword, "%s:%s", auth.username(), auth.password()); char* response = base64Encode(usernamePassword, usernamePasswordLength); unsigned const authBufSize = strlen(authFmt) + strlen(response) + 1; authenticatorStr = new char[authBufSize]; sprintf(authenticatorStr, authFmt, response); delete[] response; delete[] usernamePassword; } return authenticatorStr; } // We don't have a (filled-in) authenticator. return strDup(""); } void RTSPClient::handleRequestError(RequestRecord* request) { int resultCode = -envir().getErrno(); if (resultCode == 0) { // Choose some generic error code instead: #if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4) resultCode = -WSAENOTCONN; #else resultCode = -ENOTCONN; #endif } if (request->handler() != NULL) (*request->handler())(this, resultCode, strDup(envir().getResultMsg())); } Boolean RTSPClient ::parseResponseCode(char const* line, unsigned& responseCode, char const*& responseString) { if (sscanf(line, "RTSP/%*s%u", &responseCode) != 1 && sscanf(line, "HTTP/%*s%u", &responseCode) != 1) return False; // Note: We check for HTTP responses as well as RTSP responses, both in order to setup RTSP-over-HTTP tunneling, // and so that we get back a meaningful error if the client tried to mistakenly send a RTSP command to a HTTP-only server. 
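  // For illustration (example values only, not from the original comments): given the status
  // line "RTSP/1.0 200 OK", the "sscanf()" calls above yield a response code of 200; the two
  // scanning loops below then step past the "RTSP/1.0" token and the whitespace after it,
  // leaving "responseString" pointing at "200 OK".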
// Use everything after the RTSP/* (or HTTP/*) as the response string: responseString = line; while (responseString[0] != '\0' && responseString[0] != ' ' && responseString[0] != '\t') ++responseString; while (responseString[0] != '\0' && (responseString[0] == ' ' || responseString[0] == '\t')) ++responseString; // skip whitespace return True; } void RTSPClient::handleIncomingRequest() { // Parse the request string into command name and 'CSeq', then 'handle' the command (by responding that we don't support it): char cmdName[RTSP_PARAM_STRING_MAX]; char urlPreSuffix[RTSP_PARAM_STRING_MAX]; char urlSuffix[RTSP_PARAM_STRING_MAX]; char cseq[RTSP_PARAM_STRING_MAX]; char sessionId[RTSP_PARAM_STRING_MAX]; unsigned contentLength; if (!parseRTSPRequestString(fResponseBuffer, fResponseBytesAlreadySeen, cmdName, sizeof cmdName, urlPreSuffix, sizeof urlPreSuffix, urlSuffix, sizeof urlSuffix, cseq, sizeof cseq, sessionId, sizeof sessionId, contentLength)) { return; } else { if (fVerbosityLevel >= 1) { envir() << "Received incoming RTSP request: " << fResponseBuffer << "\n"; } char tmpBuf[2*RTSP_PARAM_STRING_MAX]; snprintf((char*)tmpBuf, sizeof tmpBuf, "RTSP/1.0 405 Method Not Allowed\r\nCSeq: %s\r\n\r\n", cseq); send(fOutputSocketNum, tmpBuf, strlen(tmpBuf), 0); } } Boolean RTSPClient::checkForHeader(char const* line, char const* headerName, unsigned headerNameLength, char const*& headerParams) { if (_strncasecmp(line, headerName, headerNameLength) != 0) return False; // The line begins with the desired header name. Trim off any whitespace, and return the header parameters: unsigned paramIndex = headerNameLength; while (line[paramIndex] != '\0' && (line[paramIndex] == ' ' || line[paramIndex] == '\t')) ++paramIndex; if (line[paramIndex] == '\0') return False; // the header is assumed to be bad if it has no parameters headerParams = &line[paramIndex]; return True; } Boolean RTSPClient::parseTransportParams(char const* paramsStr, char*& serverAddressStr, portNumBits& serverPortNum, unsigned char& rtpChannelId, unsigned char& rtcpChannelId) { // Initialize the return parameters to 'not found' values: serverAddressStr = NULL; serverPortNum = 0; rtpChannelId = rtcpChannelId = 0xFF; if (paramsStr == NULL) return False; char* foundServerAddressStr = NULL; Boolean foundServerPortNum = False; portNumBits clientPortNum = 0; Boolean foundClientPortNum = False; Boolean foundChannelIds = False; unsigned rtpCid, rtcpCid; Boolean isMulticast = True; // by default char* foundDestinationStr = NULL; portNumBits multicastPortNumRTP, multicastPortNumRTCP; Boolean foundMulticastPortNum = False; // Run through each of the parameters, looking for ones that we handle: char const* fields = paramsStr; char* field = strDupSize(fields); while (sscanf(fields, "%[^;]", field) == 1) { if (sscanf(field, "server_port=%hu", &serverPortNum) == 1) { foundServerPortNum = True; } else if (sscanf(field, "client_port=%hu", &clientPortNum) == 1) { foundClientPortNum = True; } else if (_strncasecmp(field, "source=", 7) == 0) { delete[] foundServerAddressStr; foundServerAddressStr = strDup(field+7); } else if (sscanf(field, "interleaved=%u-%u", &rtpCid, &rtcpCid) == 2) { rtpChannelId = (unsigned char)rtpCid; rtcpChannelId = (unsigned char)rtcpCid; foundChannelIds = True; } else if (strcmp(field, "unicast") == 0) { isMulticast = False; } else if (_strncasecmp(field, "destination=", 12) == 0) { delete[] foundDestinationStr; foundDestinationStr = strDup(field+12); } else if (sscanf(field, "port=%hu-%hu", &multicastPortNumRTP, &multicastPortNumRTCP) 
== 2 ||
	       sscanf(field, "port=%hu", &multicastPortNumRTP) == 1) {
      foundMulticastPortNum = True;
    }

    fields += strlen(field);
    while (fields[0] == ';') ++fields; // skip over all leading ';' chars
    if (fields[0] == '\0') break;
  }
  delete[] field;

  // If we're multicast, and have a "destination=" (multicast) address, then use this
  // as the 'server' address (because some weird servers don't specify the multicast
  // address earlier, in the "DESCRIBE" response's SDP):
  if (isMulticast && foundDestinationStr != NULL && foundMulticastPortNum) {
    delete[] foundServerAddressStr;
    serverAddressStr = foundDestinationStr;
    serverPortNum = multicastPortNumRTP;
    return True;
  }
  delete[] foundDestinationStr;

  // We have a valid "Transport:" header if any of the following are true:
  //   - We saw an "interleaved=" field, indicating RTP/RTCP-over-TCP streaming, or
  //   - We saw a "server_port=" field, or
  //   - We saw a "client_port=" field.
  // If we didn't also see a "server_port=" field, then the server port is assumed to be the same as the client port.
  if (foundChannelIds || foundServerPortNum || foundClientPortNum) {
    if (foundClientPortNum && !foundServerPortNum) {
      serverPortNum = clientPortNum;
    }
    serverAddressStr = foundServerAddressStr;
    return True;
  }

  delete[] foundServerAddressStr;
  return False;
}

Boolean RTSPClient::parseScaleParam(char const* paramStr, float& scale) {
  Locale l("C", Numeric);
  return sscanf(paramStr, "%f", &scale) == 1;
}

Boolean RTSPClient::parseRTPInfoParams(char const*& paramsStr, u_int16_t& seqNum, u_int32_t& timestamp) {
  if (paramsStr == NULL || paramsStr[0] == '\0') return False;
  while (paramsStr[0] == ',') ++paramsStr;

  // "paramsStr" now consists of a ';'-separated list of parameters, ending with ',' or '\0'.
  char* field = strDupSize(paramsStr);

  Boolean sawSeq = False, sawRtptime = False;
  while (sscanf(paramsStr, "%[^;,]", field) == 1) {
    if (sscanf(field, "seq=%hu", &seqNum) == 1) {
      sawSeq = True;
    } else if (sscanf(field, "rtptime=%u", &timestamp) == 1) {
      sawRtptime = True;
    }

    paramsStr += strlen(field);
    if (paramsStr[0] == '\0' || paramsStr[0] == ',') break;
    // ASSERT: paramsStr[0] == ';'
    ++paramsStr; // skip over the ';'
  }
  delete[] field;

  // For the "RTP-Info:" parameters to be useful to us, we need to have seen both the "seq=" and "rtptime=" parameters:
  return sawSeq && sawRtptime;
}

Boolean RTSPClient::handleSETUPResponse(MediaSubsession& subsession, char const* sessionParamsStr,
					char const* transportParamsStr, Boolean streamUsingTCP) {
  char* sessionId = new char[responseBufferSize]; // ensures we have enough space
  Boolean success = False;
  do {
    // Check for a session id:
    if (sessionParamsStr == NULL || sscanf(sessionParamsStr, "%[^;]", sessionId) != 1) {
      envir().setResultMsg("Missing or bad \"Session:\" header");
      break;
    }
    subsession.setSessionId(sessionId);
    delete[] fLastSessionId;
    fLastSessionId = strDup(sessionId);

    // Also look for an optional "; timeout = " parameter following this:
    char const* afterSessionId = sessionParamsStr + strlen(sessionId);
    int timeoutVal;
    if (sscanf(afterSessionId, "; timeout = %d", &timeoutVal) == 1) {
      fSessionTimeoutParameter = timeoutVal;
    }

    // Parse the "Transport:" header parameters:
    char* serverAddressStr;
    portNumBits serverPortNum;
    unsigned char rtpChannelId, rtcpChannelId;
    if (!parseTransportParams(transportParamsStr, serverAddressStr, serverPortNum, rtpChannelId, rtcpChannelId)) {
      envir().setResultMsg("Missing or bad \"Transport:\" header");
      break;
    }
    delete[] subsession.connectionEndpointName();
    subsession.connectionEndpointName() = serverAddressStr;
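    // For illustration (example addresses and ports only): the parameters parsed above
    // typically come from a server reply such as
    //   Transport: RTP/AVP;unicast;destination=10.0.0.1;source=10.0.0.2;client_port=50000-50001;server_port=6970-6971
    // for UDP delivery, or "Transport: RTP/AVP/TCP;unicast;interleaved=0-1" for RTP/RTCP-over-TCP.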
subsession.serverPortNum = serverPortNum; subsession.rtpChannelId = rtpChannelId; subsession.rtcpChannelId = rtcpChannelId; if (streamUsingTCP) { // Tell the subsession to receive RTP (and send/receive RTCP) over the RTSP stream: if (subsession.rtpSource() != NULL) { subsession.rtpSource()->setStreamSocket(fInputSocketNum, subsession.rtpChannelId); // So that we continue to receive & handle RTSP commands and responses from the server subsession.rtpSource()->enableRTCPReports() = False; // To avoid confusing the server (which won't start handling RTP/RTCP-over-TCP until "PLAY"), don't send RTCP "RR"s yet } if (subsession.rtcpInstance() != NULL) subsession.rtcpInstance()->setStreamSocket(fInputSocketNum, subsession.rtcpChannelId); RTPInterface::setServerRequestAlternativeByteHandler(envir(), fInputSocketNum, handleAlternativeRequestByte, this); } else { // Normal case. // Set the RTP and RTCP sockets' destination address and port from the information in the SETUP response (if present): netAddressBits destAddress = subsession.connectionEndpointAddress(); if (destAddress == 0) destAddress = fServerAddress; subsession.setDestinations(destAddress); } success = True; } while (0); delete[] sessionId; return success; } Boolean RTSPClient::handlePLAYResponse(MediaSession& session, MediaSubsession& subsession, char const* scaleParamsStr, char const* rangeParamsStr, char const* rtpInfoParamsStr) { Boolean scaleOK = False, rangeOK = False; do { if (&session != NULL) { // The command was on the whole session if (scaleParamsStr != NULL && !parseScaleParam(scaleParamsStr, session.scale())) break; scaleOK = True; if (rangeParamsStr != NULL && !parseRangeParam(rangeParamsStr, session.playStartTime(), session.playEndTime(), session._absStartTime(), session._absEndTime())) break; rangeOK = True; MediaSubsessionIterator iter(session); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { u_int16_t seqNum; u_int32_t timestamp; subsession->rtpInfo.infoIsNew = False; if (parseRTPInfoParams(rtpInfoParamsStr, seqNum, timestamp)) { subsession->rtpInfo.seqNum = seqNum; subsession->rtpInfo.timestamp = timestamp; subsession->rtpInfo.infoIsNew = True; } if (subsession->rtpSource() != NULL) subsession->rtpSource()->enableRTCPReports() = True; // start sending RTCP "RR"s now } } else { // The command was on a subsession if (scaleParamsStr != NULL && !parseScaleParam(scaleParamsStr, subsession.scale())) break; scaleOK = True; if (rangeParamsStr != NULL && !parseRangeParam(rangeParamsStr, subsession._playStartTime(), subsession._playEndTime(), subsession._absStartTime(), subsession._absEndTime())) break; rangeOK = True; u_int16_t seqNum; u_int32_t timestamp; subsession.rtpInfo.infoIsNew = False; if (parseRTPInfoParams(rtpInfoParamsStr, seqNum, timestamp)) { subsession.rtpInfo.seqNum = seqNum; subsession.rtpInfo.timestamp = timestamp; subsession.rtpInfo.infoIsNew = True; } if (subsession.rtpSource() != NULL) subsession.rtpSource()->enableRTCPReports() = True; // start sending RTCP "RR"s now } return True; } while (0); // An error occurred: if (!scaleOK) { envir().setResultMsg("Bad \"Scale:\" header"); } else if (!rangeOK) { envir().setResultMsg("Bad \"Range:\" header"); } else { envir().setResultMsg("Bad \"RTP-Info:\" header"); } return False; } Boolean RTSPClient::handleTEARDOWNResponse(MediaSession& /*session*/, MediaSubsession& /*subsession*/) { // Because we don't expect to always get a response to "TEARDOWN", we don't need to do anything if we do get one: return True; } Boolean 
RTSPClient::handleGET_PARAMETERResponse(char const* parameterName, char*& resultValueString) { do { // If "parameterName" is non-empty, it may be (possibly followed by ':' and whitespace) at the start of the result string: if (parameterName != NULL && parameterName[0] != '\0') { if (parameterName[1] == '\0') break; // sanity check; there should have been \r\n at the end of "parameterName" unsigned parameterNameLen = strlen(parameterName); // ASSERT: parameterNameLen >= 2; parameterNameLen -= 2; // because of the trailing \r\n if (_strncasecmp(resultValueString, parameterName, parameterNameLen) == 0) { resultValueString += parameterNameLen; if (resultValueString[0] == ':') ++resultValueString; while (resultValueString[0] == ' ' || resultValueString[0] == '\t') ++resultValueString; } } // The rest of "resultValueStr" should be our desired result, but first trim off any \r and/or \n characters at the end: unsigned resultLen = strlen(resultValueString); while (resultLen > 0 && (resultValueString[resultLen-1] == '\r' || resultValueString[resultLen-1] == '\n')) --resultLen; resultValueString[resultLen] = '\0'; return True; } while (0); // An error occurred: envir().setResultMsg("Bad \"GET_PARAMETER\" response"); return False; } Boolean RTSPClient::handleAuthenticationFailure(char const* paramsStr) { if (paramsStr == NULL) return False; // There was no "WWW-Authenticate:" header; we can't proceed. // Fill in "fCurrentAuthenticator" with the information from the "WWW-Authenticate:" header: Boolean alreadyHadRealm = fCurrentAuthenticator.realm() != NULL; char* realm = strDupSize(paramsStr); char* nonce = strDupSize(paramsStr); Boolean success = True; if (sscanf(paramsStr, "Digest realm=\"%[^\"]\", nonce=\"%[^\"]\"", realm, nonce) == 2) { fCurrentAuthenticator.setRealmAndNonce(realm, nonce); } else if (sscanf(paramsStr, "Basic realm=\"%[^\"]\"", realm) == 1) { fCurrentAuthenticator.setRealmAndNonce(realm, NULL); // Basic authentication } else { success = False; // bad "WWW-Authenticate:" header } delete[] realm; delete[] nonce; if (alreadyHadRealm || fCurrentAuthenticator.username() == NULL || fCurrentAuthenticator.password() == NULL) { // We already had a 'realm', or don't have a username and/or password, // so the new "WWW-Authenticate:" header information won't help us. We remain unauthenticated. success = False; } return success; } Boolean RTSPClient::resendCommand(RequestRecord* request) { if (fVerbosityLevel >= 1) envir() << "Resending...\n"; if (request != NULL && strcmp(request->commandName(), "GET") != 0) request->cseq() = ++fCSeq; return sendRequest(request) != 0; } char const* RTSPClient::sessionURL(MediaSession const& session) const { char const* url = session.controlPath(); if (url == NULL || strcmp(url, "*") == 0) url = fBaseURL; return url; } void RTSPClient::handleAlternativeRequestByte(void* rtspClient, u_int8_t requestByte) { ((RTSPClient*)rtspClient)->handleAlternativeRequestByte1(requestByte); } void RTSPClient::handleAlternativeRequestByte1(u_int8_t requestByte) { if (requestByte == 0xFF) { // Hack: The new handler of the input TCP socket encountered an error reading it. 
    // Indicate this:
    handleResponseBytes(-1);
  } else if (requestByte == 0xFE) {
    // Another hack: The new handler of the input TCP socket no longer needs it, so take back control:
    envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION,
						  (TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this);
  } else {
    // Normal case:
    fResponseBuffer[fResponseBytesAlreadySeen] = requestByte;
    handleResponseBytes(1);
  }
}

static Boolean isAbsoluteURL(char const* url) {
  // Assumption: "url" is absolute if it contains a ':', before any
  // occurrence of '/'
  while (*url != '\0' && *url != '/') {
    if (*url == ':') return True;
    ++url;
  }
  return False;
}

void RTSPClient::constructSubsessionURL(MediaSubsession const& subsession,
					char const*& prefix, char const*& separator, char const*& suffix) {
  // Figure out what the URL describing "subsession" will look like.
  // The URL is returned in three parts: prefix; separator; suffix
  //##### NOTE: This code doesn't really do the right thing if "sessionURL()"
  // doesn't end with a "/", and "subsession.controlPath()" is relative.
  // The right thing would have been to truncate "sessionURL()" back to the
  // rightmost "/", and then add "subsession.controlPath()".
  // In practice, though, each "DESCRIBE" response typically contains
  // a "Content-Base:" header that consists of "sessionURL()" followed by
  // a "/", in which case this code ends up giving the correct result.
  // However, we should really fix this code to do the right thing, and
  // also check for and use the "Content-Base:" header appropriately. #####
  prefix = sessionURL(subsession.parentSession());
  if (prefix == NULL) prefix = "";

  suffix = subsession.controlPath();
  if (suffix == NULL) suffix = "";

  if (isAbsoluteURL(suffix)) {
    prefix = separator = "";
  } else {
    unsigned prefixLen = strlen(prefix);
    separator = (prefixLen == 0 || prefix[prefixLen-1] == '/' || suffix[0] == '/') ? "" : "/";
  }
}

Boolean RTSPClient::setupHTTPTunneling1() {
  // Set up RTSP-over-HTTP tunneling, as described in
  // http://developer.apple.com/quicktime/icefloe/dispatch028.html and http://images.apple.com/br/quicktime/pdf/QTSS_Modules.pdf
  if (fVerbosityLevel >= 1) {
    envir() << "Requesting RTSP-over-HTTP tunneling (on port " << fTunnelOverHTTPPortNum << ")\n\n";
  }

  // Begin by sending a HTTP "GET", to set up the server->client link.  Continue when we handle the response:
  return sendRequest(new RequestRecord(1, "GET", responseHandlerForHTTP_GET)) != 0;
}

void RTSPClient::responseHandlerForHTTP_GET(RTSPClient* rtspClient, int responseCode, char* responseString) {
  if (rtspClient != NULL) rtspClient->responseHandlerForHTTP_GET1(responseCode, responseString);
}

void RTSPClient::responseHandlerForHTTP_GET1(int responseCode, char* responseString) {
  RequestRecord* request;
  do {
    delete[] responseString; // we don't need it (but are responsible for deleting it)
    if (responseCode != 0) break; // The HTTP "GET" failed.

    // Having successfully set up (using the HTTP "GET" command) the server->client link, set up a second TCP connection
    // (to the same server & port as before) for the client->server link.  All future output will be to this new socket.
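    // (To recap the tunneling scheme: the earlier HTTP "GET" connection carries server->client
    // traffic, while this second connection, used for a HTTP "POST", carries the client->server
    // RTSP commands; the server pairs the two connections using the shared "x-sessioncookie" value.)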
    fOutputSocketNum = setupStreamSocket(envir(), 0);
    if (fOutputSocketNum < 0) break;
    ignoreSigPipeOnSocket(fOutputSocketNum); // so that servers on the same host that get killed don't also kill us

    fHTTPTunnelingConnectionIsPending = True;
    int connectResult = connectToServer(fOutputSocketNum, fTunnelOverHTTPPortNum);
    if (connectResult < 0) break; // an error occurred
    else if (connectResult == 0) {
      // A connection is pending.  Continue setting up RTSP-over-HTTP when the connection completes.
      // First, move the pending requests to the 'awaiting connection' queue:
      while ((request = fRequestsAwaitingHTTPTunneling.dequeue()) != NULL) {
	fRequestsAwaitingConnection.enqueue(request);
      }
      return;
    }

    // The connection succeeded.  Continue setting up RTSP-over-HTTP:
    if (!setupHTTPTunneling2()) break;

    // RTSP-over-HTTP tunneling succeeded.  Resume the pending request(s):
    while ((request = fRequestsAwaitingHTTPTunneling.dequeue()) != NULL) {
      sendRequest(request);
    }
    return;
  } while (0);

  // An error occurred.  Dequeue the pending request(s), and tell them about the error:
  fHTTPTunnelingConnectionIsPending = False;
  resetTCPSockets(); // do this now, in case an error handler deletes "this"
  RequestQueue requestQueue(fRequestsAwaitingHTTPTunneling);
  while ((request = requestQueue.dequeue()) != NULL) {
    handleRequestError(request);
    delete request;
  }
}

Boolean RTSPClient::setupHTTPTunneling2() {
  fHTTPTunnelingConnectionIsPending = False;

  // Send a HTTP "POST", to set up the client->server link.  (Note that we won't see a reply to the "POST".)
  return sendRequest(new RequestRecord(1, "POST", NULL)) != 0;
}

void RTSPClient::connectionHandler(void* instance, int /*mask*/) {
  RTSPClient* client = (RTSPClient*)instance;
  client->connectionHandler1();
}

void RTSPClient::connectionHandler1() {
  // Restore normal handling on our sockets:
  envir().taskScheduler().disableBackgroundHandling(fOutputSocketNum);
  envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION,
						(TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this);

  // Move all requests awaiting connection into a new, temporary queue, to clear "fRequestsAwaitingConnection"
  // (so that "sendRequest()" doesn't get confused by "fRequestsAwaitingConnection" being nonempty, and enqueue them all over again).
  RequestQueue tmpRequestQueue(fRequestsAwaitingConnection);
  RequestRecord* request;

  // Find out whether the connection succeeded or failed:
  do {
    int err = 0;
    SOCKLEN_T len = sizeof err;
    if (getsockopt(fInputSocketNum, SOL_SOCKET, SO_ERROR, (char*)&err, &len) < 0 || err != 0) {
      envir().setResultErrMsg("Connection to server failed: ", err);
      if (fVerbosityLevel >= 1) envir() << "..." << envir().getResultMsg() << "\n";
      break;
    }

    // The connection succeeded.  If the connection came about from an attempt to set up RTSP-over-HTTP, finish this now:
    if (fVerbosityLevel >= 1) envir() << "...remote connection opened\n";
    if (fHTTPTunnelingConnectionIsPending && !setupHTTPTunneling2()) break;

    // Resume sending all pending requests:
    while ((request = tmpRequestQueue.dequeue()) != NULL) {
      sendRequest(request);
    }
    return;
  } while (0);

  // An error occurred.
  // Tell all pending requests about the error:
  resetTCPSockets(); // do this now, in case an error handler deletes "this"
  while ((request = tmpRequestQueue.dequeue()) != NULL) {
    handleRequestError(request);
    delete request;
  }
}

void RTSPClient::incomingDataHandler(void* instance, int /*mask*/) {
  RTSPClient* client = (RTSPClient*)instance;
  client->incomingDataHandler1();
}

void RTSPClient::incomingDataHandler1() {
  struct sockaddr_in dummy; // 'from' address - not used
  int bytesRead = readSocket(envir(), fInputSocketNum, (unsigned char*)&fResponseBuffer[fResponseBytesAlreadySeen],
			     fResponseBufferBytesLeft, dummy);
  handleResponseBytes(bytesRead);
}

static char* getLine(char* startOfLine) {
  // returns the start of the next line, or NULL if none.  Note that this modifies the input string to add '\0' characters.
  for (char* ptr = startOfLine; *ptr != '\0'; ++ptr) {
    // Check for the end of line: \r\n (but also accept \r or \n by itself):
    if (*ptr == '\r' || *ptr == '\n') {
      // We found the end of the line
      if (*ptr == '\r') {
	*ptr++ = '\0';
	if (*ptr == '\n') ++ptr;
      } else {
	*ptr++ = '\0';
      }
      return ptr;
    }
  }
  return NULL;
}

void RTSPClient::handleResponseBytes(int newBytesRead) {
  do {
    if (newBytesRead >= 0 && (unsigned)newBytesRead < fResponseBufferBytesLeft) break; // data was read OK; process it below

    if (newBytesRead >= (int)fResponseBufferBytesLeft) {
      // We filled up our response buffer.  Treat this as an error (for the first response handler):
      envir().setResultMsg("RTSP response was truncated. Increase \"RTSPClient::responseBufferSize\"");
    }

    // An error occurred while reading our TCP socket.  Call all pending response handlers, indicating this error.
    // (However, the "RTSP response was truncated" error is applied to the first response handler only.)
    resetResponseBuffer();
    RequestRecord* request;
    if (newBytesRead > 0) { // The "RTSP response was truncated" error
      if ((request = fRequestsAwaitingResponse.dequeue()) != NULL) {
	handleRequestError(request);
	delete request;
      }
    } else {
      RequestQueue requestQueue(fRequestsAwaitingResponse);
      resetTCPSockets(); // do this now, in case an error handler deletes "this"

      while ((request = requestQueue.dequeue()) != NULL) {
	handleRequestError(request);
	delete request;
      }
    }
    return;
  } while (0);

  fResponseBufferBytesLeft -= newBytesRead;
  fResponseBytesAlreadySeen += newBytesRead;
  fResponseBuffer[fResponseBytesAlreadySeen] = '\0';
  if (fVerbosityLevel >= 1 && newBytesRead > 1) envir() << "Received " << newBytesRead << " new bytes of response data.\n";

  unsigned numExtraBytesAfterResponse = 0;
  Boolean responseSuccess = False; // by default
  do {
    // Data was read OK.  Look through the data that we've read so far, to see if it contains <CR><LF><CR><LF>.
    // (If not, wait for more data to arrive.)
    Boolean endOfHeaders = False;
    char const* ptr = fResponseBuffer;
    if (fResponseBytesAlreadySeen > 3) {
      char const* const ptrEnd = &fResponseBuffer[fResponseBytesAlreadySeen-3];
      while (ptr < ptrEnd) {
	if (*ptr++ == '\r' && *ptr++ == '\n' && *ptr++ == '\r' && *ptr++ == '\n') {
	  // This is it
	  endOfHeaders = True;
	  break;
	}
      }
    }

    if (!endOfHeaders) return; // subsequent reads will be needed to get the complete response

    // Now that we have the complete response headers (ending with <CR><LF><CR><LF>), parse them to get the response code, CSeq,
    // and various other header parameters.  To do this, we first make a copy of the received header data, because we'll be
    // modifying it by adding '\0' bytes.
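    // (Note: the remainder of this "do" block handles one complete response; the loop's
    // terminating condition below re-runs it whenever "numExtraBytesAfterResponse" shows that
    // additional, pipelined responses are already sitting in the buffer.)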
char* headerDataCopy; unsigned responseCode = 200; char const* responseStr = NULL; RequestRecord* foundRequest = NULL; char const* sessionParamsStr = NULL; char const* transportParamsStr = NULL; char const* scaleParamsStr = NULL; char const* rangeParamsStr = NULL; char const* rtpInfoParamsStr = NULL; char const* wwwAuthenticateParamsStr = NULL; char const* publicParamsStr = NULL; char* bodyStart = NULL; unsigned numBodyBytes = 0; responseSuccess = False; do { headerDataCopy = new char[responseBufferSize]; strncpy(headerDataCopy, fResponseBuffer, fResponseBytesAlreadySeen); headerDataCopy[fResponseBytesAlreadySeen] = '\0'; char* lineStart = headerDataCopy; char* nextLineStart = getLine(lineStart); if (!parseResponseCode(lineStart, responseCode, responseStr)) { // This does not appear to be a RTSP response; perhaps it's a RTSP request instead? handleIncomingRequest(); break; // we're done with this data } // Scan through the headers, handling the ones that we're interested in: Boolean reachedEndOfHeaders; unsigned cseq = 0; unsigned contentLength = 0; while (1) { reachedEndOfHeaders = True; // by default; may get changed below lineStart = nextLineStart; if (lineStart == NULL) break; nextLineStart = getLine(lineStart); if (lineStart[0] == '\0') break; // this is a blank line reachedEndOfHeaders = False; char const* headerParamsStr; if (checkForHeader(lineStart, "CSeq:", 5, headerParamsStr)) { if (sscanf(headerParamsStr, "%u", &cseq) != 1 || cseq <= 0) { envir().setResultMsg("Bad \"CSeq:\" header: \"", lineStart, "\""); break; } // Find the handler function for "cseq": RequestRecord* request; while ((request = fRequestsAwaitingResponse.dequeue()) != NULL) { if (request->cseq() < cseq) { // assumes that the CSeq counter will never wrap around // We never received (and will never receive) a response for this handler, so delete it: if (fVerbosityLevel >= 1 && strcmp(request->commandName(), "POST") != 0) { envir() << "WARNING: The server did not respond to our \"" << request->commandName() << "\" request (CSeq: " << request->cseq() << "). The server appears to be buggy (perhaps not handling pipelined requests properly).\n"; } delete request; } else if (request->cseq() == cseq) { // This is the handler that we want. Remove its record, but remember it, so that we can later call its handler: foundRequest = request; break; } else { // request->cseq() > cseq // No handler was registered for this response, so ignore it. 
break; } } } else if (checkForHeader(lineStart, "Content-Length:", 15, headerParamsStr)) { if (sscanf(headerParamsStr, "%u", &contentLength) != 1) { envir().setResultMsg("Bad \"Content-Length:\" header: \"", lineStart, "\""); break; } } else if (checkForHeader(lineStart, "Content-Base:", 13, headerParamsStr)) { setBaseURL(headerParamsStr); } else if (checkForHeader(lineStart, "Session:", 8, sessionParamsStr)) { } else if (checkForHeader(lineStart, "Transport:", 10, transportParamsStr)) { } else if (checkForHeader(lineStart, "Scale:", 6, scaleParamsStr)) { } else if (checkForHeader(lineStart, "Range:", 6, rangeParamsStr)) { } else if (checkForHeader(lineStart, "RTP-Info:", 9, rtpInfoParamsStr)) { } else if (checkForHeader(lineStart, "WWW-Authenticate:", 17, headerParamsStr)) { // If we've already seen a "WWW-Authenticate:" header, then we replace it with this new one only if // the new one specifies "Digest" authentication: if (wwwAuthenticateParamsStr == NULL || _strncasecmp(headerParamsStr, "Digest", 6) == 0) { wwwAuthenticateParamsStr = headerParamsStr; } } else if (checkForHeader(lineStart, "Public:", 7, publicParamsStr)) { } else if (checkForHeader(lineStart, "Allow:", 6, publicParamsStr)) { // Note: we accept "Allow:" instead of "Public:", so that "OPTIONS" requests made to HTTP servers will work. } else if (checkForHeader(lineStart, "Location:", 9, headerParamsStr)) { setBaseURL(headerParamsStr); } } if (!reachedEndOfHeaders) break; // an error occurred if (foundRequest == NULL) { // Hack: The response didn't have a "CSeq:" header; assume it's for our most recent request: foundRequest = fRequestsAwaitingResponse.dequeue(); } // If we saw a "Content-Length:" header, then make sure that we have the amount of data that it specified: unsigned bodyOffset = nextLineStart == NULL ? fResponseBytesAlreadySeen : nextLineStart - headerDataCopy; bodyStart = &fResponseBuffer[bodyOffset]; numBodyBytes = fResponseBytesAlreadySeen - bodyOffset; if (contentLength > numBodyBytes) { // We need to read more data. First, make sure we have enough space for it: unsigned numExtraBytesNeeded = contentLength - numBodyBytes; unsigned remainingBufferSize = responseBufferSize - fResponseBytesAlreadySeen; if (numExtraBytesNeeded > remainingBufferSize) { char tmpBuf[200]; sprintf(tmpBuf, "Response buffer size (%d) is too small for \"Content-Length:\" %d (need a buffer size of >= %d bytes\n", responseBufferSize, contentLength, fResponseBytesAlreadySeen + numExtraBytesNeeded); envir().setResultMsg(tmpBuf); break; } if (fVerbosityLevel >= 1) { envir() << "Have received " << fResponseBytesAlreadySeen << " total bytes of a " << (foundRequest != NULL ? foundRequest->commandName() : "(unknown)") << " RTSP response; awaiting " << numExtraBytesNeeded << " bytes more.\n"; } delete[] headerDataCopy; if (foundRequest != NULL) fRequestsAwaitingResponse.putAtHead(foundRequest);// put our request record back; we need it again return; // We need to read more data } // We now have a complete response (including all bytes specified by the "Content-Length:" header, if any). char* responseEnd = bodyStart + contentLength; numExtraBytesAfterResponse = &fResponseBuffer[fResponseBytesAlreadySeen] - responseEnd; if (fVerbosityLevel >= 1) { char saved = *responseEnd; *responseEnd = '\0'; envir() << "Received a complete " << (foundRequest != NULL ? 
foundRequest->commandName() : "(unknown)") << " response:\n" << fResponseBuffer << "\n"; if (numExtraBytesAfterResponse > 0) envir() << "\t(plus " << numExtraBytesAfterResponse << " additional bytes)\n"; *responseEnd = saved; } if (foundRequest != NULL) { Boolean needToResendCommand = False; // by default... if (responseCode == 200) { // Do special-case response handling for some commands: if (strcmp(foundRequest->commandName(), "SETUP") == 0) { if (!handleSETUPResponse(*foundRequest->subsession(), sessionParamsStr, transportParamsStr, foundRequest->booleanFlags()&0x1)) break; } else if (strcmp(foundRequest->commandName(), "PLAY") == 0) { if (!handlePLAYResponse(*foundRequest->session(), *foundRequest->subsession(), scaleParamsStr, rangeParamsStr, rtpInfoParamsStr)) break; } else if (strcmp(foundRequest->commandName(), "TEARDOWN") == 0) { if (!handleTEARDOWNResponse(*foundRequest->session(), *foundRequest->subsession())) break; } else if (strcmp(foundRequest->commandName(), "GET_PARAMETER") == 0) { if (!handleGET_PARAMETERResponse(foundRequest->contentStr(), bodyStart)) break; } } else if (responseCode == 401 && handleAuthenticationFailure(wwwAuthenticateParamsStr)) { // We need to resend the command, with an "Authorization:" header: needToResendCommand = True; if (strcmp(foundRequest->commandName(), "GET") == 0) { // Note: If a HTTP "GET" command (for RTSP-over-HTTP tunneling) returns "401 Unauthorized", then we resend it // (with an "Authorization:" header), just as we would for a RTSP command. However, we do so using a new TCP connection, // because some servers close the original connection after returning the "401 Unauthorized". resetTCPSockets(); // forces the opening of a new connection for the resent command } } else if (responseCode == 301 || responseCode == 302) { // redirection resetTCPSockets(); // because we need to connect somewhere else next needToResendCommand = True; } if (needToResendCommand) { resetResponseBuffer(); if (!resendCommand(foundRequest)) break; delete[] headerDataCopy; return; // without calling our response handler; the response to the resent command will do that } } responseSuccess = True; } while (0); // If we have a handler function for this response, call it. // But first, reset our response buffer, in case the handler goes to the event loop, and we end up getting called recursively: if (numExtraBytesAfterResponse > 0) { // An unusual case; usually due to having received pipelined responses. Move the extra bytes to the front of the buffer: char* responseEnd = &fResponseBuffer[fResponseBytesAlreadySeen - numExtraBytesAfterResponse]; // But first: A hack to save a copy of the response 'body', in case it's needed below for "resultString": numBodyBytes -= numExtraBytesAfterResponse; if (numBodyBytes > 0) { char saved = *responseEnd; *responseEnd = '\0'; bodyStart = strDup(bodyStart); *responseEnd = saved; } memmove(fResponseBuffer, responseEnd, numExtraBytesAfterResponse); fResponseBytesAlreadySeen = numExtraBytesAfterResponse; fResponseBufferBytesLeft = responseBufferSize - numExtraBytesAfterResponse; fResponseBuffer[numExtraBytesAfterResponse] = '\0'; } else { resetResponseBuffer(); } if (foundRequest != NULL && foundRequest->handler() != NULL) { int resultCode; char* resultString; if (responseSuccess) { if (responseCode == 200) { resultCode = 0; resultString = numBodyBytes > 0 ? 
strDup(bodyStart) : strDup(publicParamsStr); // Note: The "strDup(bodyStart)" call assumes that the body is encoded without interior '\0' bytes } else { resultCode = responseCode; resultString = strDup(responseStr); envir().setResultMsg(responseStr); } (*foundRequest->handler())(this, resultCode, resultString); } else { // An error occurred parsing the response, so call the handler, indicating an error: handleRequestError(foundRequest); } } delete foundRequest; delete[] headerDataCopy; if (numExtraBytesAfterResponse > 0 && numBodyBytes > 0) delete[] bodyStart; } while (numExtraBytesAfterResponse > 0 && responseSuccess); } ////////// RTSPClient::RequestRecord implementation ////////// RTSPClient::RequestRecord::RequestRecord(unsigned cseq, char const* commandName, responseHandler* handler, MediaSession* session, MediaSubsession* subsession, u_int32_t booleanFlags, double start, double end, float scale, char const* contentStr) : fNext(NULL), fCSeq(cseq), fCommandName(commandName), fSession(session), fSubsession(subsession), fBooleanFlags(booleanFlags), fStart(start), fEnd(end), fAbsStartTime(NULL), fAbsEndTime(NULL), fScale(scale), fContentStr(strDup(contentStr)), fHandler(handler) { } RTSPClient::RequestRecord::RequestRecord(unsigned cseq, responseHandler* handler, char const* absStartTime, char const* absEndTime, float scale, MediaSession* session, MediaSubsession* subsession) : fNext(NULL), fCSeq(cseq), fCommandName("PLAY"), fSession(session), fSubsession(subsession), fBooleanFlags(0), fStart(0.0f), fEnd(-1.0f), fAbsStartTime(strDup(absStartTime)), fAbsEndTime(strDup(absEndTime)), fScale(scale), fContentStr(NULL), fHandler(handler) { } RTSPClient::RequestRecord::~RequestRecord() { // Delete the rest of the list first: delete fNext; delete[] fAbsStartTime; delete[] fAbsEndTime; delete[] fContentStr; } ////////// RTSPClient::RequestQueue implementation ////////// RTSPClient::RequestQueue::RequestQueue() : fHead(NULL), fTail(NULL) { } RTSPClient::RequestQueue::RequestQueue(RequestQueue& origQueue) : fHead(NULL), fTail(NULL) { RequestRecord* request; while ((request = origQueue.dequeue()) != NULL) { enqueue(request); } } RTSPClient::RequestQueue::~RequestQueue() { delete fHead; } void RTSPClient::RequestQueue::enqueue(RequestRecord* request) { if (fTail == NULL) { fHead = request; } else { fTail->next() = request; } fTail = request; } RTSPClient::RequestRecord* RTSPClient::RequestQueue::dequeue() { RequestRecord* request = fHead; if (fHead == fTail) { fHead = NULL; fTail = NULL; } else { fHead = fHead->next(); } if (request != NULL) request->next() = NULL; return request; } void RTSPClient::RequestQueue::putAtHead(RequestRecord* request) { request->next() = fHead; fHead = request; if (fTail == NULL) { fTail = request; } } RTSPClient::RequestRecord* RTSPClient::RequestQueue::findByCSeq(unsigned cseq) { RequestRecord* request; for (request = fHead; request != NULL; request = request->next()) { if (request->cseq() == cseq) return request; } return NULL; } ////////// HandlerServerForREGISTERCommand implementation ///////// HandlerServerForREGISTERCommand* HandlerServerForREGISTERCommand ::createNew(UsageEnvironment& env, onRTSPClientCreationFunc* creationFunc, Port ourPort, UserAuthenticationDatabase* authDatabase, int verbosityLevel, char const* applicationName) { int ourSocket = setUpOurSocket(env, ourPort); if (ourSocket == -1) return NULL; return new HandlerServerForREGISTERCommand(env, creationFunc, ourSocket, ourPort, authDatabase, verbosityLevel, applicationName); } 
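// For illustration, a minimal (hypothetical) use of this class; "myOnClientCreation" and the
// port number 8554 are made-up names/values for this sketch, not part of the library:
//
//   static void myOnClientCreation(RTSPClient* newClient, Boolean deliverViaTCP) {
//     // e.g., send "DESCRIBE"/"SETUP"/"PLAY" on "newClient" to receive the announced stream
//   }
//   ...
//   HandlerServerForREGISTERCommand* handlerServer
//     = HandlerServerForREGISTERCommand::createNew(*env, myOnClientCreation, 8554,
//                                                  NULL/*no authentication*/, 1, "testApp");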
HandlerServerForREGISTERCommand
::HandlerServerForREGISTERCommand(UsageEnvironment& env, onRTSPClientCreationFunc* creationFunc, int ourSocket, Port ourPort,
				  UserAuthenticationDatabase* authDatabase, int verbosityLevel, char const* applicationName)
  : RTSPServer(env, ourSocket, ourPort, authDatabase, 30/*small reclamationTestSeconds*/),
    fCreationFunc(creationFunc), fVerbosityLevel(verbosityLevel), fApplicationName(strDup(applicationName)) {
}

HandlerServerForREGISTERCommand::~HandlerServerForREGISTERCommand() {
  delete[] fApplicationName;
}

RTSPClient* HandlerServerForREGISTERCommand
::createNewRTSPClient(char const* rtspURL, int verbosityLevel, char const* applicationName, int socketNumToServer) {
  // Default implementation: create a basic "RTSPClient":
  return RTSPClient::createNew(envir(), rtspURL, verbosityLevel, applicationName, 0, socketNumToServer);
}

char const* HandlerServerForREGISTERCommand::allowedCommandNames() {
  return "OPTIONS, REGISTER";
}

Boolean HandlerServerForREGISTERCommand::weImplementREGISTER(char const* proxyURLSuffix, char*& responseStr) {
  responseStr = NULL;
  return True;
}

void HandlerServerForREGISTERCommand::implementCmd_REGISTER(char const* url, char const* urlSuffix, int socketToRemoteServer,
							    Boolean deliverViaTCP, char const* /*proxyURLSuffix*/) {
  // Create a new "RTSPClient" object, and call our 'creation function' with it:
  RTSPClient* newRTSPClient = createNewRTSPClient(url, fVerbosityLevel, fApplicationName, socketToRemoteServer);
  if (fCreationFunc != NULL) (*fCreationFunc)(newRTSPClient, deliverViaTCP);
}
live/liveMedia/RTSPCommon.cpp000444 001751 000000 00000030465 12265042432 016313 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// Common routines used by both RTSP clients and servers
// Implementation

#include "RTSPCommon.hh"
#include "Locale.hh"
#include <string.h>
#include <stdio.h>
#include <ctype.h> // for "isxdigit()"
#include <time.h> // for "strftime()" and "gmtime()"

#if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4)
#else
#include <signal.h>
#define USE_SIGNALS 1
#endif

static void decodeURL(char* url) {
  // Replace (in place) any %<hex><hex> sequences with the appropriate 8-bit character.
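  // For illustration (example value only): this rewrites "mp3%20audio" to "mp3 audio" in place;
  // each "%<hex><hex>" escape becomes the single byte that the two hex digits denote.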
char* cursor = url; while (*cursor) { if ((cursor[0] == '%') && cursor[1] && isxdigit(cursor[1]) && cursor[2] && isxdigit(cursor[2])) { // We saw a % followed by 2 hex digits, so we copy the literal hex value into the URL, then advance the cursor past it: char hex[3]; hex[0] = cursor[1]; hex[1] = cursor[2]; hex[2] = '\0'; *url++ = (char)strtol(hex, NULL, 16); cursor += 3; } else { // Common case: This is a normal character or a bogus % expression, so just copy it *url++ = *cursor++; } } *url = '\0'; } Boolean parseRTSPRequestString(char const* reqStr, unsigned reqStrSize, char* resultCmdName, unsigned resultCmdNameMaxSize, char* resultURLPreSuffix, unsigned resultURLPreSuffixMaxSize, char* resultURLSuffix, unsigned resultURLSuffixMaxSize, char* resultCSeq, unsigned resultCSeqMaxSize, char* resultSessionIdStr, unsigned resultSessionIdStrMaxSize, unsigned& contentLength) { // This parser is currently rather dumb; it should be made smarter ##### // "Be liberal in what you accept": Skip over any whitespace at the start of the request: unsigned i; for (i = 0; i < reqStrSize; ++i) { char c = reqStr[i]; if (!(c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '\0')) break; } if (i == reqStrSize) return False; // The request consisted of nothing but whitespace! // Then read everything up to the next space (or tab) as the command name: Boolean parseSucceeded = False; unsigned i1 = 0; for (; i1 < resultCmdNameMaxSize-1 && i < reqStrSize; ++i,++i1) { char c = reqStr[i]; if (c == ' ' || c == '\t') { parseSucceeded = True; break; } resultCmdName[i1] = c; } resultCmdName[i1] = '\0'; if (!parseSucceeded) return False; // Skip over the prefix of any "rtsp://" or "rtsp:/" URL that follows: unsigned j = i+1; while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j; // skip over any additional white space for (; (int)j < (int)(reqStrSize-8); ++j) { if ((reqStr[j] == 'r' || reqStr[j] == 'R') && (reqStr[j+1] == 't' || reqStr[j+1] == 'T') && (reqStr[j+2] == 's' || reqStr[j+2] == 'S') && (reqStr[j+3] == 'p' || reqStr[j+3] == 'P') && reqStr[j+4] == ':' && reqStr[j+5] == '/') { j += 6; if (reqStr[j] == '/') { // This is a "rtsp://" URL; skip over the host:port part that follows: ++j; while (j < reqStrSize && reqStr[j] != '/' && reqStr[j] != ' ') ++j; } else { // This is a "rtsp:/" URL; back up to the "/": --j; } i = j; break; } } // Look for the URL suffix (before the following "RTSP/"): parseSucceeded = False; for (unsigned k = i+1; (int)k < (int)(reqStrSize-5); ++k) { if (reqStr[k] == 'R' && reqStr[k+1] == 'T' && reqStr[k+2] == 'S' && reqStr[k+3] == 'P' && reqStr[k+4] == '/') { while (--k >= i && reqStr[k] == ' ') {} // go back over all spaces before "RTSP/" unsigned k1 = k; while (k1 > i && reqStr[k1] != '/') --k1; // ASSERT: At this point // i: first space or slash after "host" or "host:port" // k: last non-space before "RTSP/" // k1: last slash in the range [i,k] // The URL suffix comes from [k1+1,k] // Copy "resultURLSuffix": unsigned n = 0, k2 = k1+1; if (k2 <= k) { if (k - k1 + 1 > resultURLSuffixMaxSize) return False; // there's no room while (k2 <= k) resultURLSuffix[n++] = reqStr[k2++]; } resultURLSuffix[n] = '\0'; // The URL 'pre-suffix' comes from [i+1,k1-1] // Copy "resultURLPreSuffix": n = 0; k2 = i+1; if (k2+1 <= k1) { if (k1 - i > resultURLPreSuffixMaxSize) return False; // there's no room while (k2 <= k1-1) resultURLPreSuffix[n++] = reqStr[k2++]; } resultURLPreSuffix[n] = '\0'; decodeURL(resultURLPreSuffix); i = k + 7; // to go past " RTSP/" parseSucceeded = True; break; } } if 
(!parseSucceeded) return False;

  // Look for "CSeq:" (mandatory, case insensitive), skip whitespace,
  // then read everything up to the next \r or \n as 'CSeq':
  parseSucceeded = False;
  for (j = i; (int)j < (int)(reqStrSize-5); ++j) {
    if (_strncasecmp("CSeq:", &reqStr[j], 5) == 0) {
      j += 5;
      while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j;
      unsigned n;
      for (n = 0; n < resultCSeqMaxSize-1 && j < reqStrSize; ++n,++j) {
	char c = reqStr[j];
	if (c == '\r' || c == '\n') {
	  parseSucceeded = True;
	  break;
	}
	resultCSeq[n] = c;
      }
      resultCSeq[n] = '\0';
      break;
    }
  }
  if (!parseSucceeded) return False;

  // Look for "Session:" (optional, case insensitive), skip whitespace,
  // then read everything up to the next \r or \n as 'Session':
  resultSessionIdStr[0] = '\0'; // default value (empty string)
  for (j = i; (int)j < (int)(reqStrSize-8); ++j) {
    if (_strncasecmp("Session:", &reqStr[j], 8) == 0) {
      j += 8;
      while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j;
      unsigned n;
      for (n = 0; n < resultSessionIdStrMaxSize-1 && j < reqStrSize; ++n,++j) {
	char c = reqStr[j];
	if (c == '\r' || c == '\n') {
	  break;
	}
	resultSessionIdStr[n] = c;
      }
      resultSessionIdStr[n] = '\0';
      break;
    }
  }

  // Also: Look for "Content-Length:" (optional, case insensitive)
  contentLength = 0; // default value
  for (j = i; (int)j < (int)(reqStrSize-15); ++j) {
    if (_strncasecmp("Content-Length:", &(reqStr[j]), 15) == 0) {
      j += 15;
      while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j;
      unsigned num;
      if (sscanf(&reqStr[j], "%u", &num) == 1) {
	contentLength = num;
      }
    }
  }

  return True;
}

Boolean parseRangeParam(char const* paramStr, double& rangeStart, double& rangeEnd, char*& absStartTime, char*& absEndTime) {
  delete[] absStartTime; delete[] absEndTime;
  absStartTime = absEndTime = NULL; // by default, unless "paramStr" is a "clock=..." string

  double start, end;
  int numCharsMatched = 0;
  Locale l("C", Numeric);
  if (sscanf(paramStr, "npt = %lf - %lf", &start, &end) == 2) {
    rangeStart = start;
    rangeEnd = end;
  } else if (sscanf(paramStr, "npt = %lf -", &start) == 1) {
    if (start < 0.0) {
      // special case for "npt = -<endtime>", which seems to match here:
      rangeStart = 0.0;
      rangeEnd = -start;
    } else {
      rangeStart = start;
      rangeEnd = 0.0;
    }
  } else if (strcmp(paramStr, "npt=now-") == 0) {
    rangeStart = 0.0;
    rangeEnd = 0.0;
  } else if (sscanf(paramStr, "clock = %n", &numCharsMatched) == 0 && numCharsMatched > 0) {
    rangeStart = rangeEnd = 0.0;

    char const* utcTimes = &paramStr[numCharsMatched];
    size_t len = strlen(utcTimes) + 1;
    char* as = new char[len];
    char* ae = new char[len];
    int sscanfResult = sscanf(utcTimes, "%[^-]-%s", as, ae);
    if (sscanfResult == 2) {
      absStartTime = as;
      absEndTime = ae;
    } else if (sscanfResult == 1) {
      absStartTime = as;
      delete[] ae;
    } else {
      delete[] as;
      delete[] ae;
      return False;
    }
  } else if (sscanf(paramStr, "smpte = %n", &numCharsMatched) == 0 && numCharsMatched > 0) {
    // We accept "smpte=" parameters, but currently do not interpret them.
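    // For illustration, some example parameter values (made up for this note) and what the
    // code in "parseRangeParam()" above produces for them:
    //   "npt=10.5-20"             -> rangeStart=10.5, rangeEnd=20
    //   "npt=now-"                -> rangeStart=0,    rangeEnd=0
    //   "clock=20140110T120000Z-" -> absStartTime="20140110T120000Z", absEndTime remains NULL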
  } else {
    return False; // The header is malformed
  }

  return True;
}

Boolean parseRangeHeader(char const* buf, double& rangeStart, double& rangeEnd, char*& absStartTime, char*& absEndTime) {
  // First, find "Range:"
  while (1) {
    if (*buf == '\0') return False; // not found
    if (_strncasecmp(buf, "Range: ", 7) == 0) break;
    ++buf;
  }

  // Then, run through each of the fields, looking for ones we handle:
  char const* fields = buf + 7;
  while (*fields == ' ') ++fields;
  return parseRangeParam(fields, rangeStart, rangeEnd, absStartTime, absEndTime);
}

Boolean parseScaleHeader(char const* buf, float& scale) {
  // Initialize the result parameter to a default value:
  scale = 1.0;

  // First, find "Scale:"
  while (1) {
    if (*buf == '\0') return False; // not found
    if (_strncasecmp(buf, "Scale:", 6) == 0) break;
    ++buf;
  }

  // Then, run through each of the fields, looking for ones we handle:
  char const* fields = buf + 6;
  while (*fields == ' ') ++fields;
  float sc;
  if (sscanf(fields, "%f", &sc) == 1) {
    scale = sc;
  } else {
    return False; // The header is malformed
  }

  return True;
}

// Used to implement "RTSPOptionIsSupported()":
static Boolean isSeparator(char c) { return c == ' ' || c == ',' || c == ';' || c == ':'; }

Boolean RTSPOptionIsSupported(char const* commandName, char const* optionsResponseString) {
  do {
    if (commandName == NULL || optionsResponseString == NULL) break;

    unsigned const commandNameLen = strlen(commandName);
    if (commandNameLen == 0) break;

    // "optionsResponseString" is assumed to be a list of command names, separated by " " and/or ",", ";", or ":"
    // Scan through these, looking for "commandName".
    while (1) {
      // Skip over separators:
      while (*optionsResponseString != '\0' && isSeparator(*optionsResponseString)) ++optionsResponseString;
      if (*optionsResponseString == '\0') break;

      // At this point, "optionsResponseString" begins with a command name (with perhaps a separator afterwards).
      if (strncmp(commandName, optionsResponseString, commandNameLen) == 0) {
	// We have at least a partial match here.
	optionsResponseString += commandNameLen;
	if (*optionsResponseString == '\0' || isSeparator(*optionsResponseString)) return True;
      }

      // No match.  Skip over the rest of the command name:
      while (*optionsResponseString != '\0' && !isSeparator(*optionsResponseString)) ++optionsResponseString;
    }
  } while (0);

  return False;
}

char const* dateHeader() {
  static char buf[200];
#if !defined(_WIN32_WCE)
  time_t tt = time(NULL);
  strftime(buf, sizeof buf, "Date: %a, %b %d %Y %H:%M:%S GMT\r\n", gmtime(&tt));
#else
  // WinCE apparently doesn't have "time()", "strftime()", or "gmtime()",
  // so generate the "Date:" header a different, WinCE-specific way.
  // (Thanks to Pierre l'Hussiez for this code)
  // RSF: But where is the "Date: " string?  This code doesn't look quite right...
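  // (For reference, the "strftime()" call in the non-WinCE branch above produces a header such
  // as "Date: Fri, Jan 10 2014 12:00:00 GMT"; note that the field order follows the format
  // string, not RFC 1123.)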
  SYSTEMTIME SystemTime;
  GetSystemTime(&SystemTime);
  WCHAR dateFormat[] = L"ddd, MMM dd yyyy";
  WCHAR timeFormat[] = L"HH:mm:ss GMT\r\n";
  WCHAR inBuf[200];
  DWORD locale = LOCALE_NEUTRAL;

  int ret = GetDateFormat(locale, 0, &SystemTime,
			  (LPTSTR)dateFormat, (LPTSTR)inBuf, sizeof inBuf);
  inBuf[ret - 1] = ' ';
  ret = GetTimeFormat(locale, 0, &SystemTime,
		      (LPTSTR)timeFormat,
		      (LPTSTR)inBuf + ret, (sizeof inBuf) - ret);
  wcstombs(buf, inBuf, wcslen(inBuf));
#endif
  return buf;
}

void ignoreSigPipeOnSocket(int socketNum) {
#ifdef USE_SIGNALS
#ifdef SO_NOSIGPIPE
  int set_option = 1;
  setsockopt(socketNum, SOL_SOCKET, SO_NOSIGPIPE, &set_option, sizeof set_option);
#else
  signal(SIGPIPE, SIG_IGN);
#endif
#endif
}
live/liveMedia/RTCP.cpp000444 001751 000000 00000101451 12265042432 015114 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// RTCP
// Implementation

#include "RTCP.hh"
#include "GroupsockHelper.hh"
#include "rtcp_from_spec.h"

////////// RTCPMemberDatabase //////////

class RTCPMemberDatabase {
public:
  RTCPMemberDatabase(RTCPInstance& ourRTCPInstance)
    : fOurRTCPInstance(ourRTCPInstance), fNumMembers(1 /*ourself*/),
      fTable(HashTable::create(ONE_WORD_HASH_KEYS)) {
  }

  virtual ~RTCPMemberDatabase() {
    delete fTable;
  }

  Boolean isMember(unsigned ssrc) const {
    return fTable->Lookup((char*)(long)ssrc) != NULL;
  }

  Boolean noteMembership(unsigned ssrc, unsigned curTimeCount) {
    Boolean isNew = !isMember(ssrc);

    if (isNew) {
      ++fNumMembers;
    }

    // Record the current time, so we can age stale members
    fTable->Add((char*)(long)ssrc, (void*)(long)curTimeCount);

    return isNew;
  }

  Boolean remove(unsigned ssrc) {
    Boolean wasPresent = fTable->Remove((char*)(long)ssrc);
    if (wasPresent) {
      --fNumMembers;
    }
    return wasPresent;
  }

  unsigned numMembers() const {
    return fNumMembers;
  }

  void reapOldMembers(unsigned threshold);

private:
  RTCPInstance& fOurRTCPInstance;
  unsigned fNumMembers;
  HashTable* fTable;
};

void RTCPMemberDatabase::reapOldMembers(unsigned threshold) {
  Boolean foundOldMember;
  u_int32_t oldSSRC = 0;

  do {
    foundOldMember = False;

    HashTable::Iterator* iter = HashTable::Iterator::create(*fTable);
    uintptr_t timeCount;
    char const* key;
    while ((timeCount = (uintptr_t)(iter->next(key))) != 0) {
#ifdef DEBUG
      fprintf(stderr, "reap: checking SSRC 0x%lx: %ld (threshold %d)\n", (unsigned long)key, timeCount, threshold);
#endif
      if (timeCount < (uintptr_t)threshold) { // this SSRC is old
	uintptr_t ssrc = (uintptr_t)key;
	oldSSRC = (u_int32_t)ssrc;
	foundOldMember = True;
      }
    }
    delete iter;

    if (foundOldMember) {
#ifdef DEBUG
      fprintf(stderr, "reap: removing SSRC 0x%x\n", oldSSRC);
#endif
      fOurRTCPInstance.removeSSRC(oldSSRC, True);
    }
  } while (foundOldMember);
}

////////// RTCPInstance //////////

static double dTimeNow() {
  struct timeval timeNow;
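  // (Returns the current wall-clock time as a double, in seconds; used for the RTCP
  // report-time bookkeeping below.)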
gettimeofday(&timeNow, NULL); return (double) (timeNow.tv_sec + timeNow.tv_usec/1000000.0); } static unsigned const maxRTCPPacketSize = 1450; // bytes (1500, minus some allowance for IP, UDP, UMTP headers) static unsigned const preferredPacketSize = 1000; // bytes RTCPInstance::RTCPInstance(UsageEnvironment& env, Groupsock* RTCPgs, unsigned totSessionBW, unsigned char const* cname, RTPSink* sink, RTPSource const* source, Boolean isSSMSource) : Medium(env), fRTCPInterface(this, RTCPgs), fTotSessionBW(totSessionBW), fSink(sink), fSource(source), fIsSSMSource(isSSMSource), fCNAME(RTCP_SDES_CNAME, cname), fOutgoingReportCount(1), fAveRTCPSize(0), fIsInitial(1), fPrevNumMembers(0), fLastSentSize(0), fLastReceivedSize(0), fLastReceivedSSRC(0), fTypeOfEvent(EVENT_UNKNOWN), fTypeOfPacket(PACKET_UNKNOWN_TYPE), fHaveJustSentPacket(False), fLastPacketSentSize(0), fByeHandlerTask(NULL), fByeHandlerClientData(NULL), fSRHandlerTask(NULL), fSRHandlerClientData(NULL), fRRHandlerTask(NULL), fRRHandlerClientData(NULL), fSpecificRRHandlerTable(NULL) { #ifdef DEBUG fprintf(stderr, "RTCPInstance[%p]::RTCPInstance()\n", this); #endif if (fTotSessionBW == 0) { // not allowed! env << "RTCPInstance::RTCPInstance error: totSessionBW parameter should not be zero!\n"; fTotSessionBW = 1; } if (isSSMSource) RTCPgs->multicastSendOnly(); // don't receive multicast double timeNow = dTimeNow(); fPrevReportTime = fNextReportTime = timeNow; fKnownMembers = new RTCPMemberDatabase(*this); fInBuf = new unsigned char[maxRTCPPacketSize]; if (fKnownMembers == NULL || fInBuf == NULL) return; fNumBytesAlreadyRead = 0; // A hack to save buffer space, because RTCP packets are always small: unsigned savedMaxSize = OutPacketBuffer::maxSize; OutPacketBuffer::maxSize = maxRTCPPacketSize; fOutBuf = new OutPacketBuffer(preferredPacketSize, maxRTCPPacketSize); OutPacketBuffer::maxSize = savedMaxSize; if (fOutBuf == NULL) return; // Arrange to handle incoming reports from others: TaskScheduler::BackgroundHandlerProc* handler = (TaskScheduler::BackgroundHandlerProc*)&incomingReportHandler; fRTCPInterface.startNetworkReading(handler); // Send our first report. fTypeOfEvent = EVENT_REPORT; onExpire(this); } struct RRHandlerRecord { TaskFunc* rrHandlerTask; void* rrHandlerClientData; }; RTCPInstance::~RTCPInstance() { #ifdef DEBUG fprintf(stderr, "RTCPInstance[%p]::~RTCPInstance()\n", this); #endif // Begin by sending a BYE. We have to do this immediately, without // 'reconsideration', because "this" is going away. fTypeOfEvent = EVENT_BYE; // not used, but... 
sendBYE(); if (fSpecificRRHandlerTable != NULL) { AddressPortLookupTable::Iterator iter(*fSpecificRRHandlerTable); RRHandlerRecord* rrHandler; while ((rrHandler = (RRHandlerRecord*)iter.next()) != NULL) { delete rrHandler; } delete fSpecificRRHandlerTable; } delete fKnownMembers; delete fOutBuf; delete[] fInBuf; } RTCPInstance* RTCPInstance::createNew(UsageEnvironment& env, Groupsock* RTCPgs, unsigned totSessionBW, unsigned char const* cname, RTPSink* sink, RTPSource const* source, Boolean isSSMSource) { return new RTCPInstance(env, RTCPgs, totSessionBW, cname, sink, source, isSSMSource); } Boolean RTCPInstance::lookupByName(UsageEnvironment& env, char const* instanceName, RTCPInstance*& resultInstance) { resultInstance = NULL; // unless we succeed Medium* medium; if (!Medium::lookupByName(env, instanceName, medium)) return False; if (!medium->isRTCPInstance()) { env.setResultMsg(instanceName, " is not a RTCP instance"); return False; } resultInstance = (RTCPInstance*)medium; return True; } Boolean RTCPInstance::isRTCPInstance() const { return True; } unsigned RTCPInstance::numMembers() const { if (fKnownMembers == NULL) return 0; return fKnownMembers->numMembers(); } void RTCPInstance::setByeHandler(TaskFunc* handlerTask, void* clientData, Boolean handleActiveParticipantsOnly) { fByeHandlerTask = handlerTask; fByeHandlerClientData = clientData; fByeHandleActiveParticipantsOnly = handleActiveParticipantsOnly; } void RTCPInstance::setSRHandler(TaskFunc* handlerTask, void* clientData) { fSRHandlerTask = handlerTask; fSRHandlerClientData = clientData; } void RTCPInstance::setRRHandler(TaskFunc* handlerTask, void* clientData) { fRRHandlerTask = handlerTask; fRRHandlerClientData = clientData; } void RTCPInstance ::setSpecificRRHandler(netAddressBits fromAddress, Port fromPort, TaskFunc* handlerTask, void* clientData) { if (handlerTask == NULL && clientData == NULL) { unsetSpecificRRHandler(fromAddress, fromPort); return; } RRHandlerRecord* rrHandler = new RRHandlerRecord; rrHandler->rrHandlerTask = handlerTask; rrHandler->rrHandlerClientData = clientData; if (fSpecificRRHandlerTable == NULL) { fSpecificRRHandlerTable = new AddressPortLookupTable; } RRHandlerRecord* existingRecord = (RRHandlerRecord*)fSpecificRRHandlerTable->Add(fromAddress, (~0), fromPort, rrHandler); delete existingRecord; // if any } void RTCPInstance ::unsetSpecificRRHandler(netAddressBits fromAddress, Port fromPort) { if (fSpecificRRHandlerTable == NULL) return; RRHandlerRecord* rrHandler = (RRHandlerRecord*)(fSpecificRRHandlerTable->Lookup(fromAddress, (~0), fromPort)); if (rrHandler != NULL) { fSpecificRRHandlerTable->Remove(fromAddress, (~0), fromPort); delete rrHandler; } } void RTCPInstance::setStreamSocket(int sockNum, unsigned char streamChannelId) { // Turn off background read handling: fRTCPInterface.stopNetworkReading(); // Switch to RTCP-over-TCP: fRTCPInterface.setStreamSocket(sockNum, streamChannelId); // Turn background reading back on: TaskScheduler::BackgroundHandlerProc* handler = (TaskScheduler::BackgroundHandlerProc*)&incomingReportHandler; fRTCPInterface.startNetworkReading(handler); } void RTCPInstance::addStreamSocket(int sockNum, unsigned char streamChannelId) { // First, turn off background read handling for the default (UDP) socket: envir().taskScheduler().turnOffBackgroundReadHandling(fRTCPInterface.gs()->socketNum()); // Add the RTCP-over-TCP interface: fRTCPInterface.addStreamSocket(sockNum, streamChannelId); // Turn on background reading for this socket (in case it's not on already): 
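// (The same "incomingReportHandler" procedure serves both the default UDP
// socket and any RTP-over-TCP stream sockets; "RTPInterface::handleRead()"
// later works out which transport a given read came from.)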
TaskScheduler::BackgroundHandlerProc* handler = (TaskScheduler::BackgroundHandlerProc*)&incomingReportHandler; fRTCPInterface.startNetworkReading(handler); } static unsigned const IP_UDP_HDR_SIZE = 28; // overhead (bytes) of IP and UDP hdrs #define ADVANCE(n) pkt += (n); packetSize -= (n) void RTCPInstance::incomingReportHandler(RTCPInstance* instance, int /*mask*/) { instance->incomingReportHandler1(); } void RTCPInstance::incomingReportHandler1() { do { Boolean callByeHandler = False; int tcpReadStreamSocketNum = fRTCPInterface.nextTCPReadStreamSocketNum(); unsigned char tcpReadStreamChannelId = fRTCPInterface.nextTCPReadStreamChannelId(); unsigned packetSize = 0; unsigned numBytesRead; struct sockaddr_in fromAddress; Boolean packetReadWasIncomplete; if (fNumBytesAlreadyRead >= maxRTCPPacketSize) { envir() << "RTCPInstance error: Hit limit when reading incoming packet over TCP. Increase \"maxRTCPPacketSize\"\n"; break; } Boolean readResult = fRTCPInterface.handleRead(&fInBuf[fNumBytesAlreadyRead], maxRTCPPacketSize - fNumBytesAlreadyRead, numBytesRead, fromAddress, packetReadWasIncomplete); if (packetReadWasIncomplete) { fNumBytesAlreadyRead += numBytesRead; return; // more reads are needed to get the entire packet } else { // normal case: We've read the entire packet packetSize = fNumBytesAlreadyRead + numBytesRead; fNumBytesAlreadyRead = 0; // for next time } if (!readResult) break; // Ignore the packet if it was looped-back from ourself: Boolean packetWasFromOurHost = False; if (RTCPgs()->wasLoopedBackFromUs(envir(), fromAddress)) { packetWasFromOurHost = True; // However, we still want to handle incoming RTCP packets from // *other processes* on the same machine. To distinguish this // case from a true loop-back, check whether we've just sent a // packet of the same size. (This check isn't perfect, but it seems // to be the best we can do.) if (fHaveJustSentPacket && fLastPacketSentSize == packetSize) { // This is a true loop-back: fHaveJustSentPacket = False; break; // ignore this packet } } unsigned char* pkt = fInBuf; if (fIsSSMSource && !packetWasFromOurHost) { // This packet is assumed to have been received via unicast (because we're a SSM source, and SSM receivers send back RTCP "RR" // packets via unicast). 'Reflect' the packet by resending it to the multicast group, so that any other receivers can also // get to see it. // NOTE: Denial-of-service attacks are possible here. // Users of this software may wish to add their own, // application-specific mechanism for 'authenticating' the // validity of this packet before reflecting it. // NOTE: The test for "!packetWasFromOurHost" means that we won't reflect RTCP packets that come from other processes on // the same host as us. The reason for this is that the 'packet size' test above is not 100% reliable; some packets // that were truly looped back from us might not be detected as such, and this might lead to infinite forwarding/receiving // of some packets. To avoid this possibility, we only reflect RTCP packets that we know for sure originated elsewhere. // (Note, though, that if we ever re-enable the code in "Groupsock::multicastSendOnly()", then we could remove the test for // "!packetWasFromOurHost".) 
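// For illustration only (not part of the library): such an authentication
// mechanism could be as simple as checking the packet's source address
// before falling through to the 'reflection' below, e.g.
// (with "isTrustedSender()" being a hypothetical, application-supplied
// predicate):
//   if (!isTrustedSender(fromAddress)) break; // drop instead of reflecting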
fRTCPInterface.sendPacket(pkt, packetSize); fHaveJustSentPacket = True; fLastPacketSentSize = packetSize; } #ifdef DEBUG fprintf(stderr, "[%p]saw incoming RTCP packet", this); if (tcpReadStreamSocketNum < 0) { // Note that "fromAddress" is valid only if we're receiving over UDP (not over TCP): fprintf(stderr, " (from address %s, port %d)", AddressString(fromAddress).val(), ntohs(fromAddress.sin_port)); } fprintf(stderr, "\n"); for (unsigned i = 0; i < packetSize; ++i) { if (i%4 == 0) fprintf(stderr, " "); fprintf(stderr, "%02x", pkt[i]); } fprintf(stderr, "\n"); #endif int totPacketSize = IP_UDP_HDR_SIZE + packetSize; // Check the RTCP packet for validity: // It must at least contain a header (4 bytes), and this header // must be version=2, with no padding bit, and a payload type of // SR (200) or RR (201): if (packetSize < 4) break; unsigned rtcpHdr = ntohl(*(u_int32_t*)pkt); if ((rtcpHdr & 0xE0FE0000) != (0x80000000 | (RTCP_PT_SR<<16))) { #ifdef DEBUG fprintf(stderr, "rejected bad RTCP packet: header 0x%08x\n", rtcpHdr); #endif break; } // Process each of the individual RTCP 'subpackets' in (what may be) // a compound RTCP packet. int typeOfPacket = PACKET_UNKNOWN_TYPE; unsigned reportSenderSSRC = 0; Boolean packetOK = False; while (1) { unsigned rc = (rtcpHdr>>24)&0x1F; unsigned pt = (rtcpHdr>>16)&0xFF; unsigned length = 4*(rtcpHdr&0xFFFF); // doesn't count hdr ADVANCE(4); // skip over the header if (length > packetSize) break; // Assume that each RTCP subpacket begins with a 4-byte SSRC: if (length < 4) break; length -= 4; reportSenderSSRC = ntohl(*(u_int32_t*)pkt); ADVANCE(4); Boolean subPacketOK = False; switch (pt) { case RTCP_PT_SR: { #ifdef DEBUG fprintf(stderr, "SR\n"); #endif if (length < 20) break; length -= 20; // Extract the NTP timestamp, and note this: unsigned NTPmsw = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned NTPlsw = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned rtpTimestamp = ntohl(*(u_int32_t*)pkt); ADVANCE(4); if (fSource != NULL) { RTPReceptionStatsDB& receptionStats = fSource->receptionStatsDB(); receptionStats.noteIncomingSR(reportSenderSSRC, NTPmsw, NTPlsw, rtpTimestamp); } ADVANCE(8); // skip over packet count, octet count // If a 'SR handler' was set, call it now: if (fSRHandlerTask != NULL) (*fSRHandlerTask)(fSRHandlerClientData); // The rest of the SR is handled like a RR (so, no "break;" here) } case RTCP_PT_RR: { #ifdef DEBUG fprintf(stderr, "RR\n"); #endif unsigned reportBlocksSize = rc*(6*4); if (length < reportBlocksSize) break; length -= reportBlocksSize; if (fSink != NULL) { // Use this information to update stats about our transmissions: RTPTransmissionStatsDB& transmissionStats = fSink->transmissionStatsDB(); for (unsigned i = 0; i < rc; ++i) { unsigned senderSSRC = ntohl(*(u_int32_t*)pkt); ADVANCE(4); // We care only about reports about our own transmission, not others' if (senderSSRC == fSink->SSRC()) { unsigned lossStats = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned highestReceived = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned jitter = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned timeLastSR = ntohl(*(u_int32_t*)pkt); ADVANCE(4); unsigned timeSinceLastSR = ntohl(*(u_int32_t*)pkt); ADVANCE(4); transmissionStats.noteIncomingRR(reportSenderSSRC, fromAddress, lossStats, highestReceived, jitter, timeLastSR, timeSinceLastSR); } else { ADVANCE(4*5); } } } else { ADVANCE(reportBlocksSize); } if (pt == RTCP_PT_RR) { // i.e., we didn't fall through from 'SR' // If a 'RR handler' was set, call it now: // Specific RR handler: if 
(fSpecificRRHandlerTable != NULL) { netAddressBits fromAddr; portNumBits fromPortNum; if (tcpReadStreamSocketNum < 0) { // Normal case: We read the RTCP packet over UDP fromAddr = fromAddress.sin_addr.s_addr; fromPortNum = ntohs(fromAddress.sin_port); } else { // Special case: We read the RTCP packet over TCP (interleaved) // Hack: Use the TCP socket and channel id to look up the handler fromAddr = tcpReadStreamSocketNum; fromPortNum = tcpReadStreamChannelId; } Port fromPort(fromPortNum); RRHandlerRecord* rrHandler = (RRHandlerRecord*)(fSpecificRRHandlerTable->Lookup(fromAddr, (~0), fromPort)); if (rrHandler != NULL) { if (rrHandler->rrHandlerTask != NULL) { (*(rrHandler->rrHandlerTask))(rrHandler->rrHandlerClientData); } } } // General RR handler: if (fRRHandlerTask != NULL) (*fRRHandlerTask)(fRRHandlerClientData); } subPacketOK = True; typeOfPacket = PACKET_RTCP_REPORT; break; } case RTCP_PT_BYE: { #ifdef DEBUG fprintf(stderr, "BYE\n"); #endif // If a 'BYE handler' was set, arrange for it to be called at the end of this routine. // (Note: We don't call it immediately, in case it happens to cause "this" to be deleted.) if (fByeHandlerTask != NULL && (!fByeHandleActiveParticipantsOnly || (fSource != NULL && fSource->receptionStatsDB().lookup(reportSenderSSRC) != NULL) || (fSink != NULL && fSink->transmissionStatsDB().lookup(reportSenderSSRC) != NULL))) { callByeHandler = True; } // We should really check for & handle >1 SSRCs being present ##### subPacketOK = True; typeOfPacket = PACKET_BYE; break; } // Later handle SDES, APP, and compound RTCP packets ##### default: #ifdef DEBUG fprintf(stderr, "UNSUPPORTED TYPE(0x%x)\n", pt); #endif subPacketOK = True; break; } if (!subPacketOK) break; // need to check for (& handle) SSRC collision! ##### #ifdef DEBUG fprintf(stderr, "validated RTCP subpacket (type %d): %d, %d, %d, 0x%08x\n", typeOfPacket, rc, pt, length, reportSenderSSRC); #endif // Skip over any remaining bytes in this subpacket: ADVANCE(length); // Check whether another RTCP 'subpacket' follows: if (packetSize == 0) { packetOK = True; break; } else if (packetSize < 4) { #ifdef DEBUG fprintf(stderr, "extraneous %d bytes at end of RTCP packet!\n", packetSize); #endif break; } rtcpHdr = ntohl(*(u_int32_t*)pkt); if ((rtcpHdr & 0xC0000000) != 0x80000000) { #ifdef DEBUG fprintf(stderr, "bad RTCP subpacket: header 0x%08x\n", rtcpHdr); #endif break; } } if (!packetOK) { #ifdef DEBUG fprintf(stderr, "rejected bad RTCP subpacket: header 0x%08x\n", rtcpHdr); #endif break; } else { #ifdef DEBUG fprintf(stderr, "validated entire RTCP packet\n"); #endif } onReceive(typeOfPacket, totPacketSize, reportSenderSSRC); // Finally, if we need to call a "BYE" handler, do so now (in case it causes "this" to get deleted): if (callByeHandler && fByeHandlerTask != NULL/*sanity check*/) { TaskFunc* byeHandler = fByeHandlerTask; fByeHandlerTask = NULL; // because we call the handler only once, by default (*byeHandler)(fByeHandlerClientData); } } while (0); } void RTCPInstance::onReceive(int typeOfPacket, int totPacketSize, unsigned ssrc) { fTypeOfPacket = typeOfPacket; fLastReceivedSize = totPacketSize; fLastReceivedSSRC = ssrc; int members = (int)numMembers(); int senders = (fSink != NULL) ? 
1 : 0; OnReceive(this, // p this, // e &members, // members &fPrevNumMembers, // pmembers &senders, // senders &fAveRTCPSize, // avg_rtcp_size &fPrevReportTime, // tp dTimeNow(), // tc fNextReportTime); } void RTCPInstance::sendReport() { #ifdef DEBUG fprintf(stderr, "sending REPORT\n"); #endif // Begin by including a SR and/or RR report: if (!addReport()) return; // Then, include a SDES: addSDES(); // Send the report: sendBuiltPacket(); // Periodically clean out old members from our SSRC membership database: const unsigned membershipReapPeriod = 5; if ((++fOutgoingReportCount) % membershipReapPeriod == 0) { unsigned threshold = fOutgoingReportCount - membershipReapPeriod; fKnownMembers->reapOldMembers(threshold); } } void RTCPInstance::sendBYE() { #ifdef DEBUG fprintf(stderr, "sending BYE\n"); #endif // The packet must begin with a SR and/or RR report: (void)addReport(True); addBYE(); sendBuiltPacket(); } void RTCPInstance::sendBuiltPacket() { #ifdef DEBUG fprintf(stderr, "sending RTCP packet\n"); unsigned char* p = fOutBuf->packet(); for (unsigned i = 0; i < fOutBuf->curPacketSize(); ++i) { if (i%4 == 0) fprintf(stderr," "); fprintf(stderr, "%02x", p[i]); } fprintf(stderr, "\n"); #endif unsigned reportSize = fOutBuf->curPacketSize(); fRTCPInterface.sendPacket(fOutBuf->packet(), reportSize); fOutBuf->resetOffset(); fLastSentSize = IP_UDP_HDR_SIZE + reportSize; fHaveJustSentPacket = True; fLastPacketSentSize = reportSize; } int RTCPInstance::checkNewSSRC() { return fKnownMembers->noteMembership(fLastReceivedSSRC, fOutgoingReportCount); } void RTCPInstance::removeLastReceivedSSRC() { removeSSRC(fLastReceivedSSRC, False/*keep stats around*/); } void RTCPInstance::removeSSRC(u_int32_t ssrc, Boolean alsoRemoveStats) { fKnownMembers->remove(ssrc); if (alsoRemoveStats) { // Also, remove records of this SSRC from any reception or transmission stats if (fSource != NULL) fSource->receptionStatsDB().removeRecord(ssrc); if (fSink != NULL) fSink->transmissionStatsDB().removeRecord(ssrc); } } void RTCPInstance::onExpire(RTCPInstance* instance) { instance->onExpire1(); } // Member functions to build specific kinds of report: Boolean RTCPInstance::addReport(Boolean alwaysAdd) { // Include a SR or a RR, depending on whether we have an associated sink or source: if (fSink != NULL) { if (!alwaysAdd) { if (!fSink->enableRTCPReports()) return False; // Hack: Don't send a SR during those (brief) times when the timestamp of the // next outgoing RTP packet has been preset, to ensure that that timestamp gets // used for that outgoing packet. 
(David Bertrand, 2006.07.18) if (fSink->nextTimestampHasBeenPreset()) return False; } addSR(); } else if (fSource != NULL) { if (!alwaysAdd) { if (!fSource->enableRTCPReports()) return False; } addRR(); } return True; } void RTCPInstance::addSR() { // ASSERT: fSink != NULL enqueueCommonReportPrefix(RTCP_PT_SR, fSink->SSRC(), 5 /* extra words in a SR */); // Now, add the 'sender info' for our sink // Insert the NTP and RTP timestamps for the 'wallclock time': struct timeval timeNow; gettimeofday(&timeNow, NULL); fOutBuf->enqueueWord(timeNow.tv_sec + 0x83AA7E80); // NTP timestamp most-significant word (1970 epoch -> 1900 epoch) double fractionalPart = (timeNow.tv_usec/15625.0)*0x04000000; // 2^32/10^6 fOutBuf->enqueueWord((unsigned)(fractionalPart+0.5)); // NTP timestamp least-significant word unsigned rtpTimestamp = fSink->convertToRTPTimestamp(timeNow); fOutBuf->enqueueWord(rtpTimestamp); // RTP ts // Insert the packet and byte counts: fOutBuf->enqueueWord(fSink->packetCount()); fOutBuf->enqueueWord(fSink->octetCount()); enqueueCommonReportSuffix(); } void RTCPInstance::addRR() { // ASSERT: fSource != NULL enqueueCommonReportPrefix(RTCP_PT_RR, fSource->SSRC()); enqueueCommonReportSuffix(); } void RTCPInstance::enqueueCommonReportPrefix(unsigned char packetType, unsigned SSRC, unsigned numExtraWords) { unsigned numReportingSources; if (fSource == NULL) { numReportingSources = 0; // we don't receive anything } else { RTPReceptionStatsDB& allReceptionStats = fSource->receptionStatsDB(); numReportingSources = allReceptionStats.numActiveSourcesSinceLastReset(); // This must be <32, to fit in 5 bits: if (numReportingSources >= 32) { numReportingSources = 32; } // Later: support adding more reports to handle >32 sources (unlikely)##### } unsigned rtcpHdr = 0x80000000; // version 2, no padding rtcpHdr |= (numReportingSources<<24); rtcpHdr |= (packetType<<16); rtcpHdr |= (1 + numExtraWords + 6*numReportingSources); // each report block is 6 32-bit words long fOutBuf->enqueueWord(rtcpHdr); fOutBuf->enqueueWord(SSRC); } void RTCPInstance::enqueueCommonReportSuffix() { // Output the report blocks for each source: if (fSource != NULL) { RTPReceptionStatsDB& allReceptionStats = fSource->receptionStatsDB(); RTPReceptionStatsDB::Iterator iterator(allReceptionStats); while (1) { RTPReceptionStats* receptionStats = iterator.next(); if (receptionStats == NULL) break; enqueueReportBlock(receptionStats); } allReceptionStats.reset(); // because we have just generated a report } } void RTCPInstance::enqueueReportBlock(RTPReceptionStats* stats) { fOutBuf->enqueueWord(stats->SSRC()); unsigned highestExtSeqNumReceived = stats->highestExtSeqNumReceived(); unsigned totNumExpected = highestExtSeqNumReceived - stats->baseExtSeqNumReceived(); int totNumLost = totNumExpected - stats->totNumPacketsReceived(); // 'Clamp' this loss number to a 24-bit signed value: if (totNumLost > 0x007FFFFF) { totNumLost = 0x007FFFFF; } else if (totNumLost < 0) { if (totNumLost < -0x00800000) totNumLost = 0x00800000; // unlikely, but... 
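// (The masking below keeps the low 24 bits of the negative value, i.e. its
// two's-complement representation, which is what RFC 3550 specifies for the
// 24-bit "cumulative number of packets lost" field.)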
totNumLost &= 0x00FFFFFF; } unsigned numExpectedSinceLastReset = highestExtSeqNumReceived - stats->lastResetExtSeqNumReceived(); int numLostSinceLastReset = numExpectedSinceLastReset - stats->numPacketsReceivedSinceLastReset(); unsigned char lossFraction; if (numExpectedSinceLastReset == 0 || numLostSinceLastReset < 0) { lossFraction = 0; } else { lossFraction = (unsigned char) ((numLostSinceLastReset << 8) / numExpectedSinceLastReset); } fOutBuf->enqueueWord((lossFraction<<24) | totNumLost); fOutBuf->enqueueWord(highestExtSeqNumReceived); fOutBuf->enqueueWord(stats->jitter()); unsigned NTPmsw = stats->lastReceivedSR_NTPmsw(); unsigned NTPlsw = stats->lastReceivedSR_NTPlsw(); unsigned LSR = ((NTPmsw&0xFFFF)<<16)|(NTPlsw>>16); // middle 32 bits fOutBuf->enqueueWord(LSR); // Figure out how long has elapsed since the last SR rcvd from this src: struct timeval const& LSRtime = stats->lastReceivedSR_time(); // "last SR" struct timeval timeNow, timeSinceLSR; gettimeofday(&timeNow, NULL); if (timeNow.tv_usec < LSRtime.tv_usec) { timeNow.tv_usec += 1000000; timeNow.tv_sec -= 1; } timeSinceLSR.tv_sec = timeNow.tv_sec - LSRtime.tv_sec; timeSinceLSR.tv_usec = timeNow.tv_usec - LSRtime.tv_usec; // The enqueued time is in units of 1/65536 seconds. // (Note that 65536/1000000 == 1024/15625) unsigned DLSR; if (LSR == 0) { DLSR = 0; } else { DLSR = (timeSinceLSR.tv_sec<<16) | ( (((timeSinceLSR.tv_usec<<11)+15625)/31250) & 0xFFFF); } fOutBuf->enqueueWord(DLSR); } void RTCPInstance::addSDES() { // For now we support only the CNAME item; later support more ##### // Begin by figuring out the size of the entire SDES report: unsigned numBytes = 4; // counts the SSRC, but not the header; it'll get subtracted out numBytes += fCNAME.totalSize(); // includes id and length numBytes += 1; // the special END item unsigned num4ByteWords = (numBytes + 3)/4; unsigned rtcpHdr = 0x81000000; // version 2, no padding, 1 SSRC chunk rtcpHdr |= (RTCP_PT_SDES<<16); rtcpHdr |= num4ByteWords; fOutBuf->enqueueWord(rtcpHdr); if (fSource != NULL) { fOutBuf->enqueueWord(fSource->SSRC()); } else if (fSink != NULL) { fOutBuf->enqueueWord(fSink->SSRC()); } // Add the CNAME: fOutBuf->enqueue(fCNAME.data(), fCNAME.totalSize()); // Add the 'END' item (i.e., a zero byte), plus any more needed to pad: unsigned numPaddingBytesNeeded = 4 - (fOutBuf->curPacketSize() % 4); unsigned char const zero = '\0'; while (numPaddingBytesNeeded-- > 0) fOutBuf->enqueue(&zero, 1); } void RTCPInstance::addBYE() { unsigned rtcpHdr = 0x81000000; // version 2, no padding, 1 SSRC rtcpHdr |= (RTCP_PT_BYE<<16); rtcpHdr |= 1; // 2 32-bit words total (i.e., with 1 SSRC) fOutBuf->enqueueWord(rtcpHdr); if (fSource != NULL) { fOutBuf->enqueueWord(fSource->SSRC()); } else if (fSink != NULL) { fOutBuf->enqueueWord(fSink->SSRC()); } } void RTCPInstance::schedule(double nextTime) { fNextReportTime = nextTime; double secondsToDelay = nextTime - dTimeNow(); if (secondsToDelay < 0) secondsToDelay = 0; #ifdef DEBUG fprintf(stderr, "schedule(%f->%f)\n", secondsToDelay, nextTime); #endif int64_t usToGo = (int64_t)(secondsToDelay * 1000000); nextTask() = envir().taskScheduler().scheduleDelayedTask(usToGo, (TaskFunc*)RTCPInstance::onExpire, this); } void RTCPInstance::reschedule(double nextTime) { envir().taskScheduler().unscheduleDelayedTask(nextTask()); schedule(nextTime); } void RTCPInstance::onExpire1() { // Note: fTotSessionBW is kbits per second double rtcpBW = 0.05*fTotSessionBW*1024/8; // -> bytes per second OnExpire(this, // event numMembers(), // members (fSink != NULL) ? 
1 : 0, // senders rtcpBW, // rtcp_bw (fSink != NULL) ? 1 : 0, // we_sent &fAveRTCPSize, // ave_rtcp_size &fIsInitial, // initial dTimeNow(), // tc &fPrevReportTime, // tp &fPrevNumMembers // pmembers ); } ////////// SDESItem ////////// SDESItem::SDESItem(unsigned char tag, unsigned char const* value) { unsigned length = strlen((char const*)value); if (length > 0xFF) length = 0xFF; // maximum data length for a SDES item fData[0] = tag; fData[1] = (unsigned char)length; memmove(&fData[2], value, length); } unsigned SDESItem::totalSize() const { return 2 + (unsigned)fData[1]; } ////////// Implementation of routines imported by the "rtcp_from_spec" C code extern "C" void Schedule(double nextTime, event e) { RTCPInstance* instance = (RTCPInstance*)e; if (instance == NULL) return; instance->schedule(nextTime); } extern "C" void Reschedule(double nextTime, event e) { RTCPInstance* instance = (RTCPInstance*)e; if (instance == NULL) return; instance->reschedule(nextTime); } extern "C" void SendRTCPReport(event e) { RTCPInstance* instance = (RTCPInstance*)e; if (instance == NULL) return; instance->sendReport(); } extern "C" void SendBYEPacket(event e) { RTCPInstance* instance = (RTCPInstance*)e; if (instance == NULL) return; instance->sendBYE(); } extern "C" int TypeOfEvent(event e) { RTCPInstance* instance = (RTCPInstance*)e; if (instance == NULL) return EVENT_UNKNOWN; return instance->typeOfEvent(); } extern "C" int SentPacketSize(event e) { RTCPInstance* instance = (RTCPInstance*)e; if (instance == NULL) return 0; return instance->sentPacketSize(); } extern "C" int PacketType(packet p) { RTCPInstance* instance = (RTCPInstance*)p; if (instance == NULL) return PACKET_UNKNOWN_TYPE; return instance->packetType(); } extern "C" int ReceivedPacketSize(packet p) { RTCPInstance* instance = (RTCPInstance*)p; if (instance == NULL) return 0; return instance->receivedPacketSize(); } extern "C" int NewMember(packet p) { RTCPInstance* instance = (RTCPInstance*)p; if (instance == NULL) return 0; return instance->checkNewSSRC(); } extern "C" int NewSender(packet /*p*/) { return 0; // we don't yet recognize senders other than ourselves ##### } extern "C" void AddMember(packet /*p*/) { // Do nothing; all of the real work was done when NewMember() was called } extern "C" void AddSender(packet /*p*/) { // we don't yet recognize senders other than ourselves ##### } extern "C" void RemoveMember(packet p) { RTCPInstance* instance = (RTCPInstance*)p; if (instance == NULL) return; instance->removeLastReceivedSSRC(); } extern "C" void RemoveSender(packet /*p*/) { // we don't yet recognize senders other than ourselves ##### } extern "C" double drand30() { unsigned tmp = our_random()&0x3FFFFFFF; // a random 30-bit integer return tmp/(double)(1024*1024*1024); } live/liveMedia/EBMLNumber.hh000444 001751 000000 00000011247 12265042432 016054 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// EBML numbers (ids and sizes)
// C++ header

#ifndef _EBML_NUMBER_HH
#define _EBML_NUMBER_HH

#include "NetCommon.h"
#include "Boolean.hh"
#include <stdio.h> /* the original header name did not survive this dump; <stdio.h> assumed */

#define EBML_NUMBER_MAX_LEN 8

class EBMLNumber {
public:
  EBMLNumber(Boolean stripLeading1 = True);
  virtual ~EBMLNumber();

  u_int64_t val() const;
  char* hexString() const; // used for debugging
  Boolean operator==(u_int64_t arg2) const { return val() == arg2; }
  Boolean operator!=(u_int64_t arg2) const { return !(*this == arg2); }

public:
  Boolean stripLeading1;
  unsigned len;
  u_int8_t data[EBML_NUMBER_MAX_LEN];
};

// Definitions of some Matroska/EBML IDs (including the ones that we check for):
#define MATROSKA_ID_EBML 0x1A45DFA3
#define MATROSKA_ID_VOID 0xEC
#define MATROSKA_ID_CRC_32 0xBF
#define MATROSKA_ID_SEGMENT 0x18538067
#define MATROSKA_ID_SEEK_HEAD 0x114D9B74
#define MATROSKA_ID_SEEK 0x4DBB
#define MATROSKA_ID_SEEK_ID 0x53AB
#define MATROSKA_ID_SEEK_POSITION 0x53AC
#define MATROSKA_ID_INFO 0x1549A966
#define MATROSKA_ID_SEGMENT_UID 0x73A4
#define MATROSKA_ID_TIMECODE_SCALE 0x2AD7B1
#define MATROSKA_ID_DURATION 0x4489
#define MATROSKA_ID_DATE_UTC 0x4461
#define MATROSKA_ID_TITLE 0x7BA9
#define MATROSKA_ID_MUXING_APP 0x4D80
#define MATROSKA_ID_WRITING_APP 0x5741
#define MATROSKA_ID_CLUSTER 0x1F43B675
#define MATROSKA_ID_TIMECODE 0xE7
#define MATROSKA_ID_POSITION 0xA7
#define MATROSKA_ID_PREV_SIZE 0xAB
#define MATROSKA_ID_SIMPLEBLOCK 0xA3
#define MATROSKA_ID_BLOCK_GROUP 0xA0
#define MATROSKA_ID_BLOCK 0xA1
#define MATROSKA_ID_BLOCK_DURATION 0x9B
#define MATROSKA_ID_REFERENCE_BLOCK 0xFB
#define MATROSKA_ID_TRACKS 0x1654AE6B
#define MATROSKA_ID_TRACK_ENTRY 0xAE
#define MATROSKA_ID_TRACK_NUMBER 0xD7
#define MATROSKA_ID_TRACK_UID 0x73C5
#define MATROSKA_ID_TRACK_TYPE 0x83
#define MATROSKA_ID_FLAG_ENABLED 0xB9
#define MATROSKA_ID_FLAG_DEFAULT 0x88
#define MATROSKA_ID_FLAG_FORCED 0x55AA
#define MATROSKA_ID_FLAG_LACING 0x9C
#define MATROSKA_ID_MIN_CACHE 0x6DE7
#define MATROSKA_ID_DEFAULT_DURATION 0x23E383
#define MATROSKA_ID_TRACK_TIMECODE_SCALE 0x23314F
#define MATROSKA_ID_MAX_BLOCK_ADDITION_ID 0x55EE
#define MATROSKA_ID_NAME 0x536E
#define MATROSKA_ID_LANGUAGE 0x22B59C
#define MATROSKA_ID_CODEC 0x86
#define MATROSKA_ID_CODEC_PRIVATE 0x63A2
#define MATROSKA_ID_CODEC_NAME 0x258688
#define MATROSKA_ID_CODEC_DECODE_ALL 0xAA
#define MATROSKA_ID_VIDEO 0xE0
#define MATROSKA_ID_FLAG_INTERLACED 0x9A
#define MATROSKA_ID_PIXEL_WIDTH 0xB0
#define MATROSKA_ID_PIXEL_HEIGHT 0xBA
#define MATROSKA_ID_DISPLAY_WIDTH 0x54B0
#define MATROSKA_ID_DISPLAY_HEIGHT 0x54BA
#define MATROSKA_ID_DISPLAY_UNIT 0x54B2
#define MATROSKA_ID_AUDIO 0xE1
#define MATROSKA_ID_SAMPLING_FREQUENCY 0xB5
#define MATROSKA_ID_OUTPUT_SAMPLING_FREQUENCY 0x78B5
#define MATROSKA_ID_CHANNELS 0x9F
#define MATROSKA_ID_BIT_DEPTH 0x6264
#define MATROSKA_ID_CONTENT_ENCODINGS 0x6D80
#define MATROSKA_ID_CONTENT_ENCODING 0x6240
#define MATROSKA_ID_CONTENT_COMPRESSION 0x5034
#define MATROSKA_ID_CONTENT_COMP_ALGO 0x4254
#define MATROSKA_ID_CONTENT_COMP_SETTINGS 0x4255
#define MATROSKA_ID_CONTENT_ENCRYPTION 0x5035
#define MATROSKA_ID_ATTACHMENTS 0x1941A469
#define MATROSKA_ID_ATTACHED_FILE 0x61A7
#define MATROSKA_ID_FILE_DESCRIPTION 0x467E
#define MATROSKA_ID_FILE_NAME 0x466E
#define MATROSKA_ID_FILE_MIME_TYPE
0x4660 #define MATROSKA_ID_FILE_DATA 0x465C #define MATROSKA_ID_FILE_UID 0x46AE #define MATROSKA_ID_CUES 0x1C53BB6B #define MATROSKA_ID_CUE_POINT 0xBB #define MATROSKA_ID_CUE_TIME 0xB3 #define MATROSKA_ID_CUE_TRACK_POSITIONS 0xB7 #define MATROSKA_ID_CUE_TRACK 0xF7 #define MATROSKA_ID_CUE_CLUSTER_POSITION 0xF1 #define MATROSKA_ID_CUE_BLOCK_NUMBER 0x5378 #define MATROSKA_ID_TAGS 0x1254C367 class EBMLId: public EBMLNumber { public: EBMLId(); virtual ~EBMLId(); char const* stringName() const; // used for debugging }; class EBMLDataSize: public EBMLNumber { public: EBMLDataSize(); virtual ~EBMLDataSize(); }; #endif live/liveMedia/QuickTimeGenericRTPSource.cpp000444 001751 000000 00000023044 12265042432 021304 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP Sources containing generic QuickTime stream data, as defined in // // Implementation #include "QuickTimeGenericRTPSource.hh" ///// QTGenericBufferedPacket and QTGenericBufferedPacketFactory ///// // A subclass of BufferedPacket, used to separate out // individual frames (when PCK == 2) class QTGenericBufferedPacket: public BufferedPacket { public: QTGenericBufferedPacket(QuickTimeGenericRTPSource& ourSource); virtual ~QTGenericBufferedPacket(); private: // redefined virtual functions virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize); private: QuickTimeGenericRTPSource& fOurSource; }; class QTGenericBufferedPacketFactory: public BufferedPacketFactory { private: // redefined virtual functions virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource); }; ////////// QuickTimeGenericRTPSource ////////// QuickTimeGenericRTPSource* QuickTimeGenericRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mimeTypeString) { return new QuickTimeGenericRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, mimeTypeString); } QuickTimeGenericRTPSource ::QuickTimeGenericRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mimeTypeString) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, new QTGenericBufferedPacketFactory), fMIMEtypeString(strDup(mimeTypeString)) { qtState.PCK = 0; qtState.timescale = 0; qtState.sdAtom = NULL; qtState.sdAtomSize = qtState.width = qtState.height = 0; } QuickTimeGenericRTPSource::~QuickTimeGenericRTPSource() { delete[] qtState.sdAtom; delete[] (char*)fMIMEtypeString; } Boolean QuickTimeGenericRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { unsigned char* headerStart = packet->data(); unsigned packetSize = packet->dataSize(); // The "QuickTime Header" must be 
at least 4 bytes in size: // Extract the known fields from the first 4 bytes: unsigned expectedHeaderSize = 4; if (packetSize < expectedHeaderSize) return False; unsigned char VER = (headerStart[0]&0xF0)>>4; if (VER > 1) return False; // unknown header version qtState.PCK = (headerStart[0]&0x0C)>>2; #ifdef DEBUG Boolean S = (headerStart[0]&0x02) != 0; #endif Boolean Q = (headerStart[0]&0x01) != 0; Boolean L = (headerStart[1]&0x80) != 0; #ifdef DEBUG Boolean D = (headerStart[2]&0x80) != 0; unsigned short payloadId = ((headerStart[2]&0x7F)<<8)|headerStart[3]; #endif headerStart += 4; #ifdef DEBUG fprintf(stderr, "PCK: %d, S: %d, Q: %d, L: %d, D: %d, payloadId: %d\n", qtState.PCK, S, Q, L, D, payloadId); #endif if (Q) { // A "QuickTime Payload Description" follows expectedHeaderSize += 4; if (packetSize < expectedHeaderSize) return False; #ifdef DEBUG Boolean K = (headerStart[0]&0x80) != 0; Boolean F = (headerStart[0]&0x40) != 0; Boolean A = (headerStart[0]&0x20) != 0; Boolean Z = (headerStart[0]&0x10) != 0; #endif unsigned payloadDescriptionLength = (headerStart[2]<<8)|headerStart[3]; headerStart += 4; #ifdef DEBUG fprintf(stderr, "\tK: %d, F: %d, A: %d, Z: %d, payloadDescriptionLength: %d\n", K, F, A, Z, payloadDescriptionLength); #endif // Make sure "payloadDescriptionLength" is valid if (payloadDescriptionLength < 12) return False; expectedHeaderSize += (payloadDescriptionLength - 4); unsigned nonPaddedSize = expectedHeaderSize; expectedHeaderSize += 3; expectedHeaderSize -= expectedHeaderSize%4; // adds padding if (packetSize < expectedHeaderSize) return False; unsigned char padding = expectedHeaderSize - nonPaddedSize; #ifdef DEBUG unsigned mediaType = (headerStart[0]<<24)|(headerStart[1]<<16) |(headerStart[2]<<8)|headerStart[3]; #endif qtState.timescale = (headerStart[4]<<24)|(headerStart[5]<<16) |(headerStart[6]<<8)|headerStart[7]; headerStart += 8; payloadDescriptionLength -= 12; #ifdef DEBUG fprintf(stderr, "\tmediaType: '%c%c%c%c', timescale: %d, %d bytes of TLVs left\n", mediaType>>24, (mediaType&0xFF0000)>>16, (mediaType&0xFF00)>>8, mediaType&0xFF, qtState.timescale, payloadDescriptionLength); #endif while (payloadDescriptionLength > 3) { unsigned short tlvLength = (headerStart[0]<<8)|headerStart[1]; unsigned short tlvType = (headerStart[2]<<8)|headerStart[3]; payloadDescriptionLength -= 4; if (tlvLength > payloadDescriptionLength) return False; // bad TLV headerStart += 4; #ifdef DEBUG fprintf(stderr, "\t\tTLV '%c%c', length %d, leaving %d remaining bytes\n", tlvType>>8, tlvType&0xFF, tlvLength, payloadDescriptionLength - tlvLength); for (int i = 0; i < tlvLength; ++i) fprintf(stderr, "%02x:", headerStart[i]); fprintf(stderr, "\n"); #endif // Check for 'TLV's that we can use for our 'qtState' switch (tlvType) { case ('s'<<8|'d'): { // session description atom // Sanity check: the first 4 bytes of this must equal "tlvLength": unsigned atomLength = (headerStart[0]<<24)|(headerStart[1]<<16) |(headerStart[2]<<8)|(headerStart[3]); if (atomLength != (unsigned)tlvLength) break; delete[] qtState.sdAtom; qtState.sdAtom = new char[tlvLength]; memmove(qtState.sdAtom, headerStart, tlvLength); qtState.sdAtomSize = tlvLength; break; } case ('t'<<8|'w'): { // track width qtState.width = (headerStart[0]<<8)|headerStart[1]; break; } case ('t'<<8|'h'): { // track height qtState.height = (headerStart[0]<<8)|headerStart[1]; break; } } payloadDescriptionLength -= tlvLength; headerStart += tlvLength; } if (payloadDescriptionLength > 0) return False; // malformed TLV data headerStart += padding; } if 
(L) { // Sample-Specific info follows expectedHeaderSize += 4; if (packetSize < expectedHeaderSize) return False; unsigned ssInfoLength = (headerStart[2]<<8)|headerStart[3]; headerStart += 4; #ifdef DEBUG fprintf(stderr, "\tssInfoLength: %d\n", ssInfoLength); #endif // Make sure "ssInfoLength" is valid if (ssInfoLength < 4) return False; expectedHeaderSize += (ssInfoLength - 4); unsigned nonPaddedSize = expectedHeaderSize; expectedHeaderSize += 3; expectedHeaderSize -= expectedHeaderSize%4; // adds padding if (packetSize < expectedHeaderSize) return False; unsigned char padding = expectedHeaderSize - nonPaddedSize; ssInfoLength -= 4; while (ssInfoLength > 3) { unsigned short tlvLength = (headerStart[0]<<8)|headerStart[1]; #ifdef DEBUG unsigned short tlvType = (headerStart[2]<<8)|headerStart[3]; #endif ssInfoLength -= 4; if (tlvLength > ssInfoLength) return False; // bad TLV #ifdef DEBUG fprintf(stderr, "\t\tTLV '%c%c', length %d, leaving %d remaining bytes\n", tlvType>>8, tlvType&0xFF, tlvLength, ssInfoLength - tlvLength); for (int i = 0; i < tlvLength; ++i) fprintf(stderr, "%02x:", headerStart[4+i]); fprintf(stderr, "\n"); #endif ssInfoLength -= tlvLength; headerStart += 4 + tlvLength; } if (ssInfoLength > 0) return False; // malformed TLV data headerStart += padding; } fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame; // whether the *previous* packet ended a frame fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); resultSpecialHeaderSize = expectedHeaderSize; #ifdef DEBUG fprintf(stderr, "Result special header size: %d\n", resultSpecialHeaderSize); #endif return True; } char const* QuickTimeGenericRTPSource::MIMEtype() const { if (fMIMEtypeString == NULL) return MultiFramedRTPSource::MIMEtype(); return fMIMEtypeString; } ////////// QTGenericBufferedPacket and QTGenericBufferedPacketFactory impl QTGenericBufferedPacket ::QTGenericBufferedPacket(QuickTimeGenericRTPSource& ourSource) : fOurSource(ourSource) { } QTGenericBufferedPacket::~QTGenericBufferedPacket() { } unsigned QTGenericBufferedPacket:: nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) { // We use the entire packet for a frame, unless "PCK" == 2 if (fOurSource.qtState.PCK != 2) return dataSize; if (dataSize < 8) return 0; // sanity check unsigned short sampleLength = (framePtr[2]<<8)|framePtr[3]; // later, extract and use the "timestamp" field ##### framePtr += 8; dataSize -= 8; return sampleLength < dataSize ? sampleLength : dataSize; } BufferedPacket* QTGenericBufferedPacketFactory ::createNewPacket(MultiFramedRTPSource* ourSource) { return new QTGenericBufferedPacket((QuickTimeGenericRTPSource&)(*ourSource)); } live/liveMedia/MPEG1or2Demux.cpp000444 001751 000000 00000062641 12265042432 016612 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Demultiplexer for a MPEG 1 or 2 Program Stream
// Implementation

#include "MPEG1or2Demux.hh"
#include "MPEG1or2DemuxedElementaryStream.hh"
#include "StreamParser.hh"
#include <stdio.h> /* the original header name did not survive this dump; <stdio.h> assumed */

////////// MPEGProgramStreamParser definition //////////

// An enum representing the current state of the parser:
enum MPEGParseState {
  PARSING_PACK_HEADER,
  PARSING_SYSTEM_HEADER,
  PARSING_PES_PACKET
};

class MPEGProgramStreamParser: public StreamParser {
public:
  MPEGProgramStreamParser(MPEG1or2Demux* usingDemux, FramedSource* inputSource);
  virtual ~MPEGProgramStreamParser();

public:
  unsigned char parse();
      // returns the stream id of a stream for which a frame was acquired,
      // or 0 if no such frame was acquired.

private:
  void setParseState(MPEGParseState parseState);

  void parsePackHeader();
  void parseSystemHeader();
  unsigned char parsePESPacket(); // returns as does parse()

  Boolean isSpecialStreamId(unsigned char stream_id) const;
      // for PES packet header parsing

private:
  MPEG1or2Demux* fUsingDemux;
  MPEGParseState fCurrentParseState;
};

////////// MPEG1or2Demux::OutputDescriptor::SavedData definition/implementation //////////

class MPEG1or2Demux::OutputDescriptor::SavedData {
public:
  SavedData(unsigned char* buf, unsigned size)
    : next(NULL), data(buf), dataSize(size), numBytesUsed(0) {
  }
  virtual ~SavedData() {
    delete[] data;
    delete next;
  }

  SavedData* next;
  unsigned char* data;
  unsigned dataSize, numBytesUsed;
};

////////// MPEG1or2Demux implementation //////////

MPEG1or2Demux
::MPEG1or2Demux(UsageEnvironment& env, FramedSource* inputSource, Boolean reclaimWhenLastESDies)
  : Medium(env),
    fInputSource(inputSource), fMPEGversion(0),
    fNextAudioStreamNumber(0), fNextVideoStreamNumber(0),
    fReclaimWhenLastESDies(reclaimWhenLastESDies), fNumOutstandingESs(0),
    fNumPendingReads(0), fHaveUndeliveredData(False) {
  fParser = new MPEGProgramStreamParser(this, inputSource);
  for (unsigned i = 0; i < 256; ++i) {
    fOutput[i].savedDataHead = fOutput[i].savedDataTail = NULL;
    fOutput[i].isPotentiallyReadable = False;
    fOutput[i].isCurrentlyActive = False;
    fOutput[i].isCurrentlyAwaitingData = False;
  }
}

MPEG1or2Demux::~MPEG1or2Demux() {
  delete fParser;
  for (unsigned i = 0; i < 256; ++i) delete fOutput[i].savedDataHead;
  Medium::close(fInputSource);
}

MPEG1or2Demux* MPEG1or2Demux
::createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean reclaimWhenLastESDies) {
  // Need to add source type checking here???
##### return new MPEG1or2Demux(env, inputSource, reclaimWhenLastESDies); } MPEG1or2Demux::SCR::SCR() : highBit(0), remainingBits(0), extension(0), isValid(False) { } void MPEG1or2Demux ::noteElementaryStreamDeletion(MPEG1or2DemuxedElementaryStream* /*es*/) { if (--fNumOutstandingESs == 0 && fReclaimWhenLastESDies) { Medium::close(this); } } void MPEG1or2Demux::flushInput() { fParser->flushInput(); } MPEG1or2DemuxedElementaryStream* MPEG1or2Demux::newElementaryStream(u_int8_t streamIdTag) { ++fNumOutstandingESs; fOutput[streamIdTag].isPotentiallyReadable = True; return new MPEG1or2DemuxedElementaryStream(envir(), streamIdTag, *this); } MPEG1or2DemuxedElementaryStream* MPEG1or2Demux::newAudioStream() { unsigned char newAudioStreamTag = 0xC0 | (fNextAudioStreamNumber++&~0xE0); // MPEG audio stream tags are 110x xxxx (binary) return newElementaryStream(newAudioStreamTag); } MPEG1or2DemuxedElementaryStream* MPEG1or2Demux::newVideoStream() { unsigned char newVideoStreamTag = 0xE0 | (fNextVideoStreamNumber++&~0xF0); // MPEG video stream tags are 1110 xxxx (binary) return newElementaryStream(newVideoStreamTag); } // Appropriate one of the reserved stream id tags to mean: return raw PES packets: #define RAW_PES 0xFC MPEG1or2DemuxedElementaryStream* MPEG1or2Demux::newRawPESStream() { return newElementaryStream(RAW_PES); } void MPEG1or2Demux::registerReadInterest(u_int8_t streamIdTag, unsigned char* to, unsigned maxSize, FramedSource::afterGettingFunc* afterGettingFunc, void* afterGettingClientData, FramedSource::onCloseFunc* onCloseFunc, void* onCloseClientData) { struct OutputDescriptor& out = fOutput[streamIdTag]; // Make sure this stream is not already being read: if (out.isCurrentlyAwaitingData) { envir() << "MPEG1or2Demux::registerReadInterest(): attempt to read stream id " << (void*)streamIdTag << " more than once!\n"; envir().internalError(); } out.to = to; out.maxSize = maxSize; out.fAfterGettingFunc = afterGettingFunc; out.afterGettingClientData = afterGettingClientData; out.fOnCloseFunc = onCloseFunc; out.onCloseClientData = onCloseClientData; out.isCurrentlyActive = True; out.isCurrentlyAwaitingData = True; // out.frameSize and out.presentationTime will be set when a frame's read ++fNumPendingReads; } Boolean MPEG1or2Demux::useSavedData(u_int8_t streamIdTag, unsigned char* to, unsigned maxSize, FramedSource::afterGettingFunc* afterGettingFunc, void* afterGettingClientData) { struct OutputDescriptor& out = fOutput[streamIdTag]; if (out.savedDataHead == NULL) return False; // common case unsigned totNumBytesCopied = 0; while (maxSize > 0 && out.savedDataHead != NULL) { OutputDescriptor::SavedData& savedData = *(out.savedDataHead); unsigned char* from = &savedData.data[savedData.numBytesUsed]; unsigned numBytesToCopy = savedData.dataSize - savedData.numBytesUsed; if (numBytesToCopy > maxSize) numBytesToCopy = maxSize; memmove(to, from, numBytesToCopy); to += numBytesToCopy; maxSize -= numBytesToCopy; out.savedDataTotalSize -= numBytesToCopy; totNumBytesCopied += numBytesToCopy; savedData.numBytesUsed += numBytesToCopy; if (savedData.numBytesUsed == savedData.dataSize) { out.savedDataHead = savedData.next; if (out.savedDataHead == NULL) out.savedDataTail = NULL; savedData.next = NULL; delete &savedData; } } out.isCurrentlyActive = True; if (afterGettingFunc != NULL) { struct timeval presentationTime; presentationTime.tv_sec = 0; presentationTime.tv_usec = 0; // should fix ##### (*afterGettingFunc)(afterGettingClientData, totNumBytesCopied, 0 /* numTruncatedBytes */, presentationTime, 0 /* 
durationInMicroseconds ?????#####*/); } return True; } void MPEG1or2Demux ::continueReadProcessing(void* clientData, unsigned char* /*ptr*/, unsigned /*size*/, struct timeval /*presentationTime*/) { MPEG1or2Demux* demux = (MPEG1or2Demux*)clientData; demux->continueReadProcessing(); } void MPEG1or2Demux::continueReadProcessing() { while (fNumPendingReads > 0) { unsigned char acquiredStreamIdTag = fParser->parse(); if (acquiredStreamIdTag != 0) { // We were able to acquire a frame from the input. struct OutputDescriptor& newOut = fOutput[acquiredStreamIdTag]; newOut.isCurrentlyAwaitingData = False; // indicates that we can be read again // (This needs to be set before the 'after getting' call below, // in case it tries to read another frame) // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. if (newOut.fAfterGettingFunc != NULL) { (*newOut.fAfterGettingFunc)(newOut.afterGettingClientData, newOut.frameSize, 0 /* numTruncatedBytes */, newOut.presentationTime, 0 /* durationInMicroseconds ?????#####*/); --fNumPendingReads; } } else { // We were unable to parse a complete frame from the input, because: // - we had to read more data from the source stream, or // - we found a frame for a stream that was being read, but whose // reader is not ready to get the frame right now, or // - the source stream has ended. break; } } } void MPEG1or2Demux::getNextFrame(u_int8_t streamIdTag, unsigned char* to, unsigned maxSize, FramedSource::afterGettingFunc* afterGettingFunc, void* afterGettingClientData, FramedSource::onCloseFunc* onCloseFunc, void* onCloseClientData) { // First, check whether we have saved data for this stream id: if (useSavedData(streamIdTag, to, maxSize, afterGettingFunc, afterGettingClientData)) { return; } // Then save the parameters of the specified stream id: registerReadInterest(streamIdTag, to, maxSize, afterGettingFunc, afterGettingClientData, onCloseFunc, onCloseClientData); // Next, if we're the only currently pending read, continue looking for data: if (fNumPendingReads == 1 || fHaveUndeliveredData) { fHaveUndeliveredData = 0; continueReadProcessing(); } // otherwise the continued read processing has already been taken care of } void MPEG1or2Demux::stopGettingFrames(u_int8_t streamIdTag) { struct OutputDescriptor& out = fOutput[streamIdTag]; if (out.isCurrentlyAwaitingData && fNumPendingReads > 0) --fNumPendingReads; out.isCurrentlyActive = out.isCurrentlyAwaitingData = False; } void MPEG1or2Demux::handleClosure(void* clientData) { MPEG1or2Demux* demux = (MPEG1or2Demux*)clientData; demux->fNumPendingReads = 0; // Tell all pending readers that our source has closed. // Note that we need to make a copy of our readers' close functions // (etc.) before we start calling any of them, in case one of them // ends up deleting this. 
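// (For illustration only, not part of the library: a reader arranges to get
// this close notification via the "onCloseFunc" argument of
// "MPEG1or2Demux::getNextFrame()", e.g.:
//   demux->getNextFrame(0xE0, buf, bufSize,
//                       afterGettingFrame, clientData,
//                       onSourceClosure, clientData);
// where "afterGettingFrame" and "onSourceClosure" are client-supplied
// callbacks, and 0xE0 is the first MPEG video stream id tag.)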
struct { FramedSource::onCloseFunc* fOnCloseFunc; void* onCloseClientData; } savedPending[256]; unsigned i, numPending = 0; for (i = 0; i < 256; ++i) { struct OutputDescriptor& out = demux->fOutput[i]; if (out.isCurrentlyAwaitingData) { if (out.fOnCloseFunc != NULL) { savedPending[numPending].fOnCloseFunc = out.fOnCloseFunc; savedPending[numPending].onCloseClientData = out.onCloseClientData; ++numPending; } } delete out.savedDataHead; out.savedDataHead = out.savedDataTail = NULL; out.savedDataTotalSize = 0; out.isPotentiallyReadable = out.isCurrentlyActive = out.isCurrentlyAwaitingData = False; } for (i = 0; i < numPending; ++i) { (*savedPending[i].fOnCloseFunc)(savedPending[i].onCloseClientData); } } ////////// MPEGProgramStreamParser implementation ////////// #include MPEGProgramStreamParser::MPEGProgramStreamParser(MPEG1or2Demux* usingDemux, FramedSource* inputSource) : StreamParser(inputSource, MPEG1or2Demux::handleClosure, usingDemux, &MPEG1or2Demux::continueReadProcessing, usingDemux), fUsingDemux(usingDemux), fCurrentParseState(PARSING_PACK_HEADER) { } MPEGProgramStreamParser::~MPEGProgramStreamParser() { } void MPEGProgramStreamParser::setParseState(MPEGParseState parseState) { fCurrentParseState = parseState; saveParserState(); } unsigned char MPEGProgramStreamParser::parse() { unsigned char acquiredStreamTagId = 0; try { do { switch (fCurrentParseState) { case PARSING_PACK_HEADER: { parsePackHeader(); break; } case PARSING_SYSTEM_HEADER: { parseSystemHeader(); break; } case PARSING_PES_PACKET: { acquiredStreamTagId = parsePESPacket(); break; } } } while(acquiredStreamTagId == 0); return acquiredStreamTagId; } catch (int /*e*/) { #ifdef DEBUG fprintf(stderr, "MPEGProgramStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n"); fflush(stderr); #endif return 0; // the parsing got interrupted } } #define PACK_START_CODE 0x000001BA #define SYSTEM_HEADER_START_CODE 0x000001BB #define PACKET_START_CODE_PREFIX 0x00000100 static inline Boolean isPacketStartCode(unsigned code) { return (code&0xFFFFFF00) == PACKET_START_CODE_PREFIX && code > SYSTEM_HEADER_START_CODE; } void MPEGProgramStreamParser::parsePackHeader() { #ifdef DEBUG fprintf(stderr, "parsing pack header\n"); fflush(stderr); #endif unsigned first4Bytes; while (1) { first4Bytes = test4Bytes(); // We're supposed to have a pack header here, but check also for // a system header or a PES packet, just in case: if (first4Bytes == PACK_START_CODE) { skipBytes(4); break; } else if (first4Bytes == SYSTEM_HEADER_START_CODE) { #ifdef DEBUG fprintf(stderr, "found system header instead of pack header\n"); #endif setParseState(PARSING_SYSTEM_HEADER); return; } else if (isPacketStartCode(first4Bytes)) { #ifdef DEBUG fprintf(stderr, "found packet start code 0x%02x instead of pack header\n", first4Bytes); #endif setParseState(PARSING_PES_PACKET); return; } setParseState(PARSING_PACK_HEADER); // ensures we progress over bad data if ((first4Bytes&0xFF) > 1) { // a system code definitely doesn't start here skipBytes(4); } else { skipBytes(1); } } // The size of the pack header differs depending on whether it's // MPEG-1 or MPEG-2. 
The next byte tells us this: unsigned char nextByte = get1Byte(); MPEG1or2Demux::SCR& scr = fUsingDemux->fLastSeenSCR; // alias if ((nextByte&0xF0) == 0x20) { // MPEG-1 fUsingDemux->fMPEGversion = 1; scr.highBit = (nextByte&0x08)>>3; scr.remainingBits = (nextByte&0x06)<<29; unsigned next4Bytes = get4Bytes(); scr.remainingBits |= (next4Bytes&0xFFFE0000)>>2; scr.remainingBits |= (next4Bytes&0x0000FFFE)>>1; scr.extension = 0; scr.isValid = True; skipBits(24); #if defined(DEBUG_TIMESTAMPS) || defined(DEBUG_SCR_TIMESTAMPS) fprintf(stderr, "pack hdr system_clock_reference_base: 0x%x", scr.highBit); fprintf(stderr, "%08x\n", scr.remainingBits); #endif } else if ((nextByte&0xC0) == 0x40) { // MPEG-2 fUsingDemux->fMPEGversion = 2; scr.highBit = (nextByte&0x20)>>5; scr.remainingBits = (nextByte&0x18)<<27; scr.remainingBits |= (nextByte&0x03)<<28; unsigned next4Bytes = get4Bytes(); scr.remainingBits |= (next4Bytes&0xFFF80000)>>4; scr.remainingBits |= (next4Bytes&0x0003FFF8)>>3; scr.extension = (next4Bytes&0x00000003)<<7; next4Bytes = get4Bytes(); scr.extension |= (next4Bytes&0xFE000000)>>25; scr.isValid = True; skipBits(5); #if defined(DEBUG_TIMESTAMPS) || defined(DEBUG_SCR_TIMESTAMPS) fprintf(stderr, "pack hdr system_clock_reference_base: 0x%x", scr.highBit); fprintf(stderr, "%08x\n", scr.remainingBits); fprintf(stderr, "pack hdr system_clock_reference_extension: 0x%03x\n", scr.extension); #endif unsigned char pack_stuffing_length = getBits(3); skipBytes(pack_stuffing_length); } else { // unknown fUsingDemux->envir() << "StreamParser::parsePack() saw strange byte " << (void*)nextByte << " following pack_start_code\n"; } // Check for a System Header next: setParseState(PARSING_SYSTEM_HEADER); } void MPEGProgramStreamParser::parseSystemHeader() { #ifdef DEBUG fprintf(stderr, "parsing system header\n"); fflush(stderr); #endif unsigned next4Bytes = test4Bytes(); if (next4Bytes != SYSTEM_HEADER_START_CODE) { // The system header was optional. Look for a PES Packet instead: setParseState(PARSING_PES_PACKET); return; } #ifdef DEBUG fprintf(stderr, "saw system_header_start_code\n"); fflush(stderr); #endif skipBytes(4); // we've already seen the system_header_start_code unsigned short remaining_header_length = get2Bytes(); // According to the MPEG-1 and MPEG-2 specs, "remaining_header_length" should be // at least 6 bytes. Check this now: if (remaining_header_length < 6) { fUsingDemux->envir() << "StreamParser::parseSystemHeader(): saw strange header_length: " << remaining_header_length << " < 6\n"; } skipBytes(remaining_header_length); // Check for a PES Packet next: setParseState(PARSING_PES_PACKET); } #define private_stream_1 0xBD #define private_stream_2 0xBF // A test for stream ids that are exempt from normal PES packet header parsing Boolean MPEGProgramStreamParser ::isSpecialStreamId(unsigned char stream_id) const { if (stream_id == RAW_PES) return True; // hack if (fUsingDemux->fMPEGversion == 1) { return stream_id == private_stream_2; } else { // assume MPEG-2 if (stream_id <= private_stream_2) { return stream_id != private_stream_1; } else if ((stream_id&0xF0) == 0xF0) { unsigned char lower4Bits = stream_id&0x0F; return lower4Bits <= 2 || lower4Bits == 0x8 || lower4Bits == 0xF; } else { return False; } } } #define READER_NOT_READY 2 unsigned char MPEGProgramStreamParser::parsePESPacket() { #ifdef DEBUG fprintf(stderr, "parsing PES packet\n"); fflush(stderr); #endif unsigned next4Bytes = test4Bytes(); if (!isPacketStartCode(next4Bytes)) { // The PES Packet was optional. 
Look for a Pack Header instead: setParseState(PARSING_PACK_HEADER); return 0; } #ifdef DEBUG fprintf(stderr, "saw packet_start_code_prefix\n"); fflush(stderr); #endif skipBytes(3); // we've already seen the packet_start_code_prefix unsigned char stream_id = get1Byte(); #if defined(DEBUG) || defined(DEBUG_TIMESTAMPS) unsigned char streamNum = stream_id; char const* streamTypeStr; if ((stream_id&0xE0) == 0xC0) { streamTypeStr = "audio"; streamNum = stream_id&~0xE0; } else if ((stream_id&0xF0) == 0xE0) { streamTypeStr = "video"; streamNum = stream_id&~0xF0; } else if (stream_id == 0xbc) { streamTypeStr = "reserved"; } else if (stream_id == 0xbd) { streamTypeStr = "private_1"; } else if (stream_id == 0xbe) { streamTypeStr = "padding"; } else if (stream_id == 0xbf) { streamTypeStr = "private_2"; } else { streamTypeStr = "unknown"; } #endif #ifdef DEBUG static unsigned frameCount = 1; fprintf(stderr, "%d, saw %s stream: 0x%02x\n", frameCount, streamTypeStr, streamNum); fflush(stderr); #endif unsigned short PES_packet_length = get2Bytes(); #ifdef DEBUG fprintf(stderr, "PES_packet_length: %d\n", PES_packet_length); fflush(stderr); #endif // Parse over the rest of the header, until we get to the packet data itself. // This varies depending upon the MPEG version: if (fUsingDemux->fOutput[RAW_PES].isPotentiallyReadable) { // Hack: We've been asked to return raw PES packets, for every stream: stream_id = RAW_PES; } unsigned savedParserOffset = curOffset(); #ifdef DEBUG_TIMESTAMPS unsigned char pts_highBit = 0; unsigned pts_remainingBits = 0; unsigned char dts_highBit = 0; unsigned dts_remainingBits = 0; #endif if (fUsingDemux->fMPEGversion == 1) { if (!isSpecialStreamId(stream_id)) { unsigned char nextByte; while ((nextByte = get1Byte()) == 0xFF) { // stuffing_byte } if ((nextByte&0xC0) == 0x40) { // '01' skipBytes(1); nextByte = get1Byte(); } if ((nextByte&0xF0) == 0x20) { // '0010' #ifdef DEBUG_TIMESTAMPS pts_highBit = (nextByte&0x08)>>3; pts_remainingBits = (nextByte&0x06)<<29; unsigned next4Bytes = get4Bytes(); pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2; pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1; #else skipBytes(4); #endif } else if ((nextByte&0xF0) == 0x30) { // '0011' #ifdef DEBUG_TIMESTAMPS pts_highBit = (nextByte&0x08)>>3; pts_remainingBits = (nextByte&0x06)<<29; unsigned next4Bytes = get4Bytes(); pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2; pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1; nextByte = get1Byte(); dts_highBit = (nextByte&0x08)>>3; dts_remainingBits = (nextByte&0x06)<<29; next4Bytes = get4Bytes(); dts_remainingBits |= (next4Bytes&0xFFFE0000)>>2; dts_remainingBits |= (next4Bytes&0x0000FFFE)>>1; #else skipBytes(9); #endif } } } else { // assume MPEG-2 if (!isSpecialStreamId(stream_id)) { // Fields in the next 3 bytes determine the size of the rest: unsigned next3Bytes = getBits(24); #ifdef DEBUG_TIMESTAMPS unsigned char PTS_DTS_flags = (next3Bytes&0x00C000)>>14; #endif #ifdef undef unsigned char ESCR_flag = (next3Bytes&0x002000)>>13; unsigned char ES_rate_flag = (next3Bytes&0x001000)>>12; unsigned char DSM_trick_mode_flag = (next3Bytes&0x000800)>>11; #endif unsigned char PES_header_data_length = (next3Bytes&0x0000FF); #ifdef DEBUG fprintf(stderr, "PES_header_data_length: 0x%02x\n", PES_header_data_length); fflush(stderr); #endif #ifdef DEBUG_TIMESTAMPS if (PTS_DTS_flags == 0x2 && PES_header_data_length >= 5) { unsigned char nextByte = get1Byte(); pts_highBit = (nextByte&0x08)>>3; pts_remainingBits = (nextByte&0x06)<<29; unsigned next4Bytes = get4Bytes(); 
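// (A PES presentation timestamp is 33 bits, carried in 5 bytes with
// interspersed marker bits; bit 32 was saved in "pts_highBit" above, and the
// shifts here reassemble bits 31..0 into "pts_remainingBits".)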
pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2; pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1; skipBytes(PES_header_data_length-5); } else if (PTS_DTS_flags == 0x3 && PES_header_data_length >= 10) { unsigned char nextByte = get1Byte(); pts_highBit = (nextByte&0x08)>>3; pts_remainingBits = (nextByte&0x06)<<29; unsigned next4Bytes = get4Bytes(); pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2; pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1; nextByte = get1Byte(); dts_highBit = (nextByte&0x08)>>3; dts_remainingBits = (nextByte&0x06)<<29; next4Bytes = get4Bytes(); dts_remainingBits |= (next4Bytes&0xFFFE0000)>>2; dts_remainingBits |= (next4Bytes&0x0000FFFE)>>1; skipBytes(PES_header_data_length-10); } #else skipBytes(PES_header_data_length); #endif } } #ifdef DEBUG_TIMESTAMPS fprintf(stderr, "%s stream, ", streamTypeStr); fprintf(stderr, "packet presentation_time_stamp: 0x%x", pts_highBit); fprintf(stderr, "%08x\n", pts_remainingBits); fprintf(stderr, "\t\tpacket decoding_time_stamp: 0x%x", dts_highBit); fprintf(stderr, "%08x\n", dts_remainingBits); #endif // The rest of the packet will be the "PES_packet_data_byte"s // Make sure that "PES_packet_length" was consistent with where we are now: unsigned char acquiredStreamIdTag = 0; unsigned currentParserOffset = curOffset(); unsigned bytesSkipped = currentParserOffset - savedParserOffset; if (stream_id == RAW_PES) { restoreSavedParserState(); // so we deliver from the beginning of the PES packet PES_packet_length += 6; // to include the whole of the PES packet bytesSkipped = 0; } if (PES_packet_length < bytesSkipped) { fUsingDemux->envir() << "StreamParser::parsePESPacket(): saw inconsistent PES_packet_length " << PES_packet_length << " < " << bytesSkipped << "\n"; } else { PES_packet_length -= bytesSkipped; #ifdef DEBUG unsigned next4Bytes = test4Bytes(); #endif // Check whether our using source is interested in this stream type. // If so, deliver the frame to him: MPEG1or2Demux::OutputDescriptor_t& out = fUsingDemux->fOutput[stream_id]; if (out.isCurrentlyAwaitingData) { unsigned numBytesToCopy; if (PES_packet_length > out.maxSize) { fUsingDemux->envir() << "MPEGProgramStreamParser::parsePESPacket() error: PES_packet_length (" << PES_packet_length << ") exceeds max frame size asked for (" << out.maxSize << ")\n"; numBytesToCopy = out.maxSize; } else { numBytesToCopy = PES_packet_length; } getBytes(out.to, numBytesToCopy); out.frameSize = numBytesToCopy; #ifdef DEBUG fprintf(stderr, "%d, %d bytes of PES_packet_data (out.maxSize: %d); first 4 bytes: 0x%08x\n", frameCount, numBytesToCopy, out.maxSize, next4Bytes); fflush(stderr); #endif // set out.presentationTime later ##### acquiredStreamIdTag = stream_id; PES_packet_length -= numBytesToCopy; } else if (out.isCurrentlyActive) { // Someone has been reading this stream, but isn't right now. // We can't deliver this frame until he asks for it, so punt for now. // The next time he asks for a frame, he'll get it. #ifdef DEBUG fprintf(stderr, "%d, currently undeliverable PES data; first 4 bytes: 0x%08x - currently undeliverable!\n", frameCount, next4Bytes); fflush(stderr); #endif restoreSavedParserState(); // so we read from the beginning next time fUsingDemux->fHaveUndeliveredData = True; throw READER_NOT_READY; } else if (out.isPotentiallyReadable && out.savedDataTotalSize + PES_packet_length < 1000000 /*limit*/) { // Someone is interested in this stream, but hasn't begun reading it yet. // Save this data, so that the reader will get it when he later asks for it. 
    unsigned char* buf = new unsigned char[PES_packet_length];
    getBytes(buf, PES_packet_length);

    MPEG1or2Demux::OutputDescriptor::SavedData* savedData
      = new MPEG1or2Demux::OutputDescriptor::SavedData(buf, PES_packet_length);
    if (out.savedDataHead == NULL) {
      out.savedDataHead = out.savedDataTail = savedData;
    } else {
      out.savedDataTail->next = savedData;
      out.savedDataTail = savedData;
    }
    out.savedDataTotalSize += PES_packet_length;
    PES_packet_length = 0;
   }
   skipBytes(PES_packet_length);
  }

  // Check for another PES Packet next:
  setParseState(PARSING_PES_PACKET);
#ifdef DEBUG
  ++frameCount;
#endif
  return acquiredStreamIdTag;
}
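// Illustrative sketch (not part of the original sources): the demux code above
// carries each 33-bit SCR/PTS/DTS value split into a 1-bit "highBit" plus a
// 32-bit "remainingBits" word, with a separate 9-bit "extension" for MPEG-2.
// Assuming that representation, this is one plausible way to reassemble such a
// value into 90 kHz clock ticks and seconds (the function and parameter names
// here are this sketch's own, not the library's):

#include <stdint.h>

static double mpegClockToSeconds(uint8_t highBit, uint32_t remainingBits,
                                 uint16_t extension) {
  // Bit 32 is "highBit"; bits 31..0 are "remainingBits":
  uint64_t base = ((uint64_t)(highBit & 1) << 32) | remainingBits;
  // "base" counts 90 kHz ticks; "extension" counts 27 MHz ticks (0..299):
  return base / 90000.0 + (extension % 300) / 27000000.0;
}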
live/liveMedia/RTPSink.cpp

/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP Sinks
// Implementation

#include "RTPSink.hh"
#include "GroupsockHelper.hh"

////////// RTPSink //////////

Boolean RTPSink::lookupByName(UsageEnvironment& env, char const* sinkName,
			      RTPSink*& resultSink) {
  resultSink = NULL; // unless we succeed

  MediaSink* sink;
  if (!MediaSink::lookupByName(env, sinkName, sink)) return False;

  if (!sink->isRTPSink()) {
    env.setResultMsg(sinkName, " is not an RTP sink");
    return False;
  }

  resultSink = (RTPSink*)sink;
  return True;
}

Boolean RTPSink::isRTPSink() const {
  return True;
}

RTPSink::RTPSink(UsageEnvironment& env,
		 Groupsock* rtpGS, unsigned char rtpPayloadType,
		 unsigned rtpTimestampFrequency,
		 char const* rtpPayloadFormatName,
		 unsigned numChannels)
  : MediaSink(env), fRTPInterface(this, rtpGS),
    fRTPPayloadType(rtpPayloadType),
    fPacketCount(0), fOctetCount(0), fTotalOctetCount(0),
    fTimestampFrequency(rtpTimestampFrequency),
    fNextTimestampHasBeenPreset(False), fEnableRTCPReports(True),
    fNumChannels(numChannels) {
  fRTPPayloadFormatName
    = strDup(rtpPayloadFormatName == NULL ? "???" : rtpPayloadFormatName);
  gettimeofday(&fCreationTime, NULL);
  fTotalOctetCountStartTime = fCreationTime;
  resetPresentationTimes();

  fSeqNo = (u_int16_t)our_random();
  fSSRC = our_random32();
  fTimestampBase = our_random32();

  fTransmissionStatsDB = new RTPTransmissionStatsDB(*this);
}

RTPSink::~RTPSink() {
  delete fTransmissionStatsDB;
  delete[] (char*)fRTPPayloadFormatName;
}

u_int32_t RTPSink::convertToRTPTimestamp(struct timeval tv) {
  // Begin by converting from "struct timeval" units to RTP timestamp units:
  u_int32_t timestampIncrement = (fTimestampFrequency*tv.tv_sec);
  timestampIncrement += (u_int32_t)(fTimestampFrequency*(tv.tv_usec/1000000.0) + 0.5); // note: rounding

  // Then add this to our 'timestamp base':
  if (fNextTimestampHasBeenPreset) {
    // Make the returned timestamp the same as the current "fTimestampBase",
    // so that timestamps begin with the value that was previously preset:
    fTimestampBase -= timestampIncrement;
    fNextTimestampHasBeenPreset = False;
  }

  u_int32_t const rtpTimestamp = fTimestampBase + timestampIncrement;
#ifdef DEBUG_TIMESTAMPS
  fprintf(stderr, "fTimestampBase: 0x%08x, tv: %lu.%06ld\n\t=> RTP timestamp: 0x%08x\n",
	  fTimestampBase, tv.tv_sec, tv.tv_usec, rtpTimestamp);
  fflush(stderr);
#endif

  return rtpTimestamp;
}

u_int32_t RTPSink::presetNextTimestamp() {
  struct timeval timeNow;
  gettimeofday(&timeNow, NULL);

  u_int32_t tsNow = convertToRTPTimestamp(timeNow);
  fTimestampBase = tsNow;
  fNextTimestampHasBeenPreset = True;

  return tsNow;
}

void RTPSink::getTotalBitrate(unsigned& outNumBytes, double& outElapsedTime) {
  struct timeval timeNow;
  gettimeofday(&timeNow, NULL);

  outNumBytes = fTotalOctetCount;
  outElapsedTime = (double)(timeNow.tv_sec-fTotalOctetCountStartTime.tv_sec)
    + (timeNow.tv_usec-fTotalOctetCountStartTime.tv_usec)/1000000.0;

  fTotalOctetCount = 0;
  fTotalOctetCountStartTime = timeNow;
}

void RTPSink::resetPresentationTimes() {
  fInitialPresentationTime.tv_sec = fMostRecentPresentationTime.tv_sec = 0;
  fInitialPresentationTime.tv_usec = fMostRecentPresentationTime.tv_usec = 0;
}

char const* RTPSink::sdpMediaType() const {
  return "data"; // default SDP media (m=) type, unless redefined by subclasses
}

char* RTPSink::rtpmapLine() const {
  if (rtpPayloadType() >= 96) { // the payload format type is dynamic
    char* encodingParamsPart;
    if (numChannels() != 1) {
      encodingParamsPart = new char[1 + 20 /* max int len */];
      sprintf(encodingParamsPart, "/%d", numChannels());
    } else {
      encodingParamsPart = strDup("");
    }
    char const* const rtpmapFmt = "a=rtpmap:%d %s/%d%s\r\n";
    unsigned rtpmapFmtSize = strlen(rtpmapFmt)
      + 3 /* max char len */ + strlen(rtpPayloadFormatName())
      + 20 /* max int len */ + strlen(encodingParamsPart);
    char* rtpmapLine = new char[rtpmapFmtSize];
    sprintf(rtpmapLine, rtpmapFmt,
	    rtpPayloadType(), rtpPayloadFormatName(),
	    rtpTimestampFrequency(), encodingParamsPart);
    delete[] encodingParamsPart;

    return rtpmapLine;
  } else {
    // The payload format is static, so there's no "a=rtpmap:" line:
    return strDup("");
  }
}

char const* RTPSink::auxSDPLine() {
  return NULL; // by default
}
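// Illustrative sketch (not part of the original sources):
// convertToRTPTimestamp() above computes ticks = fTimestampFrequency *
// wall-clock seconds (rounded), added to a random 32-bit base.  A
// self-contained version of that arithmetic for a 90000 Hz (video) clock;
// "timestampBase" here stands in for the sink's random base value:

#include <stdint.h>
#include <sys/time.h>

static uint32_t toRtpTimestamp90k(struct timeval tv, uint32_t timestampBase) {
  uint32_t inc = 90000 * (uint32_t)tv.tv_sec;                // whole seconds
  inc += (uint32_t)(90000 * (tv.tv_usec / 1000000.0) + 0.5); // rounded fraction
  return timestampBase + inc; // wraps modulo 2^32, as RTP timestamps do
}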
////////// RTPTransmissionStatsDB //////////

RTPTransmissionStatsDB::RTPTransmissionStatsDB(RTPSink& rtpSink)
  : fOurRTPSink(rtpSink),
    fTable(HashTable::create(ONE_WORD_HASH_KEYS)) {
  fNumReceivers=0;
}

RTPTransmissionStatsDB::~RTPTransmissionStatsDB() {
  // First, remove and delete all stats records from the table:
  RTPTransmissionStats* stats;
  while ((stats = (RTPTransmissionStats*)fTable->RemoveNext()) != NULL) {
    delete stats;
  }

  // Then, delete the table itself:
  delete fTable;
}

void RTPTransmissionStatsDB
::noteIncomingRR(u_int32_t SSRC, struct sockaddr_in const& lastFromAddress,
		 unsigned lossStats, unsigned lastPacketNumReceived,
		 unsigned jitter, unsigned lastSRTime, unsigned diffSR_RRTime) {
  RTPTransmissionStats* stats = lookup(SSRC);
  if (stats == NULL) {
    // This is the first time we've heard of this SSRC.
    // Create a new record for it:
    stats = new RTPTransmissionStats(fOurRTPSink, SSRC);
    if (stats == NULL) return;
    add(SSRC, stats);
#ifdef DEBUG_RR
    fprintf(stderr, "Adding new entry for SSRC %x in RTPTransmissionStatsDB\n", SSRC);
#endif
  }

  stats->noteIncomingRR(lastFromAddress,
			lossStats, lastPacketNumReceived, jitter, lastSRTime, diffSR_RRTime);
}

void RTPTransmissionStatsDB::removeRecord(u_int32_t SSRC) {
  RTPTransmissionStats* stats = lookup(SSRC);
  if (stats != NULL) {
    long SSRC_long = (long)SSRC;
    fTable->Remove((char const*)SSRC_long);
    --fNumReceivers;
    delete stats;
  }
}

RTPTransmissionStatsDB::Iterator
::Iterator(RTPTransmissionStatsDB& receptionStatsDB)
  : fIter(HashTable::Iterator::create(*(receptionStatsDB.fTable))) {
}

RTPTransmissionStatsDB::Iterator::~Iterator() {
  delete fIter;
}

RTPTransmissionStats* RTPTransmissionStatsDB::Iterator::next() {
  char const* key; // dummy
  return (RTPTransmissionStats*)(fIter->next(key));
}

RTPTransmissionStats* RTPTransmissionStatsDB::lookup(u_int32_t SSRC) const {
  long SSRC_long = (long)SSRC;
  return (RTPTransmissionStats*)(fTable->Lookup((char const*)SSRC_long));
}

void RTPTransmissionStatsDB::add(u_int32_t SSRC, RTPTransmissionStats* stats) {
  long SSRC_long = (long)SSRC;
  fTable->Add((char const*)SSRC_long, stats);
  ++fNumReceivers;
}

////////// RTPTransmissionStats //////////

RTPTransmissionStats::RTPTransmissionStats(RTPSink& rtpSink, u_int32_t SSRC)
  : fOurRTPSink(rtpSink), fSSRC(SSRC), fLastPacketNumReceived(0),
    fPacketLossRatio(0), fTotNumPacketsLost(0), fJitter(0), fLastSRTime(0),
    fDiffSR_RRTime(0), fAtLeastTwoRRsHaveBeenReceived(False), fFirstPacket(True),
    fTotalOctetCount_hi(0), fTotalOctetCount_lo(0),
    fTotalPacketCount_hi(0), fTotalPacketCount_lo(0) {
  gettimeofday(&fTimeCreated, NULL);

  fLastOctetCount = rtpSink.octetCount();
  fLastPacketCount = rtpSink.packetCount();
}

RTPTransmissionStats::~RTPTransmissionStats() {}

void RTPTransmissionStats
::noteIncomingRR(struct sockaddr_in const& lastFromAddress,
		 unsigned lossStats, unsigned lastPacketNumReceived,
		 unsigned jitter, unsigned lastSRTime, unsigned diffSR_RRTime) {
  if (fFirstPacket) {
    fFirstPacket = False;
    fFirstPacketNumReported = lastPacketNumReceived;
  } else {
    fAtLeastTwoRRsHaveBeenReceived = True;
    fOldLastPacketNumReceived = fLastPacketNumReceived;
    fOldTotNumPacketsLost = fTotNumPacketsLost;
  }
  gettimeofday(&fTimeReceived, NULL);

  fLastFromAddress = lastFromAddress;
  fPacketLossRatio = lossStats>>24;
  fTotNumPacketsLost = lossStats&0xFFFFFF;
  fLastPacketNumReceived = lastPacketNumReceived;
  fJitter = jitter;
  fLastSRTime = lastSRTime;
  fDiffSR_RRTime = diffSR_RRTime;
#ifdef DEBUG_RR
  fprintf(stderr, "RTCP RR data (received at %lu.%06ld): lossStats 0x%08x, lastPacketNumReceived 0x%08x, jitter 0x%08x, lastSRTime 0x%08x, diffSR_RRTime 0x%08x\n",
	  fTimeReceived.tv_sec, fTimeReceived.tv_usec, lossStats, lastPacketNumReceived, jitter, lastSRTime, diffSR_RRTime);
  unsigned rtd = roundTripDelay();
  fprintf(stderr, "=> round-trip delay: 0x%04x (== %f seconds)\n", rtd, rtd/65536.0);
#endif

  // Update our counts of the total number of octets and packets sent towards
  // this receiver:
  u_int32_t newOctetCount = fOurRTPSink.octetCount();
  u_int32_t octetCountDiff = newOctetCount - fLastOctetCount;
  fLastOctetCount = newOctetCount;
  u_int32_t prevTotalOctetCount_lo = fTotalOctetCount_lo;
  fTotalOctetCount_lo += octetCountDiff;
  if (fTotalOctetCount_lo < prevTotalOctetCount_lo) { // wrap around
    ++fTotalOctetCount_hi;
  }

  u_int32_t newPacketCount = fOurRTPSink.packetCount();
  u_int32_t packetCountDiff = newPacketCount - fLastPacketCount;
  fLastPacketCount = newPacketCount;
  u_int32_t prevTotalPacketCount_lo = fTotalPacketCount_lo;
  fTotalPacketCount_lo += packetCountDiff;
  if (fTotalPacketCount_lo < prevTotalPacketCount_lo) { // wrap around
    ++fTotalPacketCount_hi;
  }
}
unsigned RTPTransmissionStats::roundTripDelay() const {
  // Compute the round-trip delay that was indicated by the most recently-received
  // RTCP RR packet.  Use the method noted in the RTP/RTCP specification (RFC 3550).

  if (fLastSRTime == 0) {
    // Either no RTCP RR packet has been received yet, or else the
    // reporting receiver has not yet received any RTCP SR packets from us:
    return 0;
  }

  // First, convert the time that we received the last RTCP RR packet to NTP format,
  // in units of 1/65536 (2^-16) seconds:
  unsigned lastReceivedTimeNTP_high
    = fTimeReceived.tv_sec + 0x83AA7E80; // 1970 epoch -> 1900 epoch
  double fractionalPart = (fTimeReceived.tv_usec*0x0400)/15625.0; // 2^16/10^6
  unsigned lastReceivedTimeNTP
    = (unsigned)((lastReceivedTimeNTP_high<<16) + fractionalPart + 0.5);

  int rawResult = lastReceivedTimeNTP - fLastSRTime - fDiffSR_RRTime;
  if (rawResult < 0) {
    // This can happen if there's clock drift between the sender and receiver,
    // and if the round-trip time was very small.
    rawResult = 0;
  }
  return (unsigned)rawResult;
}

void RTPTransmissionStats::getTotalOctetCount(u_int32_t& hi, u_int32_t& lo) {
  hi = fTotalOctetCount_hi;
  lo = fTotalOctetCount_lo;
}

void RTPTransmissionStats::getTotalPacketCount(u_int32_t& hi, u_int32_t& lo) {
  hi = fTotalPacketCount_hi;
  lo = fTotalPacketCount_lo;
}

unsigned RTPTransmissionStats::packetsReceivedSinceLastRR() const {
  if (!fAtLeastTwoRRsHaveBeenReceived) return 0;

  return fLastPacketNumReceived-fOldLastPacketNumReceived;
}

int RTPTransmissionStats::packetsLostBetweenRR() const {
  if (!fAtLeastTwoRRsHaveBeenReceived) return 0;

  return fTotNumPacketsLost - fOldTotNumPacketsLost;
}
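// Illustrative sketch (not part of the original sources): roundTripDelay()
// above applies the RFC 3550 formula
//   RTT = A - LSR - DLSR
// where A is the RR's arrival time, LSR is the "last SR" timestamp echoed by
// the receiver, and DLSR is the receiver's delay since that SR, all expressed
// in units of 1/65536 second (the "middle 32 bits" of an NTP timestamp).
// A minimal standalone version of the same computation, assuming the three
// inputs have already been captured as 32-bit words:

#include <stdint.h>

static uint32_t rtcpRoundTripDelay(uint32_t arrivalNTP16, uint32_t lsr, uint32_t dlsr) {
  int32_t raw = (int32_t)(arrivalNTP16 - lsr - dlsr);
  return raw < 0 ? 0 : (uint32_t)raw; // clamp, as the library code above does
  // e.g., a result of 0x4000 (== 16384) means 16384/65536 = 0.25 seconds
}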
live/liveMedia/MP3Internals.cpp

// MP3 internal implementation details
// Implementation

#include "MP3InternalsHuffman.hh"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

// This is crufty old code that needs to be cleaned up #####

static unsigned const live_tabsel[2][3][16] = {
   { {32,32,64,96,128,160,192,224,256,288,320,352,384,416,448,448},
     {32,32,48,56, 64, 80, 96,112,128,160,192,224,256,320,384,384},
     {32,32,40,48, 56, 64, 80, 96,112,128,160,192,224,256,320,320} },

   { {32,32,48,56,64,80,96,112,128,144,160,176,192,224,256,256},
     {8,8,16,24,32,40,48,56,64,80,96,112,128,144,160,160},
     {8,8,16,24,32,40,48,56,64,80,96,112,128,144,160,160} }
};
/* Note: live_tabsel[*][*][0 or 15] shouldn't occur; use dummy values there */

static long const live_freqs[]
  = { 44100, 48000, 32000, 22050, 24000, 16000, 11025, 12000, 8000, 0 };

struct bandInfoStruct {
  int longIdx[23];
  int longDiff[22];
  int shortIdx[14];
  int shortDiff[13];
};

static struct bandInfoStruct const bandInfo[7] = {
  /* MPEG 1.0 */
  { {0,4,8,12,16,20,24,30,36,44,52,62,74, 90,110,134,162,196,238,288,342,418,576},
    {4,4,4,4,4,4,6,6,8, 8,10,12,16,20,24,28,34,42,50,54, 76,158},
    {0,4*3,8*3,12*3,16*3,22*3,30*3,40*3,52*3,66*3, 84*3,106*3,136*3,192*3},
    {4,4,4,4,6,8,10,12,14,18,22,30,56} } ,

  { {0,4,8,12,16,20,24,30,36,42,50,60,72, 88,106,128,156,190,230,276,330,384,576},
    {4,4,4,4,4,4,6,6,6, 8,10,12,16,18,22,28,34,40,46,54, 54,192},
    {0,4*3,8*3,12*3,16*3,22*3,28*3,38*3,50*3,64*3, 80*3,100*3,126*3,192*3},
    {4,4,4,4,6,6,10,12,14,16,20,26,66} } ,

  { {0,4,8,12,16,20,24,30,36,44,54,66,82,102,126,156,194,240,296,364,448,550,576} ,
    {4,4,4,4,4,4,6,6,8,10,12,16,20,24,30,38,46,56,68,84,102, 26} ,
    {0,4*3,8*3,12*3,16*3,22*3,30*3,42*3,58*3,78*3,104*3,138*3,180*3,192*3} ,
    {4,4,4,4,6,8,12,16,20,26,34,42,12} } ,

  /* MPEG 2.0 */
  { {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576},
    {6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 } ,
    {0,4*3,8*3,12*3,18*3,24*3,32*3,42*3,56*3,74*3,100*3,132*3,174*3,192*3} ,
    {4,4,4,6,6,8,10,14,18,26,32,42,18 } } ,

  { {0,6,12,18,24,30,36,44,54,66,80,96,114,136,162,194,232,278,330,394,464,540,576},
    {6,6,6,6,6,6,8,10,12,14,16,18,22,26,32,38,46,52,64,70,76,36 } ,
    {0,4*3,8*3,12*3,18*3,26*3,36*3,48*3,62*3,80*3,104*3,136*3,180*3,192*3} ,
    {4,4,4,6,8,10,12,14,18,24,32,44,12 } } ,

  { {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576},
    {6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 },
    {0,4*3,8*3,12*3,18*3,26*3,36*3,48*3,62*3,80*3,104*3,134*3,174*3,192*3},
    {4,4,4,6,8,10,12,14,18,24,30,40,18 } } ,

  /* MPEG 2.5, wrong!
table (it's just a copy of MPEG 2.0/44.1kHz) */ { {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576}, {6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 } , {0,4*3,8*3,12*3,18*3,24*3,32*3,42*3,56*3,74*3,100*3,132*3,174*3,192*3} , {4,4,4,6,6,8,10,14,18,26,32,42,18 } } , }; unsigned int n_slen2[512]; /* MPEG 2.0 slen for 'normal' mode */ unsigned int i_slen2[256]; /* MPEG 2.0 slen for intensity stereo */ #define MPG_MD_MONO 3 ////////// MP3FrameParams ////////// MP3FrameParams::MP3FrameParams() : bv(frameBytes, 0, sizeof frameBytes) /* by default */ { oldHdr = firstHdr = 0; static Boolean doneInit = False; if (doneInit) return; int i,j,k,l; for (i=0;i<5;i++) { for (j=0;j<6;j++) { for (k=0;k<6;k++) { int n = k + j * 6 + i * 36; i_slen2[n] = i|(j<<3)|(k<<6)|(3<<12); } } } for (i=0;i<4;i++) { for (j=0;j<4;j++) { for (k=0;k<4;k++) { int n = k + j * 4 + i * 16; i_slen2[n+180] = i|(j<<3)|(k<<6)|(4<<12); } } } for (i=0;i<4;i++) { for (j=0;j<3;j++) { int n = j + i * 3; i_slen2[n+244] = i|(j<<3) | (5<<12); n_slen2[n+500] = i|(j<<3) | (2<<12) | (1<<15); } } for (i=0;i<5;i++) { for (j=0;j<5;j++) { for (k=0;k<4;k++) { for (l=0;l<4;l++) { int n = l + k * 4 + j * 16 + i * 80; n_slen2[n] = i|(j<<3)|(k<<6)|(l<<9)|(0<<12); } } } } for (i=0;i<5;i++) { for (j=0;j<5;j++) { for (k=0;k<4;k++) { int n = k + j * 4 + i * 20; n_slen2[n+400] = i|(j<<3)|(k<<6)|(1<<12); } } } doneInit = True; } MP3FrameParams::~MP3FrameParams() { } void MP3FrameParams::setParamsFromHeader() { if (hdr & (1<<20)) { isMPEG2 = (hdr & (1<<19)) ? 0x0 : 0x1; isMPEG2_5 = 0; } else { isMPEG2 = 1; isMPEG2_5 = 1; } layer = 4-((hdr>>17)&3); if (layer == 4) layer = 3; // layer==4 is not allowed bitrateIndex = ((hdr>>12)&0xf); if (isMPEG2_5) { samplingFreqIndex = ((hdr>>10)&0x3) + 6; } else { samplingFreqIndex = ((hdr>>10)&0x3) + (isMPEG2*3); } hasCRC = (hdr & 0x10000) == 0; padding = ((hdr>>9)&0x1); extension = ((hdr>>8)&0x1); mode = ((hdr>>6)&0x3); mode_ext = ((hdr>>4)&0x3); copyright = ((hdr>>3)&0x1); original = ((hdr>>2)&0x1); emphasis = hdr & 0x3; stereo = (mode == MPG_MD_MONO) ? 1 : 2; if (((hdr>>10)&0x3) == 0x3) { #ifdef DEBUG_ERRORS fprintf(stderr,"Stream error - hdr: 0x%08x\n", hdr); #endif } bitrate = live_tabsel[isMPEG2][layer-1][bitrateIndex]; samplingFreq = live_freqs[samplingFreqIndex]; isStereo = (stereo > 1); isFreeFormat = (bitrateIndex == 0); frameSize = ComputeFrameSize(bitrate, samplingFreq, padding, isMPEG2, layer); sideInfoSize = computeSideInfoSize(); } unsigned MP3FrameParams::computeSideInfoSize() { unsigned size; if (isMPEG2) { size = isStereo ? 17 : 9; } else { size = isStereo ? 32 : 17; } if (hasCRC) { size += 2; } return size; } unsigned ComputeFrameSize(unsigned bitrate, unsigned samplingFreq, Boolean usePadding, Boolean isMPEG2, unsigned char layer) { if (samplingFreq == 0) return 0; unsigned const bitrateMultiplier = (layer == 1) ? 12000*4 : 144000; unsigned framesize; framesize = bitrate*bitrateMultiplier; framesize /= samplingFreq<<(isMPEG2 ? 
1 : 0); framesize = framesize + usePadding - 4; return framesize; } #define TRUNC_FAIRLY static unsigned updateSideInfoSizes(MP3SideInfo& sideInfo, Boolean isMPEG2, unsigned char const* mainDataPtr, unsigned allowedNumBits, unsigned& part23Length0a, unsigned& part23Length0aTruncation, unsigned& part23Length0b, unsigned& part23Length0bTruncation, unsigned& part23Length1a, unsigned& part23Length1aTruncation, unsigned& part23Length1b, unsigned& part23Length1bTruncation) { unsigned p23L0, p23L1 = 0, p23L0Trunc = 0, p23L1Trunc = 0; p23L0 = sideInfo.ch[0].gr[0].part2_3_length; p23L1 = isMPEG2 ? 0 : sideInfo.ch[0].gr[1].part2_3_length; #ifdef TRUNC_ONLY0 if (p23L0 < allowedNumBits) allowedNumBits = p23L0; #endif #ifdef TRUNC_ONLY1 if (p23L1 < allowedNumBits) allowedNumBits = p23L1; #endif if (p23L0 + p23L1 > allowedNumBits) { /* We need to shorten one or both fields */ unsigned truncation = p23L0 + p23L1 - allowedNumBits; #ifdef TRUNC_FAIRLY p23L0Trunc = (truncation*p23L0)/(p23L0 + p23L1); p23L1Trunc = truncation - p23L0Trunc; #endif #if defined(TRUNC_FAVOR0) || defined(TRUNC_ONLY0) p23L1Trunc = (truncation>p23L1) ? p23L1 : truncation; p23L0Trunc = truncation - p23L1Trunc; #endif #if defined(TRUNC_FAVOR1) || defined(TRUNC_ONLY1) p23L0Trunc = (truncation>p23L0) ? p23L0 : truncation; p23L1Trunc = truncation - p23L0Trunc; #endif } /* ASSERT: (p23L0Trunc <= p23L0) && (p23l1Trunc <= p23L1) */ p23L0 -= p23L0Trunc; p23L1 -= p23L1Trunc; #ifdef DEBUG fprintf(stderr, "updateSideInfoSizes (allowed: %d): %d->%d, %d->%d\n", allowedNumBits, p23L0+p23L0Trunc, p23L0, p23L1+p23L1Trunc, p23L1); #endif // The truncations computed above are still estimates. We need to // adjust them so that the new fields will continue to end on // Huffman-encoded sample boundaries: updateSideInfoForHuffman(sideInfo, isMPEG2, mainDataPtr, p23L0, p23L1, part23Length0a, part23Length0aTruncation, part23Length0b, part23Length0bTruncation, part23Length1a, part23Length1aTruncation, part23Length1b, part23Length1bTruncation); p23L0 = part23Length0a + part23Length0b; p23L1 = part23Length1a + part23Length1b; sideInfo.ch[0].gr[0].part2_3_length = p23L0; sideInfo.ch[0].gr[1].part2_3_length = p23L1; part23Length0bTruncation += sideInfo.ch[1].gr[0].part2_3_length; /* allow for stereo */ sideInfo.ch[1].gr[0].part2_3_length = 0; /* output mono */ sideInfo.ch[1].gr[1].part2_3_length = 0; /* output mono */ return p23L0 + p23L1; } Boolean GetADUInfoFromMP3Frame(unsigned char const* framePtr, unsigned totFrameSize, unsigned& hdr, unsigned& frameSize, MP3SideInfo& sideInfo, unsigned& sideInfoSize, unsigned& backpointer, unsigned& aduSize) { if (totFrameSize < 4) return False; // there's not enough data MP3FrameParams fr; fr.hdr = ((unsigned)framePtr[0] << 24) | ((unsigned)framePtr[1] << 16) | ((unsigned)framePtr[2] << 8) | (unsigned)framePtr[3]; fr.setParamsFromHeader(); fr.setBytePointer(framePtr + 4, totFrameSize - 4); // skip hdr frameSize = 4 + fr.frameSize; if (fr.layer != 3) { // Special case for non-layer III frames backpointer = 0; sideInfoSize = 0; aduSize = fr.frameSize; return True; } sideInfoSize = fr.sideInfoSize; if (totFrameSize < 4 + sideInfoSize) return False; // not enough data fr.getSideInfo(sideInfo); hdr = fr.hdr; backpointer = sideInfo.main_data_begin; unsigned numBits = sideInfo.ch[0].gr[0].part2_3_length; numBits += sideInfo.ch[0].gr[1].part2_3_length; numBits += sideInfo.ch[1].gr[0].part2_3_length; numBits += sideInfo.ch[1].gr[1].part2_3_length; aduSize = (numBits+7)/8; #ifdef DEBUG fprintf(stderr, "mp3GetADUInfoFromFrame: hdr: 
%08x, frameSize: %d, part2_3_lengths: %d,%d,%d,%d, aduSize: %d, backpointer: %d\n",
	  hdr, frameSize,
	  sideInfo.ch[0].gr[0].part2_3_length, sideInfo.ch[0].gr[1].part2_3_length,
	  sideInfo.ch[1].gr[0].part2_3_length, sideInfo.ch[1].gr[1].part2_3_length,
	  aduSize, backpointer);
#endif

  return True;
}

static void getSideInfo1(MP3FrameParams& fr, MP3SideInfo& si, int stereo,
			 int ms_stereo, long sfreq, int /*single*/) {
  int ch, gr;
#if 0
  int powdiff = (single == 3) ? 4 : 0;
#endif

  /* initialize all four "part2_3_length" fields to zero: */
  si.ch[0].gr[0].part2_3_length = 0; si.ch[1].gr[0].part2_3_length = 0;
  si.ch[0].gr[1].part2_3_length = 0; si.ch[1].gr[1].part2_3_length = 0;

  si.main_data_begin = fr.getBits(9);
  if (stereo == 1)
    si.private_bits = fr.getBits(5);
  else
    si.private_bits = fr.getBits(3);

  for (ch=0; ch<stereo; ch++) {
    si.ch[ch].gr[0].scfsi = -1;
    si.ch[ch].gr[1].scfsi = fr.getBits(4);
  }

  for (gr=0; gr<2; gr++) {
    for (ch=0; ch<stereo; ch++) {
      MP3SideInfo::gr_info_s& gr_info = si.ch[ch].gr[gr];

      gr_info.part2_3_length = fr.getBits(12);
      gr_info.big_values = fr.getBits(9);
      gr_info.global_gain = fr.getBits(8);
      gr_info.scalefac_compress = fr.getBits(4);
      /* window-switching flag == 1 for block_type != 0; block_type == 0 -> win-sw-flag = 0 */
      gr_info.window_switching_flag = fr.get1Bit();
      if (gr_info.window_switching_flag) {
        int i;
        gr_info.block_type = fr.getBits(2);
        gr_info.mixed_block_flag = fr.get1Bit();
        gr_info.table_select[0] = fr.getBits(5);
        gr_info.table_select[1] = fr.getBits(5);
        /*
         * table_select[2] not needed, because there is no region2,
         * but to satisfy some verification tools we set it anyway.
         */
        gr_info.table_select[2] = 0;
        for (i=0;i<3;i++) {
          gr_info.subblock_gain[i] = fr.getBits(3);
          gr_info.full_gain[i] = gr_info.pow2gain + ((gr_info.subblock_gain[i])<<3);
        }
#ifdef DEBUG_ERRORS
        if (gr_info.block_type == 0) {
          fprintf(stderr,"Blocktype == 0 and window-switching == 1 not allowed.\n");
        }
#endif
        /* region_count/start parameters are implicit in this case. */
        gr_info.region1start = 36>>1;
        gr_info.region2start = 576>>1;
      } else {
        int i,r0c,r1c;
        for (i=0; i<3; i++) {
          gr_info.table_select[i] = fr.getBits(5);
        }
        r0c = gr_info.region0_count = fr.getBits(4);
        r1c = gr_info.region1_count = fr.getBits(3);
        gr_info.region1start = bandInfo[sfreq].longIdx[r0c+1] >> 1 ;
        gr_info.region2start = bandInfo[sfreq].longIdx[r0c+1+r1c+1] >> 1;
        gr_info.block_type = 0;
        gr_info.mixed_block_flag = 0;
      }
      gr_info.preflag = fr.get1Bit();
      gr_info.scalefac_scale = fr.get1Bit();
      gr_info.count1table_select = fr.get1Bit();
    }
  }
}

static void getSideInfo2(MP3FrameParams& fr, MP3SideInfo& si, int stereo,
			 int ms_stereo, long sfreq, int /*single*/) {
  int ch;
#if 0
  int powdiff = (single == 3) ? 4 : 0;
#endif

  /* initialize all four "part2_3_length" fields to zero: */
  si.ch[0].gr[0].part2_3_length = 0; si.ch[1].gr[0].part2_3_length = 0;
  si.ch[0].gr[1].part2_3_length = 0; si.ch[1].gr[1].part2_3_length = 0;

  si.main_data_begin = fr.getBits(8);
  if (stereo == 1)
    si.private_bits = fr.get1Bit();
  else
    si.private_bits = fr.getBits(2);

  for (ch=0; ch<stereo; ch++) {
    MP3SideInfo::gr_info_s& gr_info = si.ch[ch].gr[0];

    gr_info.part2_3_length = fr.getBits(12);
    gr_info.big_values = fr.getBits(9);
    gr_info.global_gain = fr.getBits(8);
    gr_info.scalefac_compress = fr.getBits(9);
    /* window-switching flag == 1 for block_type != 0; block_type == 0 -> win-sw-flag = 0 */
    gr_info.window_switching_flag = fr.get1Bit();
    if (gr_info.window_switching_flag) {
      int i;
      gr_info.block_type = fr.getBits(2);
      gr_info.mixed_block_flag = fr.get1Bit();
      gr_info.table_select[0] = fr.getBits(5);
      gr_info.table_select[1] = fr.getBits(5);
      /*
       * table_select[2] not needed, because there is no region2,
       * but to satisfy some verification tools we set it anyway.
       */
      gr_info.table_select[2] = 0;
      for (i=0;i<3;i++) {
        gr_info.subblock_gain[i] = fr.getBits(3);
        gr_info.full_gain[i] = gr_info.pow2gain + ((gr_info.subblock_gain[i])<<3);
      }
#ifdef DEBUG_ERRORS
      if (gr_info.block_type == 0) {
        fprintf(stderr,"Blocktype == 0 and window-switching == 1 not allowed.\n");
      }
#endif
      /* region_count/start parameters are implicit in this case. */
      /* check this again!
*/ if (gr_info.block_type == 2) gr_info.region1start = 36>>1; else { gr_info.region1start = 54>>1; } gr_info.region2start = 576>>1; } else { int i,r0c,r1c; for (i=0; i<3; i++) { gr_info.table_select[i] = fr.getBits(5); } r0c = gr_info.region0_count = fr.getBits(4); r1c = gr_info.region1_count = fr.getBits(3); gr_info.region1start = bandInfo[sfreq].longIdx[r0c+1] >> 1 ; gr_info.region2start = bandInfo[sfreq].longIdx[r0c+1+r1c+1] >> 1; gr_info.block_type = 0; gr_info.mixed_block_flag = 0; } gr_info.scalefac_scale = fr.get1Bit(); gr_info.count1table_select = fr.get1Bit(); } } #define MPG_MD_JOINT_STEREO 1 void MP3FrameParams::getSideInfo(MP3SideInfo& si) { // First skip over the CRC if present: if (hasCRC) getBits(16); int single = -1; int ms_stereo; int sfreq = samplingFreqIndex; if (stereo == 1) { single = 0; } ms_stereo = (mode == MPG_MD_JOINT_STEREO) && (mode_ext & 0x2); if (isMPEG2) { getSideInfo2(*this, si, stereo, ms_stereo, sfreq, single); } else { getSideInfo1(*this, si, stereo, ms_stereo, sfreq, single); } } static void putSideInfo1(BitVector& bv, MP3SideInfo const& si, Boolean isStereo) { int ch, gr, i; int stereo = isStereo ? 2 : 1; bv.putBits(si.main_data_begin,9); if (stereo == 1) bv.putBits(si.private_bits, 5); else bv.putBits(si.private_bits, 3); for (ch=0; ch= bitrate) return i; } // "bitrate" was larger than any possible, so return the largest possible: return 14; } static void outputHeader(unsigned char* toPtr, unsigned hdr) { toPtr[0] = (unsigned char)(hdr>>24); toPtr[1] = (unsigned char)(hdr>>16); toPtr[2] = (unsigned char)(hdr>>8); toPtr[3] = (unsigned char)(hdr); } static void assignADUBackpointer(MP3FrameParams const& fr, unsigned aduSize, MP3SideInfo& sideInfo, unsigned& availableBytesForBackpointer) { // Give the ADU as large a backpointer as possible: unsigned maxBackpointerSize = fr.isMPEG2 ? 
255 : 511; unsigned backpointerSize = availableBytesForBackpointer; if (backpointerSize > maxBackpointerSize) { backpointerSize = maxBackpointerSize; } // Store the new backpointer now: sideInfo.main_data_begin = backpointerSize; // Figure out how many bytes are available for the *next* ADU's backpointer: availableBytesForBackpointer = backpointerSize + fr.frameSize - fr.sideInfoSize ; if (availableBytesForBackpointer < aduSize) { availableBytesForBackpointer = 0; } else { availableBytesForBackpointer -= aduSize; } } unsigned TranscodeMP3ADU(unsigned char const* fromPtr, unsigned fromSize, unsigned toBitrate, unsigned char* toPtr, unsigned toMaxSize, unsigned& availableBytesForBackpointer) { // Begin by parsing the input ADU's parameters: unsigned hdr, inFrameSize, inSideInfoSize, backpointer, inAduSize; MP3SideInfo sideInfo; if (!GetADUInfoFromMP3Frame(fromPtr, fromSize, hdr, inFrameSize, sideInfo, inSideInfoSize, backpointer, inAduSize)) { return 0; } fromPtr += (4+inSideInfoSize); // skip to 'main data' // Alter the 4-byte MPEG header to reflect the output ADU: // (different bitrate; mono; no CRC) Boolean isMPEG2 = ((hdr&0x00080000) == 0); unsigned toBitrateIndex = MP3BitrateToBitrateIndex(toBitrate, isMPEG2); hdr &=~ 0xF000; hdr |= (toBitrateIndex<<12); // set bitrate index hdr |= 0x10200; // turn on !error-prot and padding bits hdr &=~ 0xC0; hdr |= 0xC0; // set mode to 3 (mono) // Set up the rest of the parameters of the new ADU: MP3FrameParams outFr; outFr.hdr = hdr; outFr.setParamsFromHeader(); // Figure out how big to make the output ADU: unsigned inAveAduSize = inFrameSize - inSideInfoSize; unsigned outAveAduSize = outFr.frameSize - outFr.sideInfoSize; unsigned desiredOutAduSize /*=inAduSize*outAveAduSize/inAveAduSize*/ = (2*inAduSize*outAveAduSize + inAveAduSize)/(2*inAveAduSize); // this rounds to the nearest integer if (toMaxSize < (4 + outFr.sideInfoSize)) return 0; unsigned maxOutAduSize = toMaxSize - (4 + outFr.sideInfoSize); if (desiredOutAduSize > maxOutAduSize) { desiredOutAduSize = maxOutAduSize; } // Figure out the new sizes of the various 'part23 lengths', // and how much they are truncated: unsigned part23Length0a, part23Length0aTruncation; unsigned part23Length0b, part23Length0bTruncation; unsigned part23Length1a, part23Length1aTruncation; unsigned part23Length1b, part23Length1bTruncation; unsigned numAduBits = updateSideInfoSizes(sideInfo, outFr.isMPEG2, fromPtr, 8*desiredOutAduSize, part23Length0a, part23Length0aTruncation, part23Length0b, part23Length0bTruncation, part23Length1a, part23Length1aTruncation, part23Length1b, part23Length1bTruncation); #ifdef DEBUG fprintf(stderr, "shrinkage %d->%d [(%d,%d),(%d,%d)] (trunc: [(%d,%d),(%d,%d)]) {%d}\n", inAduSize, (numAduBits+7)/8, part23Length0a, part23Length0b, part23Length1a, part23Length1b, part23Length0aTruncation, part23Length0bTruncation, part23Length1aTruncation, part23Length1bTruncation, maxOutAduSize); #endif unsigned actualOutAduSize = (numAduBits+7)/8; // Give the new ADU an appropriate 'backpointer': assignADUBackpointer(outFr, actualOutAduSize, sideInfo, availableBytesForBackpointer); ///// Now output the new ADU: // 4-byte header outputHeader(toPtr, hdr); toPtr += 4; // side info PutMP3SideInfoIntoFrame(sideInfo, outFr, toPtr); toPtr += outFr.sideInfoSize; // 'main data', using the new lengths unsigned toBitOffset = 0; unsigned fromBitOffset = 0; /* rebuild portion 0a: */ memmove(toPtr, fromPtr, (part23Length0a+7)/8); toBitOffset += part23Length0a; fromBitOffset += part23Length0a + 
part23Length0aTruncation; /* rebuild portion 0b: */ shiftBits(toPtr, toBitOffset, fromPtr, fromBitOffset, part23Length0b); toBitOffset += part23Length0b; fromBitOffset += part23Length0b + part23Length0bTruncation; /* rebuild portion 1a: */ shiftBits(toPtr, toBitOffset, fromPtr, fromBitOffset, part23Length1a); toBitOffset += part23Length1a; fromBitOffset += part23Length1a + part23Length1aTruncation; /* rebuild portion 1b: */ shiftBits(toPtr, toBitOffset, fromPtr, fromBitOffset, part23Length1b); toBitOffset += part23Length1b; /* zero out any remaining bits (probably unnecessary, but...) */ unsigned char const zero = '\0'; shiftBits(toPtr, toBitOffset, &zero, 0, actualOutAduSize*8 - numAduBits); return 4 + outFr.sideInfoSize + actualOutAduSize; } live/liveMedia/FramedFileSource.cpp000444 001751 000000 00000002143 12265042432 017521 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Framed File Sources // Implementation #include "FramedFileSource.hh" ////////// FramedFileSource ////////// FramedFileSource::FramedFileSource(UsageEnvironment& env, FILE* fid) : FramedSource(env), fFid(fid) { } FramedFileSource::~FramedFileSource() { } live/liveMedia/MP3ADUdescriptor.hh000444 001751 000000 00000003465 12265042432 017217 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Descriptor preceding frames of 'ADU' MP3 streams (for improved loss-tolerance) // C++ header #ifndef _MP3_ADU_DESCRIPTOR_HH #define _MP3_ADU_DESCRIPTOR_HH // A class for handling the descriptor that begins each ADU frame: // (Note: We don't yet implement fragmentation) class ADUdescriptor { public: // Operations for generating a new descriptor static unsigned computeSize(unsigned remainingFrameSize) { return remainingFrameSize >= 64 ? 
2 : 1; } static unsigned generateDescriptor(unsigned char*& toPtr, unsigned remainingFrameSize); // returns descriptor size; increments "toPtr" afterwards static void generateTwoByteDescriptor(unsigned char*& toPtr, unsigned remainingFrameSize); // always generates a 2-byte descriptor, even if "remainingFrameSize" is // small enough for a 1-byte descriptor // Operations for reading a descriptor static unsigned getRemainingFrameSize(unsigned char*& fromPtr); // increments "fromPtr" afterwards }; #endif live/liveMedia/JPEGVideoRTPSink.cpp000444 001751 000000 00000011505 12265042432 017273 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for JPEG video (RFC 2435) // Implementation #include "JPEGVideoRTPSink.hh" #include "JPEGVideoSource.hh" JPEGVideoRTPSink ::JPEGVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs) : VideoRTPSink(env, RTPgs, 26, 90000, "JPEG") { } JPEGVideoRTPSink::~JPEGVideoRTPSink() { } JPEGVideoRTPSink* JPEGVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) { return new JPEGVideoRTPSink(env, RTPgs); } Boolean JPEGVideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) { return source.isJPEGVideoSource(); } Boolean JPEGVideoRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { // A packet can contain only one frame return False; } void JPEGVideoRTPSink ::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* /*frameStart*/, unsigned /*numBytesInFrame*/, struct timeval framePresentationTime, unsigned numRemainingBytes) { // Our source is known to be a JPEGVideoSource JPEGVideoSource* source = (JPEGVideoSource*)fSource; if (source == NULL) return; // sanity check u_int8_t mainJPEGHeader[8]; // the special header u_int8_t const type = source->type(); mainJPEGHeader[0] = 0; // Type-specific mainJPEGHeader[1] = fragmentationOffset >> 16; mainJPEGHeader[2] = fragmentationOffset >> 8; mainJPEGHeader[3] = fragmentationOffset; mainJPEGHeader[4] = type; mainJPEGHeader[5] = source->qFactor(); mainJPEGHeader[6] = source->width(); mainJPEGHeader[7] = source->height(); setSpecialHeaderBytes(mainJPEGHeader, sizeof mainJPEGHeader); unsigned restartMarkerHeaderSize = 0; // by default if (type >= 64 && type <= 127) { // There is also a Restart Marker Header: restartMarkerHeaderSize = 4; u_int16_t const restartInterval = source->restartInterval(); // should be non-zero u_int8_t restartMarkerHeader[4]; restartMarkerHeader[0] = restartInterval>>8; restartMarkerHeader[1] = restartInterval&0xFF; restartMarkerHeader[2] = restartMarkerHeader[3] = 0xFF; // F=L=1; Restart Count = 0x3FFF setSpecialHeaderBytes(restartMarkerHeader, restartMarkerHeaderSize, sizeof mainJPEGHeader/* start position */); } if 
(fragmentationOffset == 0 && source->qFactor() >= 128) { // There is also a Quantization Header: u_int8_t precision; u_int16_t length; u_int8_t const* quantizationTables = source->quantizationTables(precision, length); unsigned const quantizationHeaderSize = 4 + length; u_int8_t* quantizationHeader = new u_int8_t[quantizationHeaderSize]; quantizationHeader[0] = 0; // MBZ quantizationHeader[1] = precision; quantizationHeader[2] = length >> 8; quantizationHeader[3] = length&0xFF; if (quantizationTables != NULL) { // sanity check for (u_int16_t i = 0; i < length; ++i) { quantizationHeader[4+i] = quantizationTables[i]; } } setSpecialHeaderBytes(quantizationHeader, quantizationHeaderSize, sizeof mainJPEGHeader + restartMarkerHeaderSize/* start position */); delete[] quantizationHeader; } if (numRemainingBytes == 0) { // This packet contains the last (or only) fragment of the frame. // Set the RTP 'M' ('marker') bit: setMarkerBit(); } // Also set the RTP timestamp: setTimestamp(framePresentationTime); } unsigned JPEGVideoRTPSink::specialHeaderSize() const { // Our source is known to be a JPEGVideoSource JPEGVideoSource* source = (JPEGVideoSource*)fSource; if (source == NULL) return 0; // sanity check unsigned headerSize = 8; // by default u_int8_t const type = source->type(); if (type >= 64 && type <= 127) { // There is also a Restart Marker Header: headerSize += 4; } if (curFragmentationOffset() == 0 && source->qFactor() >= 128) { // There is also a Quantization Header: u_int8_t dummy; u_int16_t quantizationTablesSize; (void)(source->quantizationTables(dummy, quantizationTablesSize)); headerSize += 4 + quantizationTablesSize; } return headerSize; } live/liveMedia/MPEG4LATMAudioRTPSource.cpp000444 001751 000000 00000017523 12265042432 020375 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// MPEG-4 audio, using LATM multiplexing // Implementation #include "MPEG4LATMAudioRTPSource.hh" ////////// LATMBufferedPacket and LATMBufferedPacketFactory ////////// class LATMBufferedPacket: public BufferedPacket { public: LATMBufferedPacket(Boolean includeLATMDataLengthField); virtual ~LATMBufferedPacket(); private: // redefined virtual functions virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize); private: Boolean fIncludeLATMDataLengthField; }; class LATMBufferedPacketFactory: public BufferedPacketFactory { private: // redefined virtual functions virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource); }; ///////// MPEG4LATMAudioRTPSource implementation //////// MPEG4LATMAudioRTPSource* MPEG4LATMAudioRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new MPEG4LATMAudioRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } MPEG4LATMAudioRTPSource ::MPEG4LATMAudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, new LATMBufferedPacketFactory), fIncludeLATMDataLengthField(True) { } MPEG4LATMAudioRTPSource::~MPEG4LATMAudioRTPSource() { } void MPEG4LATMAudioRTPSource::omitLATMDataLengthField() { fIncludeLATMDataLengthField = False; } Boolean MPEG4LATMAudioRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame; // whether the *previous* packet ended a frame // The RTP "M" (marker) bit indicates the last fragment of a frame: fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); // There is no special header resultSpecialHeaderSize = 0; return True; } char const* MPEG4LATMAudioRTPSource::MIMEtype() const { return "audio/MP4A-LATM"; } ////////// LATMBufferedPacket and LATMBufferedPacketFactory implementation LATMBufferedPacket::LATMBufferedPacket(Boolean includeLATMDataLengthField) : fIncludeLATMDataLengthField(includeLATMDataLengthField) { } LATMBufferedPacket::~LATMBufferedPacket() { } unsigned LATMBufferedPacket ::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) { // Look at the LATM data length byte(s), to determine the size // of the LATM payload. unsigned resultFrameSize = 0; unsigned i; for (i = 0; i < dataSize; ++i) { resultFrameSize += framePtr[i]; if (framePtr[i] != 0xFF) break; } ++i; if (fIncludeLATMDataLengthField) { resultFrameSize += i; } else { framePtr += i; dataSize -= i; } return (resultFrameSize <= dataSize) ? 
resultFrameSize : dataSize; } BufferedPacket* LATMBufferedPacketFactory ::createNewPacket(MultiFramedRTPSource* ourSource) { MPEG4LATMAudioRTPSource* source = (MPEG4LATMAudioRTPSource*)ourSource; return new LATMBufferedPacket(source->returnedFrameIncludesLATMDataLengthField()); } ////////// parseStreamMuxConfigStr() implementation ////////// static Boolean getNibble(char const*& configStr, unsigned char& resultNibble) { char c = configStr[0]; if (c == '\0') return False; // we've reached the end if (c >= '0' && c <= '9') { resultNibble = c - '0'; } else if (c >= 'A' && c <= 'F') { resultNibble = 10 + c - 'A'; } else if (c >= 'a' && c <= 'f') { resultNibble = 10 + c - 'a'; } else { return False; } ++configStr; // move to the next nibble return True; } static Boolean getByte(char const*& configStr, unsigned char& resultByte) { resultByte = 0; // by default, in case parsing fails unsigned char firstNibble; if (!getNibble(configStr, firstNibble)) return False; resultByte = firstNibble<<4; unsigned char secondNibble = 0; if (!getNibble(configStr, secondNibble) && configStr[0] != '\0') { // There's a second nibble, but it's malformed return False; } resultByte |= secondNibble; return True; } Boolean parseStreamMuxConfigStr(char const* configStr, // result parameters: Boolean& audioMuxVersion, Boolean& allStreamsSameTimeFraming, unsigned char& numSubFrames, unsigned char& numProgram, unsigned char& numLayer, unsigned char*& audioSpecificConfig, unsigned& audioSpecificConfigSize) { // Set default versions of the result parameters: audioMuxVersion = False; allStreamsSameTimeFraming = True; numSubFrames = numProgram = numLayer = 0; audioSpecificConfig = NULL; audioSpecificConfigSize = 0; do { if (configStr == NULL) break; unsigned char nextByte; if (!getByte(configStr, nextByte)) break; audioMuxVersion = (nextByte&0x80) != 0; if (audioMuxVersion) break; allStreamsSameTimeFraming = ((nextByte&0x40)>>6) != 0; numSubFrames = (nextByte&0x3F); if (!getByte(configStr, nextByte)) break; numProgram = (nextByte&0xF0)>>4; numLayer = (nextByte&0x0E)>>1; // The one remaining bit, and the rest of the string, // are used for "audioSpecificConfig": unsigned char remainingBit = nextByte&1; unsigned ascSize = (strlen(configStr)+1)/2 + 1; audioSpecificConfig = new unsigned char[ascSize]; Boolean parseSuccess; unsigned i = 0; do { nextByte = 0; parseSuccess = getByte(configStr, nextByte); audioSpecificConfig[i++] = (remainingBit<<7)|((nextByte&0xFE)>>1); remainingBit = nextByte&1; } while (parseSuccess); if (i != ascSize) break; // part of the remaining string was bad audioSpecificConfigSize = ascSize; return True; // parsing succeeded } while (0); delete[] audioSpecificConfig; return False; // parsing failed } unsigned char* parseStreamMuxConfigStr(char const* configStr, // result parameter: unsigned& audioSpecificConfigSize) { Boolean audioMuxVersion, allStreamsSameTimeFraming; unsigned char numSubFrames, numProgram, numLayer; unsigned char* audioSpecificConfig; if (!parseStreamMuxConfigStr(configStr, audioMuxVersion, allStreamsSameTimeFraming, numSubFrames, numProgram, numLayer, audioSpecificConfig, audioSpecificConfigSize)) { audioSpecificConfigSize = 0; return NULL; } return audioSpecificConfig; } unsigned char* parseGeneralConfigStr(char const* configStr, // result parameter: unsigned& configSize) { unsigned char* config = NULL; do { if (configStr == NULL) break; configSize = (strlen(configStr)+1)/2; config = new unsigned char[configSize]; if (config == NULL) break; unsigned i; for (i = 0; i < configSize; ++i) { 
if (!getByte(configStr, config[i])) break; } if (i != configSize) break; // part of the string was bad return config; } while (0); configSize = 0; delete[] config; return NULL; } live/liveMedia/AC3AudioRTPSink.cpp000444 001751 000000 00000006752 12265042432 017117 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for AC3 audio // Implementation #include "AC3AudioRTPSink.hh" AC3AudioRTPSink::AC3AudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency) : AudioRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "AC3"), fTotNumFragmentsUsed(0) { } AC3AudioRTPSink::~AC3AudioRTPSink() { } AC3AudioRTPSink* AC3AudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency) { return new AC3AudioRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } Boolean AC3AudioRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { // (For now) allow at most 1 frame in a single packet: return False; } void AC3AudioRTPSink ::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { // Set the 2-byte "payload header", as defined in RFC 4184. unsigned char headers[2]; Boolean isFragment = numRemainingBytes > 0 || fragmentationOffset > 0; if (!isFragment) { headers[0] = 0; // One or more complete frames headers[1] = 1; // because we (for now) allow at most 1 frame per packet } else { if (fragmentationOffset > 0) { headers[0] = 3; // Fragment of frame other than initial fragment } else { // An initial fragment of the frame unsigned const totalFrameSize = fragmentationOffset + numBytesInFrame + numRemainingBytes; unsigned const fiveEighthsPoint = totalFrameSize/2 + totalFrameSize/8; headers[0] = numBytesInFrame >= fiveEighthsPoint ? 1 : 2; // Because this outgoing packet will be full (because it's an initial fragment), we can compute how many total // fragments (and thus packets) will make up the complete AC-3 frame: fTotNumFragmentsUsed = (totalFrameSize + (numBytesInFrame-1))/numBytesInFrame; } headers[1] = fTotNumFragmentsUsed; } setSpecialHeaderBytes(headers, sizeof headers); if (numRemainingBytes == 0) { // This packet contains the last (or only) fragment of the frame. 
    // Set the RTP 'M' ('marker') bit:
    setMarkerBit();
  }

  // Important: Also call our base class's doSpecialFrameHandling(),
  // to set the packet's timestamp:
  MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset, frameStart,
					     numBytesInFrame, framePresentationTime,
					     numRemainingBytes);
}

unsigned AC3AudioRTPSink::specialHeaderSize() const {
  return 2;
}

live/liveMedia/FramedFilter.cpp

/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Framed Filters
// Implementation

#include "FramedFilter.hh"

////////// FramedFilter //////////

#include <string.h>

void FramedFilter::detachInputSource() {
  if (fInputSource != NULL) {
    fInputSource->stopGettingFrames();
    reassignInputSource(NULL);
  }
}

FramedFilter::FramedFilter(UsageEnvironment& env, FramedSource* inputSource)
  : FramedSource(env), fInputSource(inputSource) {
}

FramedFilter::~FramedFilter() {
  Medium::close(fInputSource);
}

// Default implementations of needed virtual functions.  These merely
// call the same function in the input source - i.e., act like a 'null filter'.

char const* FramedFilter::MIMEtype() const {
  if (fInputSource == NULL) return "";
  return fInputSource->MIMEtype();
}

void FramedFilter::getAttributes() const {
  if (fInputSource != NULL) fInputSource->getAttributes();
}

void FramedFilter::doStopGettingFrames() {
  FramedSource::doStopGettingFrames();
  if (fInputSource != NULL) fInputSource->stopGettingFrames();
}
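// Illustrative sketch (not part of the original sources): FramedFilter above
// supplies 'null filter' defaults, and a concrete filter typically redefines
// doGetNextFrame() to pull from fInputSource, then post-processes in its
// "after getting" callback.  A skeletal, hypothetical pass-through filter
// (class name and callback name are this sketch's own) might look like:

#include "FramedFilter.hh"

class PassThroughFilter: public FramedFilter {
public:
  PassThroughFilter(UsageEnvironment& env, FramedSource* inputSource)
    : FramedFilter(env, inputSource) {}

private:
  virtual void doGetNextFrame() {
    // Ask our input source to deliver directly into our reader's buffer:
    fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this,
			       FramedSource::handleClosure, this);
  }

  static void afterGettingFrame(void* clientData, unsigned frameSize,
				unsigned numTruncatedBytes,
				struct timeval presentationTime,
				unsigned durationInMicroseconds) {
    PassThroughFilter* filter = (PassThroughFilter*)clientData;
    // (A real filter would transform the data in filter->fTo here.)
    filter->fFrameSize = frameSize;
    filter->fNumTruncatedBytes = numTruncatedBytes;
    filter->fPresentationTime = presentationTime;
    filter->fDurationInMicroseconds = durationInMicroseconds;
    FramedSource::afterGetting(filter); // deliver to our downstream reader
  }
};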
live/liveMedia/FramedSource.cpp

/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Framed Sources
// Implementation

#include "FramedSource.hh"
#include <stdlib.h>

////////// FramedSource //////////

FramedSource::FramedSource(UsageEnvironment& env)
  : MediaSource(env),
    fAfterGettingFunc(NULL), fAfterGettingClientData(NULL),
    fOnCloseFunc(NULL), fOnCloseClientData(NULL),
    fIsCurrentlyAwaitingData(False) {
  fPresentationTime.tv_sec = fPresentationTime.tv_usec = 0; // initially
}

FramedSource::~FramedSource() {
}

Boolean FramedSource::isFramedSource() const {
  return True;
}

Boolean FramedSource::lookupByName(UsageEnvironment& env, char const* sourceName,
				   FramedSource*& resultSource) {
  resultSource = NULL; // unless we succeed

  MediaSource* source;
  if (!MediaSource::lookupByName(env, sourceName, source)) return False;

  if (!source->isFramedSource()) {
    env.setResultMsg(sourceName, " is not a framed source");
    return False;
  }

  resultSource = (FramedSource*)source;
  return True;
}

void FramedSource::getNextFrame(unsigned char* to, unsigned maxSize,
				afterGettingFunc* afterGettingFunc,
				void* afterGettingClientData,
				onCloseFunc* onCloseFunc,
				void* onCloseClientData) {
  // Make sure we're not already being read:
  if (fIsCurrentlyAwaitingData) {
    envir() << "FramedSource[" << this << "]::getNextFrame(): attempting to read more than once at the same time!\n";
    envir().internalError();
  }

  fTo = to;
  fMaxSize = maxSize;
  fNumTruncatedBytes = 0; // by default; could be changed by doGetNextFrame()
  fDurationInMicroseconds = 0; // by default; could be changed by doGetNextFrame()
  fAfterGettingFunc = afterGettingFunc;
  fAfterGettingClientData = afterGettingClientData;
  fOnCloseFunc = onCloseFunc;
  fOnCloseClientData = onCloseClientData;
  fIsCurrentlyAwaitingData = True;

  doGetNextFrame();
}

void FramedSource::afterGetting(FramedSource* source) {
  source->fIsCurrentlyAwaitingData = False;
  // indicates that we can be read again
  // Note that this needs to be done here, in case the "fAfterFunc"
  // called below tries to read another frame (which it usually will)

  if (source->fAfterGettingFunc != NULL) {
    (*(source->fAfterGettingFunc))(source->fAfterGettingClientData,
				   source->fFrameSize, source->fNumTruncatedBytes,
				   source->fPresentationTime,
				   source->fDurationInMicroseconds);
  }
}

void FramedSource::handleClosure(void* clientData) {
  FramedSource* source = (FramedSource*)clientData;
  source->fIsCurrentlyAwaitingData = False; // because we got a close instead

  if (source->fOnCloseFunc != NULL) {
    (*(source->fOnCloseFunc))(source->fOnCloseClientData);
  }
}

void FramedSource::stopGettingFrames() {
  fIsCurrentlyAwaitingData = False; // indicates that we can be read again
  fAfterGettingFunc = NULL;
  fOnCloseFunc = NULL;

  // Perform any specialized action now:
  doStopGettingFrames();
}

void FramedSource::doStopGettingFrames() {
  // Default implementation: Do nothing except cancel any pending 'delivery' task:
  envir().taskScheduler().unscheduleDelayedTask(nextTask());
  // Subclasses may wish to redefine this function.
}

unsigned FramedSource::maxFrameSize() const {
  // By default, this source has no maximum frame size.
  return 0;
}
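// Illustrative sketch (not part of the original sources): getNextFrame()
// above enforces a strict request/callback cycle, with at most one read
// outstanding at a time; the "after getting" callback normally issues the
// next request.  A minimal consumer loop under that contract (the buffer and
// callback names here are this sketch's own):

#include "FramedSource.hh"

static unsigned char gBuffer[100000];

static void onFrame(void* clientData, unsigned frameSize,
		    unsigned /*numTruncatedBytes*/,
		    struct timeval /*presentationTime*/,
		    unsigned /*durationInMicroseconds*/) {
  FramedSource* source = (FramedSource*)clientData;
  // ... consume "frameSize" bytes from gBuffer here ...
  // Then (and only then) is it legal to ask for the next frame:
  source->getNextFrame(gBuffer, sizeof gBuffer, onFrame, source,
		       FramedSource::handleClosure, source);
}
// To start the pump, the initial request is made the same way:
//   source->getNextFrame(gBuffer, sizeof gBuffer, onFrame, source,
//                        FramedSource::handleClosure, source);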
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A source object for AMR audio files (as defined in RFC 4867, section 5) // Implementation #include "AMRAudioFileSource.hh" #include "InputFile.hh" #include "GroupsockHelper.hh" ////////// AMRAudioFileSource ////////// AMRAudioFileSource* AMRAudioFileSource::createNew(UsageEnvironment& env, char const* fileName) { FILE* fid = NULL; Boolean magicNumberOK = True; do { fid = OpenInputFile(env, fileName); if (fid == NULL) break; // Now, having opened the input file, read the first few bytes, to // check the required 'magic number': magicNumberOK = False; // until we learn otherwise Boolean isWideband = False; // by default unsigned numChannels = 1; // by default char buf[100]; // Start with the first 6 bytes (the first 5 of which must be "#!AMR"): if (fread(buf, 1, 6, fid) < 6) break; if (strncmp(buf, "#!AMR", 5) != 0) break; // bad magic # unsigned bytesRead = 6; // The next bytes must be "\n", "-WB\n", "_MC1.0\n", or "-WB_MC1.0\n" if (buf[5] == '-') { // The next bytes must be "WB\n" or "WB_MC1.0\n" if (fread(&buf[bytesRead], 1, 3, fid) < 3) break; if (strncmp(&buf[bytesRead], "WB", 2) != 0) break; // bad magic # isWideband = True; bytesRead += 3; } if (buf[bytesRead-1] == '_') { // The next bytes must be "MC1.0\n" if (fread(&buf[bytesRead], 1, 6, fid) < 6) break; if (strncmp(&buf[bytesRead], "MC1.0\n", 6) != 0) break; // bad magic # bytesRead += 6; // The next 4 bytes contain the number of channels: char channelDesc[4]; if (fread(channelDesc, 1, 4, fid) < 4) break; numChannels = channelDesc[3]&0xF; } else if (buf[bytesRead-1] != '\n') { break; // bad magic # } // If we get here, the magic number was OK: magicNumberOK = True; #ifdef DEBUG fprintf(stderr, "isWideband: %d, numChannels: %d\n", isWideband, numChannels); #endif return new AMRAudioFileSource(env, fid, isWideband, numChannels); } while (0); // An error occurred: CloseInputFile(fid); if (!magicNumberOK) { env.setResultMsg("Bad (or nonexistent) AMR file header"); } return NULL; } AMRAudioFileSource ::AMRAudioFileSource(UsageEnvironment& env, FILE* fid, Boolean isWideband, unsigned numChannels) : AMRAudioSource(env, isWideband, numChannels), fFid(fid) { } AMRAudioFileSource::~AMRAudioFileSource() { CloseInputFile(fFid); } // The mapping from the "FT" field to frame size. // Values of 65535 are invalid. #define FT_INVALID 65535 static unsigned short const frameSize[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, FT_INVALID, FT_INVALID, FT_INVALID, FT_INVALID, FT_INVALID, FT_INVALID, 0 }; static unsigned short const frameSizeWideband[16] = { 17, 23, 32, 36, 40, 46, 50, 58, 60, 5, FT_INVALID, FT_INVALID, FT_INVALID, FT_INVALID, 0, 0 }; // Note: We should change the following to use asynchronous file reading, ##### // as we now do with ByteStreamFileSource. 
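// (Illustrative sketch of decoding a single AMR frame-header byte using
// the tables above; the header value 0x3C is a hypothetical example.)
static void exampleDecodeAMRFrameHeader() {
  unsigned char header = 0x3C;              // binary 0011 1100
  Boolean paddingOK = (header&0x83) == 0;   // True: the padding bits are zero
  unsigned char ft = (header&0x78)>>3;      // FT == 7 (the 12.2 kbps mode)
  unsigned short size = frameSize[ft];      // == 31 bytes (narrowband)
  // For a wideband stream, "frameSizeWideband[ft]" would give 58 bytes.
}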
##### void AMRAudioFileSource::doGetNextFrame() { if (feof(fFid) || ferror(fFid)) { handleClosure(this); return; } // Begin by reading the 1-byte frame header (and checking it for validity) while (1) { if (fread(&fLastFrameHeader, 1, 1, fFid) < 1) { handleClosure(this); return; } if ((fLastFrameHeader&0x83) != 0) { #ifdef DEBUG fprintf(stderr, "Invalid frame header 0x%02x (padding bits (0x83) are not zero)\n", fLastFrameHeader); #endif } else { unsigned char ft = (fLastFrameHeader&0x78)>>3; fFrameSize = fIsWideband ? frameSizeWideband[ft] : frameSize[ft]; if (fFrameSize == FT_INVALID) { #ifdef DEBUG fprintf(stderr, "Invalid FT field %d (from frame header 0x%02x)\n", ft, fLastFrameHeader); #endif } else { // The frame header is OK #ifdef DEBUG fprintf(stderr, "Valid frame header 0x%02x -> ft %d -> frame size %d\n", fLastFrameHeader, ft, fFrameSize); #endif break; } } } // Next, read the frame-block into the buffer provided: fFrameSize *= fNumChannels; // because multiple channels make up a frame-block if (fFrameSize > fMaxSize) { fNumTruncatedBytes = fFrameSize - fMaxSize; fFrameSize = fMaxSize; } fFrameSize = fread(fTo, 1, fFrameSize, fFid); // Set the 'presentation time': if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) { // This is the first frame, so use the current time: gettimeofday(&fPresentationTime, NULL); } else { // Increment by the play time of the previous frame (20 ms) unsigned uSeconds = fPresentationTime.tv_usec + 20000; fPresentationTime.tv_sec += uSeconds/1000000; fPresentationTime.tv_usec = uSeconds%1000000; } fDurationInMicroseconds = 20000; // each frame is 20 ms // Switch to another task, and inform the reader that he has data: nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this); } live/liveMedia/MP3ADUdescriptor.cpp000444 001751 000000 00000004136 12265042432 017376 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Descriptor preceding frames of 'ADU' MP3 streams (for improved loss-tolerance) // Implementation #include "MP3ADUdescriptor.hh" ////////// ADUdescriptor ////////// //##### NOTE: For now, ignore fragmentation. Fix this later! 
##### #define TWO_BYTE_DESCR_FLAG 0x40 unsigned ADUdescriptor::generateDescriptor(unsigned char*& toPtr, unsigned remainingFrameSize) { unsigned descriptorSize = ADUdescriptor::computeSize(remainingFrameSize); switch (descriptorSize) { case 1: { *toPtr++ = (unsigned char)remainingFrameSize; break; } case 2: { generateTwoByteDescriptor(toPtr, remainingFrameSize); break; } } return descriptorSize; } void ADUdescriptor::generateTwoByteDescriptor(unsigned char*& toPtr, unsigned remainingFrameSize) { *toPtr++ = (TWO_BYTE_DESCR_FLAG|(unsigned char)(remainingFrameSize>>8)); *toPtr++ = (unsigned char)(remainingFrameSize&0xFF); } unsigned ADUdescriptor::getRemainingFrameSize(unsigned char*& fromPtr) { unsigned char firstByte = *fromPtr++; if (firstByte&TWO_BYTE_DESCR_FLAG) { // This is a 2-byte descriptor unsigned char secondByte = *fromPtr++; return ((firstByte&0x3F)<<8) | secondByte; } else { // This is a 1-byte descriptor return (firstByte&0x3F); } } live/liveMedia/MP3ADUinterleaving.cpp000444 001751 000000 00000040723 12265042432 017711 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
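// (Worked example of the descriptor encoding above.  A remaining frame
// size of 500 is hypothetical; it needs the two-byte form, because it
// doesn't fit in the 6 value bits of a one-byte descriptor.)
static void exampleADUdescriptor() {
  unsigned char buf[2];
  unsigned char* to = buf;
  unsigned size = ADUdescriptor::generateDescriptor(to, 500); // size == 2
  // buf[0] == 0x41: TWO_BYTE_DESCR_FLAG (0x40) | (500>>8) == 0x40|0x01
  // buf[1] == 0xF4: 500 & 0xFF
  unsigned char* from = buf;
  unsigned decoded = ADUdescriptor::getRemainingFrameSize(from); // == 500
}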
// Interleaving of MP3 ADUs // Implementation #include "MP3ADUinterleaving.hh" #include "MP3ADUdescriptor.hh" #include #ifdef TEST_LOSS #include "GroupsockHelper.hh" #endif ////////// Interleaving ////////// Interleaving::Interleaving(unsigned cycleSize, unsigned char const* cycleArray) : fCycleSize(cycleSize) { for (unsigned i = 0; i < fCycleSize; ++i) { fInverseCycle[cycleArray[i]] = i; } } Interleaving::~Interleaving() { } ////////// MP3ADUinterleaverBase ////////// MP3ADUinterleaverBase::MP3ADUinterleaverBase(UsageEnvironment& env, FramedSource* inputSource) : FramedFilter(env, inputSource) { } MP3ADUinterleaverBase::~MP3ADUinterleaverBase() { } FramedSource* MP3ADUinterleaverBase::getInputSource(UsageEnvironment& env, char const* inputSourceName) { FramedSource* inputSource; if (!FramedSource::lookupByName(env, inputSourceName, inputSource)) return NULL; if (strcmp(inputSource->MIMEtype(), "audio/MPA-ROBUST") != 0) { env.setResultMsg(inputSourceName, " is not an MP3 ADU source"); return NULL; } return inputSource; } void MP3ADUinterleaverBase::afterGettingFrame(void* clientData, unsigned numBytesRead, unsigned /*numTruncatedBytes*/, struct timeval presentationTime, unsigned durationInMicroseconds) { MP3ADUinterleaverBase* interleaverBase = (MP3ADUinterleaverBase*)clientData; // Finish up after reading: interleaverBase->afterGettingFrame(numBytesRead, presentationTime, durationInMicroseconds); // Then, continue to deliver an outgoing frame: interleaverBase->doGetNextFrame(); } ////////// InterleavingFrames (definition) ////////// class InterleavingFrames { public: InterleavingFrames(unsigned maxCycleSize); virtual ~InterleavingFrames(); Boolean haveReleaseableFrame(); void getIncomingFrameParams(unsigned char index, unsigned char*& dataPtr, unsigned& bytesAvailable); void getReleasingFrameParams(unsigned char index, unsigned char*& dataPtr, unsigned& bytesInUse, struct timeval& presentationTime, unsigned& durationInMicroseconds); void setFrameParams(unsigned char index, unsigned char icc, unsigned char ii, unsigned frameSize, struct timeval presentationTime, unsigned durationInMicroseconds); unsigned nextIndexToRelease() {return fNextIndexToRelease;} void releaseNext(); private: unsigned fMaxCycleSize; unsigned fNextIndexToRelease; class InterleavingFrameDescriptor* fDescriptors; }; ////////// MP3ADUinterleaver ////////// MP3ADUinterleaver::MP3ADUinterleaver(UsageEnvironment& env, Interleaving const& interleaving, FramedSource* inputSource) : MP3ADUinterleaverBase(env, inputSource), fInterleaving(interleaving), fFrames(new InterleavingFrames(interleaving.cycleSize())), fII(0), fICC(0) { } MP3ADUinterleaver::~MP3ADUinterleaver() { delete fFrames; } MP3ADUinterleaver* MP3ADUinterleaver::createNew(UsageEnvironment& env, Interleaving const& interleaving, FramedSource* inputSource) { return new MP3ADUinterleaver(env, interleaving, inputSource); } void MP3ADUinterleaver::doGetNextFrame() { // If there's a frame immediately available, deliver it, otherwise get new // frames from the source until one's available: if (fFrames->haveReleaseableFrame()) { releaseOutgoingFrame(); // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. 
afterGetting(this); } else { fPositionOfNextIncomingFrame = fInterleaving.lookupInverseCycle(fII); unsigned char* dataPtr; unsigned bytesAvailable; fFrames->getIncomingFrameParams(fPositionOfNextIncomingFrame, dataPtr, bytesAvailable); // Read the next incoming frame (asynchronously) fInputSource->getNextFrame(dataPtr, bytesAvailable, &MP3ADUinterleaverBase::afterGettingFrame, this, handleClosure, this); } } void MP3ADUinterleaver::releaseOutgoingFrame() { unsigned char* fromPtr; fFrames->getReleasingFrameParams(fFrames->nextIndexToRelease(), fromPtr, fFrameSize, fPresentationTime, fDurationInMicroseconds); if (fFrameSize > fMaxSize) { fNumTruncatedBytes = fFrameSize - fMaxSize; fFrameSize = fMaxSize; } memmove(fTo, fromPtr, fFrameSize); fFrames->releaseNext(); } void MP3ADUinterleaver::afterGettingFrame(unsigned numBytesRead, struct timeval presentationTime, unsigned durationInMicroseconds) { // Set the (icc,ii) and frame size of the newly-read frame: fFrames->setFrameParams(fPositionOfNextIncomingFrame, fICC, fII, numBytesRead, presentationTime, durationInMicroseconds); // Prepare our counters for the next frame: if (++fII == fInterleaving.cycleSize()) { fII = 0; fICC = (fICC+1)%8; } } ////////// DeinterleavingFrames (definition) ////////// class DeinterleavingFrames { public: DeinterleavingFrames(); virtual ~DeinterleavingFrames(); Boolean haveReleaseableFrame(); void getIncomingFrameParams(unsigned char*& dataPtr, unsigned& bytesAvailable); void getIncomingFrameParamsAfter(unsigned frameSize, struct timeval presentationTime, unsigned durationInMicroseconds, unsigned char& icc, unsigned char& ii); void getReleasingFrameParams(unsigned char*& dataPtr, unsigned& bytesInUse, struct timeval& presentationTime, unsigned& durationInMicroseconds); void moveIncomingFrameIntoPlace(); void releaseNext(); void startNewCycle(); private: unsigned fNextIndexToRelease; Boolean fHaveEndedCycle; unsigned fIIlastSeen; unsigned fMinIndexSeen, fMaxIndexSeen; // actually, max+1 class DeinterleavingFrameDescriptor* fDescriptors; }; ////////// MP3ADUdeinterleaver ////////// MP3ADUdeinterleaver::MP3ADUdeinterleaver(UsageEnvironment& env, FramedSource* inputSource) : MP3ADUinterleaverBase(env, inputSource), fFrames(new DeinterleavingFrames), fIIlastSeen(~0), fICClastSeen(~0) { } MP3ADUdeinterleaver::~MP3ADUdeinterleaver() { delete fFrames; } MP3ADUdeinterleaver* MP3ADUdeinterleaver::createNew(UsageEnvironment& env, FramedSource* inputSource) { return new MP3ADUdeinterleaver(env, inputSource); } void MP3ADUdeinterleaver::doGetNextFrame() { // If there's a frame immediately available, deliver it, otherwise get new // frames from the source until one's available: if (fFrames->haveReleaseableFrame()) { releaseOutgoingFrame(); // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. afterGetting(this); } else { #ifdef TEST_LOSS NOTE: This code no longer works, because it uses synchronous reads, which are no longer supported. 
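// (Worked example of the (icc,ii) counters maintained above, assuming a
// hypothetical interleave-cycle size of 3.  Successive outgoing ADUs get
// tagged:
//   (icc=0,ii=0) (0,1) (0,2) (1,0) (1,1) (1,2) (2,0) ...
// The deinterleaver starts a new cycle whenever "icc" changes - or when
// "ii" repeats, which is what the "ii == fIIlastSeen" test catches when
// the sender isn't actually interleaving.)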
static unsigned const framesPerPacket = 3; static unsigned const frameCount = 0; static Boolean packetIsLost; while (1) { unsigned packetCount = frameCount/framesPerPacket; if ((frameCount++)%framesPerPacket == 0) { packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss ##### } if (packetIsLost) { // Read and discard the next input frame (that would be part of // a lost packet): unsigned char dummyBuf[2000]; unsigned numBytesRead; struct timeval presentationTime; // (this works only if the source can be read synchronously) fInputSource->syncGetNextFrame(dummyBuf, sizeof dummyBuf, numBytesRead, presentationTime); } else { break; // from while (1) } } #endif unsigned char* dataPtr; unsigned bytesAvailable; fFrames->getIncomingFrameParams(dataPtr, bytesAvailable); // Read the next incoming frame (asynchronously) fInputSource->getNextFrame(dataPtr, bytesAvailable, &MP3ADUinterleaverBase::afterGettingFrame, this, handleClosure, this); } } void MP3ADUdeinterleaver::afterGettingFrame(unsigned numBytesRead, struct timeval presentationTime, unsigned durationInMicroseconds) { // Get the (icc,ii) and set the frame size of the newly-read frame: unsigned char icc, ii; fFrames->getIncomingFrameParamsAfter(numBytesRead, presentationTime, durationInMicroseconds, icc, ii); // Compare these to the values we saw last: if (icc != fICClastSeen || ii == fIIlastSeen) { // We've started a new interleave cycle // (or interleaving was not used). Release all // pending ADU frames to the ADU->MP3 conversion step: fFrames->startNewCycle(); } else { // We're still in the same cycle as before. // Move the newly-read frame into place, so it can be used: fFrames->moveIncomingFrameIntoPlace(); } fICClastSeen = icc; fIIlastSeen = ii; } void MP3ADUdeinterleaver::releaseOutgoingFrame() { unsigned char* fromPtr; fFrames->getReleasingFrameParams(fromPtr, fFrameSize, fPresentationTime, fDurationInMicroseconds); if (fFrameSize > fMaxSize) { fNumTruncatedBytes = fFrameSize - fMaxSize; fFrameSize = fMaxSize; } memmove(fTo, fromPtr, fFrameSize); fFrames->releaseNext(); } ////////// InterleavingFrames (implementation) ////////// #define MAX_FRAME_SIZE 2000 /* conservatively high */ class InterleavingFrameDescriptor { public: InterleavingFrameDescriptor() {frameDataSize = 0;} unsigned frameDataSize; // includes ADU descriptor and (modified) MPEG hdr struct timeval presentationTime; unsigned durationInMicroseconds; unsigned char frameData[MAX_FRAME_SIZE]; // ditto }; InterleavingFrames::InterleavingFrames(unsigned maxCycleSize) : fMaxCycleSize(maxCycleSize), fNextIndexToRelease(0), fDescriptors(new InterleavingFrameDescriptor[maxCycleSize]) { } InterleavingFrames::~InterleavingFrames() { delete[] fDescriptors; } Boolean InterleavingFrames::haveReleaseableFrame() { return fDescriptors[fNextIndexToRelease].frameDataSize > 0; } void InterleavingFrames::getIncomingFrameParams(unsigned char index, unsigned char*& dataPtr, unsigned& bytesAvailable) { InterleavingFrameDescriptor& desc = fDescriptors[index]; dataPtr = &desc.frameData[0]; bytesAvailable = MAX_FRAME_SIZE; } void InterleavingFrames::getReleasingFrameParams(unsigned char index, unsigned char*& dataPtr, unsigned& bytesInUse, struct timeval& presentationTime, unsigned& durationInMicroseconds) { InterleavingFrameDescriptor& desc = fDescriptors[index]; dataPtr = &desc.frameData[0]; bytesInUse = desc.frameDataSize; presentationTime = desc.presentationTime; durationInMicroseconds = desc.durationInMicroseconds; } void InterleavingFrames::setFrameParams(unsigned char index, 
unsigned char icc, unsigned char ii, unsigned frameSize, struct timeval presentationTime, unsigned durationInMicroseconds) { InterleavingFrameDescriptor& desc = fDescriptors[index]; desc.frameDataSize = frameSize; desc.presentationTime = presentationTime; desc.durationInMicroseconds = durationInMicroseconds; // Advance over the ADU descriptor, to get to the MPEG 'syncword': unsigned char* ptr = &desc.frameData[0]; (void)ADUdescriptor::getRemainingFrameSize(ptr); // Replace the next 11 bits with (ii,icc): *ptr++ = ii; *ptr &=~ 0xE0; *ptr |= (icc<<5); } void InterleavingFrames::releaseNext() { fDescriptors[fNextIndexToRelease].frameDataSize = 0; fNextIndexToRelease = (fNextIndexToRelease+1)%fMaxCycleSize; } ////////// DeinterleavingFrames (implementation) ////////// class DeinterleavingFrameDescriptor { public: DeinterleavingFrameDescriptor() {frameDataSize = 0; frameData = NULL;} virtual ~DeinterleavingFrameDescriptor() {delete[] frameData;} unsigned frameDataSize; // includes ADU descriptor and (modified) MPEG hdr struct timeval presentationTime; unsigned durationInMicroseconds; unsigned char* frameData; }; DeinterleavingFrames::DeinterleavingFrames() : fNextIndexToRelease(0), fHaveEndedCycle(False), fMinIndexSeen(MAX_CYCLE_SIZE), fMaxIndexSeen(0), fDescriptors(new DeinterleavingFrameDescriptor[MAX_CYCLE_SIZE+1]) { } DeinterleavingFrames::~DeinterleavingFrames() { delete[] fDescriptors; } Boolean DeinterleavingFrames::haveReleaseableFrame() { if (!fHaveEndedCycle) { // Check just the next frame in the sequence return fDescriptors[fNextIndexToRelease].frameDataSize > 0; } else { // We've just ended a cycle, so we can skip over frames that didn't // get filled in (due to packet loss): if (fNextIndexToRelease < fMinIndexSeen) { fNextIndexToRelease = fMinIndexSeen; } while (fNextIndexToRelease < fMaxIndexSeen && fDescriptors[fNextIndexToRelease].frameDataSize == 0) { ++fNextIndexToRelease; } if (fNextIndexToRelease >= fMaxIndexSeen) { // No more frames are available from the cycle that we just ended, so // clear out all previously stored frames, then make available // the last-read frame, and return false for now: for (unsigned i = fMinIndexSeen; i < fMaxIndexSeen; ++i) { fDescriptors[i].frameDataSize = 0; } fMinIndexSeen = MAX_CYCLE_SIZE; fMaxIndexSeen = 0; moveIncomingFrameIntoPlace(); fHaveEndedCycle = False; fNextIndexToRelease = 0; return False; } return True; } } void DeinterleavingFrames::getIncomingFrameParams(unsigned char*& dataPtr, unsigned& bytesAvailable) { // Use fDescriptors[MAX_CYCLE_SIZE] to store the incoming frame, // prior to figuring out its real position: DeinterleavingFrameDescriptor& desc = fDescriptors[MAX_CYCLE_SIZE]; if (desc.frameData == NULL) { // There's no buffer yet, so allocate a new one: desc.frameData = new unsigned char[MAX_FRAME_SIZE]; } dataPtr = desc.frameData; bytesAvailable = MAX_FRAME_SIZE; } void DeinterleavingFrames ::getIncomingFrameParamsAfter(unsigned frameSize, struct timeval presentationTime, unsigned durationInMicroseconds, unsigned char& icc, unsigned char& ii) { DeinterleavingFrameDescriptor& desc = fDescriptors[MAX_CYCLE_SIZE]; desc.frameDataSize = frameSize; desc.presentationTime = presentationTime; desc.durationInMicroseconds = durationInMicroseconds; // Advance over the ADU descriptor, to get to the MPEG 'syncword': unsigned char* ptr = desc.frameData; (void)ADUdescriptor::getRemainingFrameSize(ptr); // Read the next 11 bits into (ii,icc), and replace them with all-1s: fIIlastSeen = ii = *ptr; *ptr++ = 0xFF; icc = (*ptr&0xE0)>>5; *ptr |= 
0xE0; } void DeinterleavingFrames::getReleasingFrameParams(unsigned char*& dataPtr, unsigned& bytesInUse, struct timeval& presentationTime, unsigned& durationInMicroseconds) { DeinterleavingFrameDescriptor& desc = fDescriptors[fNextIndexToRelease]; dataPtr = desc.frameData; bytesInUse = desc.frameDataSize; presentationTime = desc.presentationTime; durationInMicroseconds = desc.durationInMicroseconds; } void DeinterleavingFrames::moveIncomingFrameIntoPlace() { DeinterleavingFrameDescriptor& fromDesc = fDescriptors[MAX_CYCLE_SIZE]; DeinterleavingFrameDescriptor& toDesc = fDescriptors[fIIlastSeen]; toDesc.frameDataSize = fromDesc.frameDataSize; toDesc.presentationTime = fromDesc.presentationTime; // Move the data pointer into place by swapping the data pointers: unsigned char* tmp = toDesc.frameData; toDesc.frameData = fromDesc.frameData; fromDesc.frameData = tmp; if (fIIlastSeen < fMinIndexSeen) { fMinIndexSeen = fIIlastSeen; } if (fIIlastSeen + 1 > fMaxIndexSeen) { fMaxIndexSeen = fIIlastSeen + 1; } } void DeinterleavingFrames::releaseNext() { fDescriptors[fNextIndexToRelease].frameDataSize = 0; fNextIndexToRelease = (fNextIndexToRelease+1)%MAX_CYCLE_SIZE; } void DeinterleavingFrames::startNewCycle() { fHaveEndedCycle = True; } live/liveMedia/AMRAudioRTPSink.cpp000444 001751 000000 00000012134 12265042432 017157 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for AMR audio (RFC 4867) // Implementation // NOTE: At present, this is just a limited implementation, supporting: // octet-alignment only; no interleaving; no frame CRC; no robust-sorting. #include "AMRAudioRTPSink.hh" #include "AMRAudioSource.hh" AMRAudioRTPSink* AMRAudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, Boolean sourceIsWideband, unsigned numChannelsInSource) { return new AMRAudioRTPSink(env, RTPgs, rtpPayloadFormat, sourceIsWideband, numChannelsInSource); } AMRAudioRTPSink ::AMRAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, Boolean sourceIsWideband, unsigned numChannelsInSource) : AudioRTPSink(env, RTPgs, rtpPayloadFormat, sourceIsWideband ? 16000 : 8000, sourceIsWideband ? 
"AMR-WB": "AMR", numChannelsInSource), fSourceIsWideband(sourceIsWideband), fFmtpSDPLine(NULL) { } AMRAudioRTPSink::~AMRAudioRTPSink() { delete[] fFmtpSDPLine; } Boolean AMRAudioRTPSink::sourceIsCompatibleWithUs(MediaSource& source) { // Our source must be an AMR audio source: if (!source.isAMRAudioSource()) return False; // Also, the source must be wideband iff we asked for this: AMRAudioSource& amrSource = (AMRAudioSource&)source; if ((amrSource.isWideband()^fSourceIsWideband) != 0) return False; // Also, the source must have the same number of channels that we // specified. (It could, in principle, have more, but we don't // support that.) if (amrSource.numChannels() != numChannels()) return False; // Also, because in our current implementation we output only one // frame in each RTP packet, this means that for multi-channel audio, // each 'frame-block' will be split over multiple RTP packets, which // may violate the spec. Warn about this: if (amrSource.numChannels() > 1) { envir() << "AMRAudioRTPSink: Warning: Input source has " << amrSource.numChannels() << " audio channels. In the current implementation, the multi-frame frame-block will be split over multiple RTP packets\n"; } return True; } void AMRAudioRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { // If this is the 1st frame in the 1st packet, set the RTP 'M' (marker) // bit (because this is considered the start of a talk spurt): if (isFirstPacket() && isFirstFrameInPacket()) { setMarkerBit(); } // If this is the first frame in the packet, set the 1-byte payload // header (using CMR 15) if (isFirstFrameInPacket()) { u_int8_t payloadHeader = 0xF0; setSpecialHeaderBytes(&payloadHeader, 1, 0); } // Set the TOC field for the current frame, based on the "FT" and "Q" // values from our source: AMRAudioSource* amrSource = (AMRAudioSource*)fSource; if (amrSource == NULL) return; // sanity check u_int8_t toc = amrSource->lastFrameHeader(); // Clear the "F" bit, because we're the last frame in this packet: ##### toc &=~ 0x80; setSpecialHeaderBytes(&toc, 1, 1+numFramesUsedSoFar()); // Important: Also call our base class's doSpecialFrameHandling(), // to set the packet's timestamp: MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset, frameStart, numBytesInFrame, framePresentationTime, numRemainingBytes); } Boolean AMRAudioRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { // For now, pack only one AMR frame into each outgoing RTP packet: ##### return False; } unsigned AMRAudioRTPSink::specialHeaderSize() const { // For now, because we're packing only one frame per packet, // there's just a 1-byte payload header, plus a 1-byte TOC ##### return 2; } char const* AMRAudioRTPSink::auxSDPLine() { if (fFmtpSDPLine == NULL) { // Generate a "a=fmtp:" line with "octet-aligned=1" // (That is the only non-default parameter.) char buf[100]; sprintf(buf, "a=fmtp:%d octet-align=1\r\n", rtpPayloadType()); delete[] fFmtpSDPLine; fFmtpSDPLine = strDup(buf); } return fFmtpSDPLine; } live/liveMedia/MediaSession.cpp000444 001751 000000 00000144024 12265042432 016732 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. 
(See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A data structure that represents a session that consists of // potentially multiple (audio and/or video) sub-sessions // Implementation #include "liveMedia.hh" #include "Locale.hh" #include "GroupsockHelper.hh" #include ////////// MediaSession ////////// MediaSession* MediaSession::createNew(UsageEnvironment& env, char const* sdpDescription) { MediaSession* newSession = new MediaSession(env); if (newSession != NULL) { if (!newSession->initializeWithSDP(sdpDescription)) { delete newSession; return NULL; } } return newSession; } Boolean MediaSession::lookupByName(UsageEnvironment& env, char const* instanceName, MediaSession*& resultSession) { resultSession = NULL; // unless we succeed Medium* medium; if (!Medium::lookupByName(env, instanceName, medium)) return False; if (!medium->isMediaSession()) { env.setResultMsg(instanceName, " is not a 'MediaSession' object"); return False; } resultSession = (MediaSession*)medium; return True; } MediaSession::MediaSession(UsageEnvironment& env) : Medium(env), fSubsessionsHead(NULL), fSubsessionsTail(NULL), fConnectionEndpointName(NULL), fMaxPlayStartTime(0.0f), fMaxPlayEndTime(0.0f), fAbsStartTime(NULL), fAbsEndTime(NULL), fScale(1.0f), fMediaSessionType(NULL), fSessionName(NULL), fSessionDescription(NULL), fControlPath(NULL) { fSourceFilterAddr.s_addr = 0; // Get our host name, and use this for the RTCP CNAME: const unsigned maxCNAMElen = 100; char CNAME[maxCNAMElen+1]; #ifndef CRIS gethostname((char*)CNAME, maxCNAMElen); #else // "gethostname()" isn't defined for this platform sprintf(CNAME, "unknown host %d", (unsigned)(our_random()*0x7FFFFFFF)); #endif CNAME[maxCNAMElen] = '\0'; // just in case fCNAME = strDup(CNAME); } MediaSession::~MediaSession() { delete fSubsessionsHead; delete[] fCNAME; delete[] fConnectionEndpointName; delete[] fAbsStartTime; delete[] fAbsEndTime; delete[] fMediaSessionType; delete[] fSessionName; delete[] fSessionDescription; delete[] fControlPath; } Boolean MediaSession::isMediaSession() const { return True; } MediaSubsession* MediaSession::createNewMediaSubsession() { // default implementation: return new MediaSubsession(*this); } Boolean MediaSession::initializeWithSDP(char const* sdpDescription) { if (sdpDescription == NULL) return False; // Begin by processing all SDP lines until we see the first "m=" char const* sdpLine = sdpDescription; char const* nextSDPLine; while (1) { if (!parseSDPLine(sdpLine, nextSDPLine)) return False; //##### We should really check for the correct SDP version (v=0) if (sdpLine[0] == 'm') break; sdpLine = nextSDPLine; if (sdpLine == NULL) break; // there are no m= lines at all // Check for various special SDP lines that we understand: if (parseSDPLine_s(sdpLine)) continue; if (parseSDPLine_i(sdpLine)) continue; if (parseSDPLine_c(sdpLine)) continue; if (parseSDPAttribute_control(sdpLine)) continue; if (parseSDPAttribute_range(sdpLine)) continue; if (parseSDPAttribute_type(sdpLine)) continue; if (parseSDPAttribute_source_filter(sdpLine)) continue; } 
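// (Usage sketch for the SDP-parsing code above; the SDP text is a
// minimal hypothetical example:
//   char const* sdp =
//     "v=0\r\n"
//     "s=Example session\r\n"
//     "c=IN IP4 232.0.0.1\r\n"
//     "m=audio 6666 RTP/AVP 0\r\n";
//   MediaSession* session = MediaSession::createNew(env, sdp);
// A NULL result means a parse error (see env.getResultMsg()); otherwise
// the "m=" line yields one MediaSubsession with client port 6666 and
// static payload format 0, which maps to "PCMU"/8000 Hz below.)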
while (sdpLine != NULL) { // We have a "m=" line, representing a new sub-session: MediaSubsession* subsession = createNewMediaSubsession(); if (subsession == NULL) { envir().setResultMsg("Unable to create new MediaSubsession"); return False; } // Parse the line as "m= RTP/AVP " // or "m= / RTP/AVP " // (Should we be checking for >1 payload format number here?)##### char* mediumName = strDupSize(sdpLine); // ensures we have enough space char const* protocolName = NULL; unsigned payloadFormat; if ((sscanf(sdpLine, "m=%s %hu RTP/AVP %u", mediumName, &subsession->fClientPortNum, &payloadFormat) == 3 || sscanf(sdpLine, "m=%s %hu/%*u RTP/AVP %u", mediumName, &subsession->fClientPortNum, &payloadFormat) == 3) && payloadFormat <= 127) { protocolName = "RTP"; } else if ((sscanf(sdpLine, "m=%s %hu UDP %u", mediumName, &subsession->fClientPortNum, &payloadFormat) == 3 || sscanf(sdpLine, "m=%s %hu udp %u", mediumName, &subsession->fClientPortNum, &payloadFormat) == 3 || sscanf(sdpLine, "m=%s %hu RAW/RAW/UDP %u", mediumName, &subsession->fClientPortNum, &payloadFormat) == 3) && payloadFormat <= 127) { // This is a RAW UDP source protocolName = "UDP"; } else { // This "m=" line is bad; output an error message saying so: char* sdpLineStr; if (nextSDPLine == NULL) { sdpLineStr = (char*)sdpLine; } else { sdpLineStr = strDup(sdpLine); sdpLineStr[nextSDPLine-sdpLine] = '\0'; } envir() << "Bad SDP \"m=\" line: " << sdpLineStr << "\n"; if (sdpLineStr != (char*)sdpLine) delete[] sdpLineStr; delete[] mediumName; delete subsession; // Skip the following SDP lines, up until the next "m=": while (1) { sdpLine = nextSDPLine; if (sdpLine == NULL) break; // we've reached the end if (!parseSDPLine(sdpLine, nextSDPLine)) return False; if (sdpLine[0] == 'm') break; // we've reached the next subsession } continue; } // Insert this subsession at the end of the list: if (fSubsessionsTail == NULL) { fSubsessionsHead = fSubsessionsTail = subsession; } else { fSubsessionsTail->setNext(subsession); fSubsessionsTail = subsession; } subsession->serverPortNum = subsession->fClientPortNum; // by default char const* mStart = sdpLine; subsession->fSavedSDPLines = strDup(mStart); subsession->fMediumName = strDup(mediumName); delete[] mediumName; subsession->fProtocolName = strDup(protocolName); subsession->fRTPPayloadFormat = payloadFormat; // Process the following SDP lines, up until the next "m=": while (1) { sdpLine = nextSDPLine; if (sdpLine == NULL) break; // we've reached the end if (!parseSDPLine(sdpLine, nextSDPLine)) return False; if (sdpLine[0] == 'm') break; // we've reached the next subsession // Check for various special SDP lines that we understand: if (subsession->parseSDPLine_c(sdpLine)) continue; if (subsession->parseSDPLine_b(sdpLine)) continue; if (subsession->parseSDPAttribute_rtpmap(sdpLine)) continue; if (subsession->parseSDPAttribute_control(sdpLine)) continue; if (subsession->parseSDPAttribute_range(sdpLine)) continue; if (subsession->parseSDPAttribute_fmtp(sdpLine)) continue; if (subsession->parseSDPAttribute_source_filter(sdpLine)) continue; if (subsession->parseSDPAttribute_x_dimensions(sdpLine)) continue; if (subsession->parseSDPAttribute_framerate(sdpLine)) continue; // (Later, check for malformed lines, and other valid SDP lines#####) } if (sdpLine != NULL) subsession->fSavedSDPLines[sdpLine-mStart] = '\0'; // If we don't yet know the codec name, try looking it up from the // list of static payload types: if (subsession->fCodecName == NULL) { subsession->fCodecName = 
lookupPayloadFormat(subsession->fRTPPayloadFormat, subsession->fRTPTimestampFrequency, subsession->fNumChannels); if (subsession->fCodecName == NULL) { char typeStr[20]; sprintf(typeStr, "%d", subsession->fRTPPayloadFormat); envir().setResultMsg("Unknown codec name for RTP payload type ", typeStr); return False; } } // If we don't yet know this subsession's RTP timestamp frequency // (because it uses a dynamic payload type and the corresponding // SDP "rtpmap" attribute erroneously didn't specify it), // then guess it now: if (subsession->fRTPTimestampFrequency == 0) { subsession->fRTPTimestampFrequency = guessRTPTimestampFrequency(subsession->fMediumName, subsession->fCodecName); } } return True; } Boolean MediaSession::parseSDPLine(char const* inputLine, char const*& nextLine){ // Begin by finding the start of the next line (if any): nextLine = NULL; for (char const* ptr = inputLine; *ptr != '\0'; ++ptr) { if (*ptr == '\r' || *ptr == '\n') { // We found the end of the line ++ptr; while (*ptr == '\r' || *ptr == '\n') ++ptr; nextLine = ptr; if (nextLine[0] == '\0') nextLine = NULL; // special case for end break; } } // Then, check that this line is a SDP line of the form = // (However, we also accept blank lines in the input.) if (inputLine[0] == '\r' || inputLine[0] == '\n') return True; if (strlen(inputLine) < 2 || inputLine[1] != '=' || inputLine[0] < 'a' || inputLine[0] > 'z') { envir().setResultMsg("Invalid SDP line: ", inputLine); return False; } return True; } static char* parseCLine(char const* sdpLine) { char* resultStr = NULL; char* buffer = strDupSize(sdpLine); // ensures we have enough space if (sscanf(sdpLine, "c=IN IP4 %[^/\r\n]", buffer) == 1) { // Later, handle the optional / and / ##### resultStr = strDup(buffer); } delete[] buffer; return resultStr; } Boolean MediaSession::parseSDPLine_s(char const* sdpLine) { // Check for "s=" line char* buffer = strDupSize(sdpLine); Boolean parseSuccess = False; if (sscanf(sdpLine, "s=%[^\r\n]", buffer) == 1) { delete[] fSessionName; fSessionName = strDup(buffer); parseSuccess = True; } delete[] buffer; return parseSuccess; } Boolean MediaSession::parseSDPLine_i(char const* sdpLine) { // Check for "i=" line char* buffer = strDupSize(sdpLine); Boolean parseSuccess = False; if (sscanf(sdpLine, "i=%[^\r\n]", buffer) == 1) { delete[] fSessionDescription; fSessionDescription = strDup(buffer); parseSuccess = True; } delete[] buffer; return parseSuccess; } Boolean MediaSession::parseSDPLine_c(char const* sdpLine) { // Check for "c=IN IP4 " // or "c=IN IP4 /" // (Later, do something with also #####) char* connectionEndpointName = parseCLine(sdpLine); if (connectionEndpointName != NULL) { delete[] fConnectionEndpointName; fConnectionEndpointName = connectionEndpointName; return True; } return False; } Boolean MediaSession::parseSDPAttribute_type(char const* sdpLine) { // Check for a "a=type:broadcast|meeting|moderated|test|H.332|recvonly" line: Boolean parseSuccess = False; char* buffer = strDupSize(sdpLine); if (sscanf(sdpLine, "a=type: %[^ ]", buffer) == 1) { delete[] fMediaSessionType; fMediaSessionType = strDup(buffer); parseSuccess = True; } delete[] buffer; return parseSuccess; } Boolean MediaSession::parseSDPAttribute_control(char const* sdpLine) { // Check for a "a=control:" line: Boolean parseSuccess = False; char* controlPath = strDupSize(sdpLine); // ensures we have enough space if (sscanf(sdpLine, "a=control: %s", controlPath) == 1) { parseSuccess = True; delete[] fControlPath; fControlPath = strDup(controlPath); } delete[] 
controlPath; return parseSuccess; } static Boolean parseRangeAttribute(char const* sdpLine, double& startTime, double& endTime) { return sscanf(sdpLine, "a=range: npt = %lg - %lg", &startTime, &endTime) == 2; } static Boolean parseRangeAttribute(char const* sdpLine, char*& absStartTime, char*& absEndTime) { size_t len = strlen(sdpLine) + 1; char* as = new char[len]; char* ae = new char[len]; int sscanfResult = sscanf(sdpLine, "a=range: clock = %[^-\r\n]-%[^\r\n]", as, ae); if (sscanfResult == 2) { absStartTime = as; absEndTime = ae; } else if (sscanfResult == 1) { absStartTime = as; delete[] ae; } else { delete[] as; delete[] ae; return False; } return True; } Boolean MediaSession::parseSDPAttribute_range(char const* sdpLine) { // Check for a "a=range:npt=-" line: // (Later handle other kinds of "a=range" attributes also???#####) Boolean parseSuccess = False; double playStartTime; double playEndTime; if (parseRangeAttribute(sdpLine, playStartTime, playEndTime)) { parseSuccess = True; if (playStartTime > fMaxPlayStartTime) { fMaxPlayStartTime = playStartTime; } if (playEndTime > fMaxPlayEndTime) { fMaxPlayEndTime = playEndTime; } } else if (parseRangeAttribute(sdpLine, _absStartTime(), _absEndTime())) { parseSuccess = True; } return parseSuccess; } static Boolean parseSourceFilterAttribute(char const* sdpLine, struct in_addr& sourceAddr) { // Check for a "a=source-filter:incl IN IP4 " line. // Note: At present, we don't check that really matches // one of our multicast addresses. We also don't support more than // one ##### Boolean result = False; // until we succeed char* sourceName = strDupSize(sdpLine); // ensures we have enough space do { if (sscanf(sdpLine, "a=source-filter: incl IN IP4 %*s %s", sourceName) != 1) break; // Now, convert this name to an address, if we can: NetAddressList addresses(sourceName); if (addresses.numAddresses() == 0) break; netAddressBits sourceAddrBits = *(netAddressBits*)(addresses.firstAddress()->data()); if (sourceAddrBits == 0) break; sourceAddr.s_addr = sourceAddrBits; result = True; } while (0); delete[] sourceName; return result; } Boolean MediaSession ::parseSDPAttribute_source_filter(char const* sdpLine) { return parseSourceFilterAttribute(sdpLine, fSourceFilterAddr); } char* MediaSession::lookupPayloadFormat(unsigned char rtpPayloadType, unsigned& freq, unsigned& nCh) { // Look up the codec name and timestamp frequency for known (static) // RTP payload formats. 
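// (Worked examples of the "a=range" forms parsed above:
//   "a=range:npt=0-34.5"  -> playStartTime 0.0, playEndTime 34.5
//   "a=range:clock=20140101T000000Z-20140101T003000Z"
//                         -> absolute start/end time strings
//   "a=range:clock=20140101T000000Z-"
//                         -> start time only (the one-match case).)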
char const* temp = NULL; switch (rtpPayloadType) { case 0: {temp = "PCMU"; freq = 8000; nCh = 1; break;} case 2: {temp = "G726-32"; freq = 8000; nCh = 1; break;} case 3: {temp = "GSM"; freq = 8000; nCh = 1; break;} case 4: {temp = "G723"; freq = 8000; nCh = 1; break;} case 5: {temp = "DVI4"; freq = 8000; nCh = 1; break;} case 6: {temp = "DVI4"; freq = 16000; nCh = 1; break;} case 7: {temp = "LPC"; freq = 8000; nCh = 1; break;} case 8: {temp = "PCMA"; freq = 8000; nCh = 1; break;} case 9: {temp = "G722"; freq = 8000; nCh = 1; break;} case 10: {temp = "L16"; freq = 44100; nCh = 2; break;} case 11: {temp = "L16"; freq = 44100; nCh = 1; break;} case 12: {temp = "QCELP"; freq = 8000; nCh = 1; break;} case 14: {temp = "MPA"; freq = 90000; nCh = 1; break;} // 'number of channels' is actually encoded in the media stream case 15: {temp = "G728"; freq = 8000; nCh = 1; break;} case 16: {temp = "DVI4"; freq = 11025; nCh = 1; break;} case 17: {temp = "DVI4"; freq = 22050; nCh = 1; break;} case 18: {temp = "G729"; freq = 8000; nCh = 1; break;} case 25: {temp = "CELB"; freq = 90000; nCh = 1; break;} case 26: {temp = "JPEG"; freq = 90000; nCh = 1; break;} case 28: {temp = "NV"; freq = 90000; nCh = 1; break;} case 31: {temp = "H261"; freq = 90000; nCh = 1; break;} case 32: {temp = "MPV"; freq = 90000; nCh = 1; break;} case 33: {temp = "MP2T"; freq = 90000; nCh = 1; break;} case 34: {temp = "H263"; freq = 90000; nCh = 1; break;} }; return strDup(temp); } unsigned MediaSession::guessRTPTimestampFrequency(char const* mediumName, char const* codecName) { // By default, we assume that audio sessions use a frequency of 8000, // video sessions use a frequency of 90000, // and text sessions use a frequency of 1000. // Begin by checking for known exceptions to this rule // (where the frequency is known unambiguously (e.g., not like "DVI4")) if (strcmp(codecName, "L16") == 0) return 44100; if (strcmp(codecName, "MPA") == 0 || strcmp(codecName, "MPA-ROBUST") == 0 || strcmp(codecName, "X-MP3-DRAFT-00") == 0) return 90000; // Now, guess default values: if (strcmp(mediumName, "video") == 0) return 90000; else if (strcmp(mediumName, "text") == 0) return 1000; return 8000; // for "audio", and any other medium } char* MediaSession::absStartTime() const { if (fAbsStartTime != NULL) return fAbsStartTime; // If a subsession has an 'absolute' start time, then use that: MediaSubsessionIterator iter(*this); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { if (subsession->_absStartTime() != NULL) return subsession->_absStartTime(); } return NULL; } char* MediaSession::absEndTime() const { if (fAbsEndTime != NULL) return fAbsEndTime; // If a subsession has an 'absolute' end time, then use that: MediaSubsessionIterator iter(*this); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { if (subsession->_absEndTime() != NULL) return subsession->_absEndTime(); } return NULL; } Boolean MediaSession ::initiateByMediaType(char const* mimeType, MediaSubsession*& resultSubsession, int useSpecialRTPoffset) { // Look through this session's subsessions for media that match "mimeType" resultSubsession = NULL; MediaSubsessionIterator iter(*this); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { Boolean wasAlreadyInitiated = subsession->readSource() != NULL; if (!wasAlreadyInitiated) { // Try to create a source for this subsession: if (!subsession->initiate(useSpecialRTPoffset)) return False; } // Make sure the source's MIME type is one that we handle: if 
(strcmp(subsession->readSource()->MIMEtype(), mimeType) != 0) { if (!wasAlreadyInitiated) subsession->deInitiate(); continue; } resultSubsession = subsession; break; // use this } if (resultSubsession == NULL) { envir().setResultMsg("Session has no usable media subsession"); return False; } return True; } ////////// MediaSubsessionIterator ////////// MediaSubsessionIterator::MediaSubsessionIterator(MediaSession const& session) : fOurSession(session) { reset(); } MediaSubsessionIterator::~MediaSubsessionIterator() { } MediaSubsession* MediaSubsessionIterator::next() { MediaSubsession* result = fNextPtr; if (fNextPtr != NULL) fNextPtr = fNextPtr->fNext; return result; } void MediaSubsessionIterator::reset() { fNextPtr = fOurSession.fSubsessionsHead; } ////////// MediaSubsession ////////// MediaSubsession::MediaSubsession(MediaSession& parent) : serverPortNum(0), sink(NULL), miscPtr(NULL), fParent(parent), fNext(NULL), fConnectionEndpointName(NULL), fClientPortNum(0), fRTPPayloadFormat(0xFF), fSavedSDPLines(NULL), fMediumName(NULL), fCodecName(NULL), fProtocolName(NULL), fRTPTimestampFrequency(0), fControlPath(NULL), fSourceFilterAddr(parent.sourceFilterAddr()), fBandwidth(0), fAuxiliarydatasizelength(0), fConstantduration(0), fConstantsize(0), fCRC(0), fCtsdeltalength(0), fDe_interleavebuffersize(0), fDtsdeltalength(0), fIndexdeltalength(0), fIndexlength(0), fInterleaving(0), fMaxdisplacement(0), fObjecttype(0), fOctetalign(0), fProfile_level_id(0), fRobustsorting(0), fSizelength(0), fStreamstateindication(0), fStreamtype(0), fCpresent(False), fRandomaccessindication(False), fConfig(NULL), fMode(NULL), fSpropParameterSets(NULL), fEmphasis(NULL), fChannelOrder(NULL), fPlayStartTime(0.0), fPlayEndTime(0.0), fAbsStartTime(NULL), fAbsEndTime(NULL), fVideoWidth(0), fVideoHeight(0), fVideoFPS(0), fNumChannels(1), fScale(1.0f), fNPT_PTS_Offset(0.0f), fRTPSocket(NULL), fRTCPSocket(NULL), fRTPSource(NULL), fRTCPInstance(NULL), fReadSource(NULL), fReceiveRawMP3ADUs(False), fReceiveRawJPEGFrames(False), fSessionId(NULL) { rtpInfo.seqNum = 0; rtpInfo.timestamp = 0; rtpInfo.infoIsNew = False; } MediaSubsession::~MediaSubsession() { deInitiate(); delete[] fConnectionEndpointName; delete[] fSavedSDPLines; delete[] fMediumName; delete[] fCodecName; delete[] fProtocolName; delete[] fControlPath; delete[] fConfig; delete[] fMode; delete[] fSpropParameterSets; delete[] fEmphasis; delete[] fChannelOrder; delete[] fAbsStartTime; delete[] fAbsEndTime; delete[] fSessionId; delete fNext; } void MediaSubsession::addFilter(FramedFilter* filter){ if (filter == NULL || filter->inputSource() != fReadSource) return; // sanity check fReadSource = filter; } double MediaSubsession::playStartTime() const { if (fPlayStartTime > 0) return fPlayStartTime; return fParent.playStartTime(); } double MediaSubsession::playEndTime() const { if (fPlayEndTime > 0) return fPlayEndTime; return fParent.playEndTime(); } char* MediaSubsession::absStartTime() const { if (fAbsStartTime != NULL) return fAbsStartTime; return fParent.absStartTime(); } char* MediaSubsession::absEndTime() const { if (fAbsEndTime != NULL) return fAbsEndTime; return fParent.absEndTime(); } static Boolean const honorSDPPortChoice #ifdef IGNORE_UNICAST_SDP_PORTS = False; #else = True; #endif Boolean MediaSubsession::initiate(int useSpecialRTPoffset) { if (fReadSource != NULL) return True; // has already been initiated do { if (fCodecName == NULL) { env().setResultMsg("Codec is unspecified"); break; } // Create RTP and RTCP 'Groupsocks' on which to receive incoming 
data. // (Groupsocks will work even for unicast addresses) struct in_addr tempAddr; tempAddr.s_addr = connectionEndpointAddress(); // This could get changed later, as a result of a RTSP "SETUP" if (fClientPortNum != 0 && (honorSDPPortChoice || IsMulticastAddress(tempAddr.s_addr))) { // The sockets' port numbers were specified for us. Use these: Boolean const protocolIsRTP = strcmp(fProtocolName, "RTP") == 0; if (protocolIsRTP) { fClientPortNum = fClientPortNum&~1; // use an even-numbered port for RTP, and the next (odd-numbered) port for RTCP } if (isSSM()) { fRTPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, fClientPortNum); } else { fRTPSocket = new Groupsock(env(), tempAddr, fClientPortNum, 255); } if (fRTPSocket == NULL) { env().setResultMsg("Failed to create RTP socket"); break; } if (protocolIsRTP) { // Set our RTCP port to be the RTP port +1 portNumBits const rtcpPortNum = fClientPortNum|1; if (isSSM()) { fRTCPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, rtcpPortNum); } else { fRTCPSocket = new Groupsock(env(), tempAddr, rtcpPortNum, 255); } } } else { // Port numbers were not specified in advance, so we use ephemeral port numbers. // Create sockets until we get a port-number pair (even: RTP; even+1: RTCP). // We need to make sure that we don't keep trying to use the same bad port numbers over // and over again, so we store bad sockets in a table, and delete them all when we're done. HashTable* socketHashTable = HashTable::create(ONE_WORD_HASH_KEYS); if (socketHashTable == NULL) break; Boolean success = False; NoReuse dummy(env()); // ensures that our new ephemeral port number won't be one that's already in use while (1) { // Create a new socket: if (isSSM()) { fRTPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, 0); } else { fRTPSocket = new Groupsock(env(), tempAddr, 0, 255); } if (fRTPSocket == NULL) { env().setResultMsg("MediaSession::initiate(): unable to create RTP and RTCP sockets"); break; } // Get the client port number, and check whether it's even (for RTP): Port clientPort(0); if (!getSourcePort(env(), fRTPSocket->socketNum(), clientPort)) { break; } fClientPortNum = ntohs(clientPort.num()); if ((fClientPortNum&1) != 0) { // it's odd // Record this socket in our table, and keep trying: unsigned key = (unsigned)fClientPortNum; Groupsock* existing = (Groupsock*)socketHashTable->Add((char const*)key, fRTPSocket); delete existing; // in case it wasn't NULL continue; } // Make sure we can use the next (i.e., odd) port number, for RTCP: portNumBits rtcpPortNum = fClientPortNum|1; if (isSSM()) { fRTCPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, rtcpPortNum); } else { fRTCPSocket = new Groupsock(env(), tempAddr, rtcpPortNum, 255); } if (fRTCPSocket != NULL && fRTCPSocket->socketNum() >= 0) { // Success! Use these two sockets. success = True; break; } else { // We couldn't create the RTCP socket (perhaps that port number's already in use elsewhere?). 
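// (Worked example of the ephemeral port pairing above: if the OS hands
// us port 50123 (odd), that socket is parked in the hash table and we
// try again; if it hands us 50124 (even), we keep it for RTP and then
// try to claim 50125 for RTCP.  The port numbers are hypothetical.)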
delete fRTCPSocket; fRTCPSocket = NULL; // Record the first socket in our table, and keep trying: unsigned key = (unsigned)fClientPortNum; Groupsock* existing = (Groupsock*)socketHashTable->Add((char const*)key, fRTPSocket); delete existing; // in case it wasn't NULL continue; } } // Clean up the socket hash table (and contents): Groupsock* oldGS; while ((oldGS = (Groupsock*)socketHashTable->RemoveNext()) != NULL) { delete oldGS; } delete socketHashTable; if (!success) break; // a fatal error occurred trying to create the RTP and RTCP sockets; we can't continue } // Try to use a big receive buffer for RTP - at least 0.1 second of // specified bandwidth and at least 50 KB unsigned rtpBufSize = fBandwidth * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024; increaseReceiveBufferTo(env(), fRTPSocket->socketNum(), rtpBufSize); if (isSSM() && fRTCPSocket != NULL) { // Special case for RTCP SSM: Send RTCP packets back to the source via unicast: fRTCPSocket->changeDestinationParameters(fSourceFilterAddr,0,~0); } // Create "fRTPSource" and "fReadSource": if (!createSourceObjects(useSpecialRTPoffset)) break; if (fReadSource == NULL) { env().setResultMsg("Failed to create read source"); break; } // Finally, create our RTCP instance. (It starts running automatically) if (fRTPSource != NULL && fRTCPSocket != NULL) { // If bandwidth is specified, use it and add 5% for RTCP overhead. // Otherwise make a guess at 500 kbps. unsigned totSessionBandwidth = fBandwidth ? fBandwidth + fBandwidth / 20 : 500; fRTCPInstance = RTCPInstance::createNew(env(), fRTCPSocket, totSessionBandwidth, (unsigned char const*) fParent.CNAME(), NULL /* we're a client */, fRTPSource); if (fRTCPInstance == NULL) { env().setResultMsg("Failed to create RTCP instance"); break; } } return True; } while (0); deInitiate(); fClientPortNum = 0; return False; } void MediaSubsession::deInitiate() { Medium::close(fRTCPInstance); fRTCPInstance = NULL; Medium::close(fReadSource); // this is assumed to also close fRTPSource fReadSource = NULL; fRTPSource = NULL; delete fRTPSocket; fRTPSocket = NULL; delete fRTCPSocket; fRTCPSocket = NULL; } Boolean MediaSubsession::setClientPortNum(unsigned short portNum) { if (fReadSource != NULL) { env().setResultMsg("A read source has already been created"); return False; } fClientPortNum = portNum; return True; } netAddressBits MediaSubsession::connectionEndpointAddress() const { do { // Get the endpoint name from with us, or our parent session: char const* endpointString = connectionEndpointName(); if (endpointString == NULL) { endpointString = parentSession().connectionEndpointName(); } if (endpointString == NULL) break; // Now, convert this name to an address, if we can: NetAddressList addresses(endpointString); if (addresses.numAddresses() == 0) break; return *(netAddressBits*)(addresses.firstAddress()->data()); } while (0); // No address known: return 0; } void MediaSubsession::setDestinations(netAddressBits defaultDestAddress) { // Get the destination address from the connection endpoint name // (This will be 0 if it's not known, in which case we use the default) netAddressBits destAddress = connectionEndpointAddress(); if (destAddress == 0) destAddress = defaultDestAddress; struct in_addr destAddr; destAddr.s_addr = destAddress; // The destination TTL remains unchanged: int destTTL = ~0; // means: don't change if (fRTPSocket != NULL) { Port destPort(serverPortNum); fRTPSocket->changeDestinationParameters(destAddr, destPort, destTTL); } if (fRTCPSocket != NULL 
&& !isSSM()) { // Note: For SSM sessions, the dest address for RTCP was already set. Port destPort(serverPortNum+1); fRTCPSocket->changeDestinationParameters(destAddr, destPort, destTTL); } } void MediaSubsession::setSessionId(char const* sessionId) { delete[] fSessionId; fSessionId = strDup(sessionId); } double MediaSubsession::getNormalPlayTime(struct timeval const& presentationTime) { if (rtpSource() == NULL || rtpSource()->timestampFrequency() == 0) return 0.0; // no RTP source, or bad freq! // First, check whether our "RTPSource" object has already been synchronized using RTCP. // If it hasn't, then - as a special case - we need to use the RTP timestamp to compute the NPT. if (!rtpSource()->hasBeenSynchronizedUsingRTCP()) { if (!rtpInfo.infoIsNew) return 0.0; // the "rtpInfo" structure has not been filled in u_int32_t timestampOffset = rtpSource()->curPacketRTPTimestamp() - rtpInfo.timestamp; double nptOffset = (timestampOffset/(double)(rtpSource()->timestampFrequency()))*scale(); double npt = playStartTime() + nptOffset; return npt; } else { // Common case: We have been synchronized using RTCP. This means that the "presentationTime" parameter // will be accurate, and so we should use this to compute the NPT. double ptsDouble = (double)(presentationTime.tv_sec + presentationTime.tv_usec/1000000.0); if (rtpInfo.infoIsNew) { // This is the first time we've been called with a synchronized presentation time since the "rtpInfo" // structure was last filled in. Use this "presentationTime" to compute "fNPT_PTS_Offset": if (seqNumLT(rtpSource()->curPacketRTPSeqNum(), rtpInfo.seqNum)) return -0.1; // sanity check; ignore old packets u_int32_t timestampOffset = rtpSource()->curPacketRTPTimestamp() - rtpInfo.timestamp; double nptOffset = (timestampOffset/(double)(rtpSource()->timestampFrequency()))*scale(); double npt = playStartTime() + nptOffset; fNPT_PTS_Offset = npt - ptsDouble*scale(); rtpInfo.infoIsNew = False; // for next time return npt; } else { // Use the precomputed "fNPT_PTS_Offset" to compute the NPT from the PTS: if (fNPT_PTS_Offset == 0.0) return 0.0; // error: The "rtpInfo" structure was apparently never filled in return (double)(ptsDouble*scale() + fNPT_PTS_Offset); } } } Boolean MediaSubsession::parseSDPLine_c(char const* sdpLine) { // Check for "c=IN IP4 " // or "c=IN IP4 /" // (Later, do something with also #####) char* connectionEndpointName = parseCLine(sdpLine); if (connectionEndpointName != NULL) { delete[] fConnectionEndpointName; fConnectionEndpointName = connectionEndpointName; return True; } return False; } Boolean MediaSubsession::parseSDPLine_b(char const* sdpLine) { // Check for "b=:" line // RTP applications are expected to use bwtype="AS" return sscanf(sdpLine, "b=AS:%u", &fBandwidth) == 1; } Boolean MediaSubsession::parseSDPAttribute_rtpmap(char const* sdpLine) { // Check for a "a=rtpmap: /" line: // (Also check without the "/"; RealNetworks omits this) // Also check for a trailing "/". 
Boolean parseSuccess = False; unsigned rtpmapPayloadFormat; char* codecName = strDupSize(sdpLine); // ensures we have enough space unsigned rtpTimestampFrequency = 0; unsigned numChannels = 1; if (sscanf(sdpLine, "a=rtpmap: %u %[^/]/%u/%u", &rtpmapPayloadFormat, codecName, &rtpTimestampFrequency, &numChannels) == 4 || sscanf(sdpLine, "a=rtpmap: %u %[^/]/%u", &rtpmapPayloadFormat, codecName, &rtpTimestampFrequency) == 3 || sscanf(sdpLine, "a=rtpmap: %u %s", &rtpmapPayloadFormat, codecName) == 2) { parseSuccess = True; if (rtpmapPayloadFormat == fRTPPayloadFormat) { // This "rtpmap" matches our payload format, so set our // codec name and timestamp frequency: // (First, make sure the codec name is upper case) { Locale l("POSIX"); for (char* p = codecName; *p != '\0'; ++p) *p = toupper(*p); } delete[] fCodecName; fCodecName = strDup(codecName); fRTPTimestampFrequency = rtpTimestampFrequency; fNumChannels = numChannels; } } delete[] codecName; return parseSuccess; } Boolean MediaSubsession::parseSDPAttribute_control(char const* sdpLine) { // Check for a "a=control:<control-path>" line: Boolean parseSuccess = False; char* controlPath = strDupSize(sdpLine); // ensures we have enough space if (sscanf(sdpLine, "a=control: %s", controlPath) == 1) { parseSuccess = True; delete[] fControlPath; fControlPath = strDup(controlPath); } delete[] controlPath; return parseSuccess; } Boolean MediaSubsession::parseSDPAttribute_range(char const* sdpLine) { // Check for a "a=range:npt=<startTime>-<endTime>" line: // (Later handle other kinds of "a=range" attributes also???#####) Boolean parseSuccess = False; double playStartTime; double playEndTime; if (parseRangeAttribute(sdpLine, playStartTime, playEndTime)) { parseSuccess = True; if (playStartTime > fPlayStartTime) { fPlayStartTime = playStartTime; if (playStartTime > fParent.playStartTime()) { fParent.playStartTime() = playStartTime; } } if (playEndTime > fPlayEndTime) { fPlayEndTime = playEndTime; if (playEndTime > fParent.playEndTime()) { fParent.playEndTime() = playEndTime; } } } else if (parseRangeAttribute(sdpLine, _absStartTime(), _absEndTime())) { parseSuccess = True; } return parseSuccess; } Boolean MediaSubsession::parseSDPAttribute_fmtp(char const* sdpLine) { // Check for a "a=fmtp:" line: // TEMP: We check only for a handful of expected parameter names ##### // Later: (i) check that payload format number matches; ##### // (ii) look for other parameters also (generalize?) ##### do { if (strncmp(sdpLine, "a=fmtp:", 7) != 0) break; sdpLine += 7; while (isdigit(*sdpLine)) ++sdpLine; // The remaining "sdpLine" should be a sequence of // <parameter>=<value>; // parameter assignments. Look at each of these. 
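// (Editor's addition - an illustrative "a=fmtp:" line of the kind parsed
// below; the payload format number and parameter values are hypothetical:)
//   a=fmtp:97 streamtype=5;profile-level-id=1;mode=AAC-hbr;sizelength=13;indexlength=3;indexdeltalength=3;config=1210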
// First, convert the line to lower-case, to ease comparison: char* const lineCopy = strDup(sdpLine); char* line = lineCopy; { Locale l("POSIX"); for (char* c = line; *c != '\0'; ++c) *c = tolower(*c); } while (*line != '\0' && *line != '\r' && *line != '\n') { unsigned u; char* valueStr = strDupSize(line); if (sscanf(line, " auxiliarydatasizelength = %u", &u) == 1) { fAuxiliarydatasizelength = u; } else if (sscanf(line, " constantduration = %u", &u) == 1) { fConstantduration = u; } else if (sscanf(line, " constantsize = %u", &u) == 1) { fConstantsize = u; } else if (sscanf(line, " crc = %u", &u) == 1) { fCRC = u; } else if (sscanf(line, " ctsdeltalength = %u", &u) == 1) { fCtsdeltalength = u; } else if (sscanf(line, " de-interleavebuffersize = %u", &u) == 1) { fDe_interleavebuffersize = u; } else if (sscanf(line, " dtsdeltalength = %u", &u) == 1) { fDtsdeltalength = u; } else if (sscanf(line, " indexdeltalength = %u", &u) == 1) { fIndexdeltalength = u; } else if (sscanf(line, " indexlength = %u", &u) == 1) { fIndexlength = u; } else if (sscanf(line, " interleaving = %u", &u) == 1) { fInterleaving = u; } else if (sscanf(line, " maxdisplacement = %u", &u) == 1) { fMaxdisplacement = u; } else if (sscanf(line, " objecttype = %u", &u) == 1) { fObjecttype = u; } else if (sscanf(line, " octet-align = %u", &u) == 1) { fOctetalign = u; } else if (sscanf(line, " profile-level-id = %x", &u) == 1) { // Note that the "profile-level-id" parameter is assumed to be hexadecimal fProfile_level_id = u; } else if (sscanf(line, " robust-sorting = %u", &u) == 1) { fRobustsorting = u; } else if (sscanf(line, " sizelength = %u", &u) == 1) { fSizelength = u; } else if (sscanf(line, " streamstateindication = %u", &u) == 1) { fStreamstateindication = u; } else if (sscanf(line, " streamtype = %u", &u) == 1) { fStreamtype = u; } else if (sscanf(line, " cpresent = %u", &u) == 1) { fCpresent = u != 0; } else if (sscanf(line, " randomaccessindication = %u", &u) == 1) { fRandomaccessindication = u != 0; } else if (sscanf(sdpLine, " config = %[^; \t\r\n]", valueStr) == 1 || sscanf(sdpLine, " configuration = %[^; \t\r\n]", valueStr) == 1) { // Note: We used "sdpLine" here, because the value may be case-sensitive (if it's Base-64). delete[] fConfig; fConfig = strDup(valueStr); } else if (sscanf(line, " mode = %[^; \t\r\n]", valueStr) == 1) { delete[] fMode; fMode = strDup(valueStr); } else if (sscanf(sdpLine, " sprop-parameter-sets = %[^; \t\r\n]", valueStr) == 1) { // Note: We used "sdpLine" here, because the value is case-sensitive. delete[] fSpropParameterSets; fSpropParameterSets = strDup(valueStr); } else if (sscanf(line, " emphasis = %[^; \t\r\n]", valueStr) == 1) { delete[] fEmphasis; fEmphasis = strDup(valueStr); } else if (sscanf(sdpLine, " channel-order = %[^; \t\r\n]", valueStr) == 1) { // Note: We used "sdpLine" here, because the value is case-sensitive. delete[] fChannelOrder; fChannelOrder = strDup(valueStr); } else if (sscanf(line, " width = %u", &u) == 1) { // A non-standard parameter, but one that's often used: fVideoWidth = u; } else if (sscanf(line, " height = %u", &u) == 1) { // A non-standard parameter, but one that's often used: fVideoHeight = u; } else { // Some of the above parameters are Boolean. 
Check whether the parameter // names appear alone, without a "= 1" at the end: if (sscanf(line, " %[^; \t\r\n]", valueStr) == 1) { if (strcmp(valueStr, "octet-align") == 0) { fOctetalign = 1; } else if (strcmp(valueStr, "cpresent") == 0) { fCpresent = True; } else if (strcmp(valueStr, "crc") == 0) { fCRC = 1; } else if (strcmp(valueStr, "robust-sorting") == 0) { fRobustsorting = 1; } else if (strcmp(valueStr, "randomaccessindication") == 0) { fRandomaccessindication = True; } } } delete[] valueStr; // Move to the next parameter assignment string: while (*line != '\0' && *line != '\r' && *line != '\n' && *line != ';') ++line; while (*line == ';') ++line; // Do the same with sdpLine; needed for finding case sensitive values: while (*sdpLine != '\0' && *sdpLine != '\r' && *sdpLine != '\n' && *sdpLine != ';') ++sdpLine; while (*sdpLine == ';') ++sdpLine; } delete[] lineCopy; return True; } while (0); return False; } Boolean MediaSubsession ::parseSDPAttribute_source_filter(char const* sdpLine) { return parseSourceFilterAttribute(sdpLine, fSourceFilterAddr); } Boolean MediaSubsession::parseSDPAttribute_x_dimensions(char const* sdpLine) { // Check for a "a=x-dimensions:," line: Boolean parseSuccess = False; int width, height; if (sscanf(sdpLine, "a=x-dimensions:%d,%d", &width, &height) == 2) { parseSuccess = True; fVideoWidth = (unsigned short)width; fVideoHeight = (unsigned short)height; } return parseSuccess; } Boolean MediaSubsession::parseSDPAttribute_framerate(char const* sdpLine) { // Check for a "a=framerate: " or "a=x-framerate: " line: Boolean parseSuccess = False; float frate; int rate; if (sscanf(sdpLine, "a=framerate: %f", &frate) == 1 || sscanf(sdpLine, "a=framerate:%f", &frate) == 1) { parseSuccess = True; fVideoFPS = (unsigned)frate; } else if (sscanf(sdpLine, "a=x-framerate: %d", &rate) == 1) { parseSuccess = True; fVideoFPS = (unsigned)rate; } return parseSuccess; } Boolean MediaSubsession::createSourceObjects(int useSpecialRTPoffset) { do { // First, check "fProtocolName" if (strcmp(fProtocolName, "UDP") == 0) { // A UDP-packetized stream (*not* a RTP stream) fReadSource = BasicUDPSource::createNew(env(), fRTPSocket); fRTPSource = NULL; // Note! 
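// (Editor's note: in the raw-UDP case above there are no RTP headers, so no
// RTPSource is created and "fRTPSource" is deliberately left NULL - RTCP and
// sequence-number-based processing are unavailable for such streams. The MP2T
// check that follows additionally wraps the source in a framer that derives
// frame durations from the stream's PCR values.)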
if (strcmp(fCodecName, "MP2T") == 0) { // MPEG-2 Transport Stream fReadSource = MPEG2TransportStreamFramer::createNew(env(), fReadSource); // this sets "durationInMicroseconds" correctly, based on the PCR values } } else { // Check "fCodecName" against the set of codecs that we support, // and create our RTP source accordingly // (Later make this code more efficient, as this set grows #####) // (Also, add more fmts that can be implemented by SimpleRTPSource#####) Boolean createSimpleRTPSource = False; // by default; can be changed below Boolean doNormalMBitRule = False; // default behavior if "createSimpleRTPSource" is True if (strcmp(fCodecName, "QCELP") == 0) { // QCELP audio fReadSource = QCELPAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource, fRTPPayloadFormat, fRTPTimestampFrequency); // Note that fReadSource will differ from fRTPSource in this case } else if (strcmp(fCodecName, "AMR") == 0) { // AMR audio (narrowband) fReadSource = AMRAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource, fRTPPayloadFormat, False /*isWideband*/, fNumChannels, fOctetalign != 0, fInterleaving, fRobustsorting != 0, fCRC != 0); // Note that fReadSource will differ from fRTPSource in this case } else if (strcmp(fCodecName, "AMR-WB") == 0) { // AMR audio (wideband) fReadSource = AMRAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource, fRTPPayloadFormat, True /*isWideband*/, fNumChannels, fOctetalign != 0, fInterleaving, fRobustsorting != 0, fCRC != 0); // Note that fReadSource will differ from fRTPSource in this case } else if (strcmp(fCodecName, "MPA") == 0) { // MPEG-1 or 2 audio fReadSource = fRTPSource = MPEG1or2AudioRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if (strcmp(fCodecName, "MPA-ROBUST") == 0) { // robust MP3 audio fReadSource = fRTPSource = MP3ADURTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); if (fRTPSource == NULL) break; if (!fReceiveRawMP3ADUs) { // Add a filter that deinterleaves the ADUs after depacketizing them: MP3ADUdeinterleaver* deinterleaver = MP3ADUdeinterleaver::createNew(env(), fRTPSource); if (deinterleaver == NULL) break; // Add another filter that converts these ADUs to MP3 frames: fReadSource = MP3FromADUSource::createNew(env(), deinterleaver); } } else if (strcmp(fCodecName, "X-MP3-DRAFT-00") == 0) { // a non-standard variant of "MPA-ROBUST" used by RealNetworks // (one 'ADU'ized MP3 frame per packet; no headers) fRTPSource = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency, "audio/MPA-ROBUST" /*hack*/); if (fRTPSource == NULL) break; // Add a filter that converts these ADUs to MP3 frames: fReadSource = MP3FromADUSource::createNew(env(), fRTPSource, False /*no ADU header*/); } else if (strcmp(fCodecName, "MP4A-LATM") == 0) { // MPEG-4 LATM audio fReadSource = fRTPSource = MPEG4LATMAudioRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if (strcmp(fCodecName, "VORBIS") == 0) { // Vorbis audio fReadSource = fRTPSource = VorbisAudioRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if (strcmp(fCodecName, "VP8") == 0) { // VP8 video fReadSource = fRTPSource = VP8VideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if (strcmp(fCodecName, "AC3") == 0 || strcmp(fCodecName, "EAC3") == 0) { // AC3 audio fReadSource = fRTPSource = AC3AudioRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if 
(strcmp(fCodecName, "MP4V-ES") == 0) { // MPEG-4 Elementary Stream video fReadSource = fRTPSource = MPEG4ESVideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if (strcmp(fCodecName, "MPEG4-GENERIC") == 0) { fReadSource = fRTPSource = MPEG4GenericRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency, fMediumName, fMode, fSizelength, fIndexlength, fIndexdeltalength); } else if (strcmp(fCodecName, "MPV") == 0) { // MPEG-1 or 2 video fReadSource = fRTPSource = MPEG1or2VideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if (strcmp(fCodecName, "MP2T") == 0) { // MPEG-2 Transport Stream fRTPSource = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency, "video/MP2T", 0, False); fReadSource = MPEG2TransportStreamFramer::createNew(env(), fRTPSource); // this sets "durationInMicroseconds" correctly, based on the PCR values } else if (strcmp(fCodecName, "H261") == 0) { // H.261 fReadSource = fRTPSource = H261VideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if (strcmp(fCodecName, "H263-1998") == 0 || strcmp(fCodecName, "H263-2000") == 0) { // H.263+ fReadSource = fRTPSource = H263plusVideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if (strcmp(fCodecName, "H264") == 0) { fReadSource = fRTPSource = H264VideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if (strcmp(fCodecName, "DV") == 0) { fReadSource = fRTPSource = DVVideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency); } else if (strcmp(fCodecName, "JPEG") == 0) { // motion JPEG if (fReceiveRawJPEGFrames) { // Special case (used when proxying JPEG/RTP streams): Receive each JPEG/RTP packet, including the special RTP headers: fReadSource = fRTPSource = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency, "video/JPEG", 0/*special offset*/, False/*doNormalMBitRule => ignore the 'M' bit*/); } else { // Normal case: Receive each JPEG frame as a complete, displayable JPEG image: fReadSource = fRTPSource = JPEGVideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency, videoWidth(), videoHeight()); } } else if (strcmp(fCodecName, "X-QT") == 0 || strcmp(fCodecName, "X-QUICKTIME") == 0) { // Generic QuickTime streams, as defined in // char* mimeType = new char[strlen(mediumName()) + strlen(codecName()) + 2] ; sprintf(mimeType, "%s/%s", mediumName(), codecName()); fReadSource = fRTPSource = QuickTimeGenericRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency, mimeType); delete[] mimeType; } else if ( strcmp(fCodecName, "PCMU") == 0 // PCM u-law audio || strcmp(fCodecName, "GSM") == 0 // GSM audio || strcmp(fCodecName, "DVI4") == 0 // DVI4 (IMA ADPCM) audio || strcmp(fCodecName, "PCMA") == 0 // PCM a-law audio || strcmp(fCodecName, "MP1S") == 0 // MPEG-1 System Stream || strcmp(fCodecName, "MP2P") == 0 // MPEG-2 Program Stream || strcmp(fCodecName, "L8") == 0 // 8-bit linear audio || strcmp(fCodecName, "L16") == 0 // 16-bit linear audio || strcmp(fCodecName, "L20") == 0 // 20-bit linear audio (RFC 3190) || strcmp(fCodecName, "L24") == 0 // 24-bit linear audio (RFC 3190) || strcmp(fCodecName, "G726-16") == 0 // G.726, 16 kbps || strcmp(fCodecName, "G726-24") == 0 // G.726, 24 kbps || strcmp(fCodecName, "G726-32") == 0 // G.726, 32 kbps || 
strcmp(fCodecName, "G726-40") == 0 // G.726, 40 kbps || strcmp(fCodecName, "SPEEX") == 0 // SPEEX audio || strcmp(fCodecName, "ILBC") == 0 // iLBC audio || strcmp(fCodecName, "OPUS") == 0 // Opus audio || strcmp(fCodecName, "T140") == 0 // T.140 text (RFC 4103) || strcmp(fCodecName, "DAT12") == 0 // 12-bit nonlinear audio (RFC 3190) || strcmp(fCodecName, "VND.ONVIF.METADATA") == 0 // 'ONVIF' 'metadata' (a XML document) ) { createSimpleRTPSource = True; useSpecialRTPoffset = 0; if (strcmp(fCodecName, "VND.ONVIF.METADATA") == 0) { // This RTP payload format uses the RTP "M" bit to indicate the end of the content (a XML document): doNormalMBitRule = True; } } else if (useSpecialRTPoffset >= 0) { // We don't know this RTP payload format, but try to receive // it using a 'SimpleRTPSource' with the specified header offset: createSimpleRTPSource = True; } else { env().setResultMsg("RTP payload format unknown or not supported"); break; } if (createSimpleRTPSource) { char* mimeType = new char[strlen(mediumName()) + strlen(codecName()) + 2] ; sprintf(mimeType, "%s/%s", mediumName(), codecName()); fReadSource = fRTPSource = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency, mimeType, (unsigned)useSpecialRTPoffset, doNormalMBitRule); delete[] mimeType; } } return True; } while (0); return False; // an error occurred } live/liveMedia/MP3Internals.hh000444 001751 000000 00000010216 12265042432 016436 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// MP3 internal implementation details // C++ header #ifndef _MP3_INTERNALS_HH #define _MP3_INTERNALS_HH #ifndef _BOOLEAN_HH #include "Boolean.hh" #endif #ifndef _BIT_VECTOR_HH #include "BitVector.hh" #endif typedef struct MP3SideInfo { unsigned main_data_begin; unsigned private_bits; typedef struct gr_info_s { int scfsi; unsigned part2_3_length; unsigned big_values; unsigned global_gain; unsigned scalefac_compress; unsigned window_switching_flag; unsigned block_type; unsigned mixed_block_flag; unsigned table_select[3]; unsigned region0_count; unsigned region1_count; unsigned subblock_gain[3]; unsigned maxband[3]; unsigned maxbandl; unsigned maxb; unsigned region1start; unsigned region2start; unsigned preflag; unsigned scalefac_scale; unsigned count1table_select; double *full_gain[3]; double *pow2gain; } gr_info_s_t; struct { gr_info_s_t gr[2]; } ch[2]; } MP3SideInfo_t; #define SBLIMIT 32 #define MAX_MP3_FRAME_SIZE 2500 /* also big enough for an 'ADU'ized frame */ class MP3FrameParams { public: MP3FrameParams(); ~MP3FrameParams(); // 4-byte MPEG header: unsigned hdr; // a buffer that can be used to hold the rest of the frame: unsigned char frameBytes[MAX_MP3_FRAME_SIZE]; // public parameters derived from the header void setParamsFromHeader(); // this sets them Boolean isMPEG2; unsigned layer; // currently only 3 is supported unsigned bitrate; // in kbps unsigned samplingFreq; Boolean isStereo; Boolean isFreeFormat; unsigned frameSize; // doesn't include the initial 4-byte header unsigned sideInfoSize; Boolean hasCRC; void setBytePointer(unsigned char const* restOfFrame, unsigned totNumBytes) {// called during setup bv.setup((unsigned char*)restOfFrame, 0, 8*totNumBytes); } // other, public parameters used when parsing input (perhaps get rid of) unsigned oldHdr, firstHdr; // Extract (unpack) the side info from the frame into a struct: void getSideInfo(MP3SideInfo& si); // The bit pointer used for reading data from frame data unsigned getBits(unsigned numBits) { return bv.getBits(numBits); } unsigned get1Bit() { return bv.get1Bit(); } private: BitVector bv; // other, private parameters derived from the header unsigned bitrateIndex; unsigned samplingFreqIndex; Boolean isMPEG2_5; Boolean padding; Boolean extension; unsigned mode; unsigned mode_ext; Boolean copyright; Boolean original; unsigned emphasis; unsigned stereo; private: unsigned computeSideInfoSize(); }; unsigned ComputeFrameSize(unsigned bitrate, unsigned samplingFreq, Boolean usePadding, Boolean isMPEG2, unsigned char layer); Boolean GetADUInfoFromMP3Frame(unsigned char const* framePtr, unsigned totFrameSize, unsigned& hdr, unsigned& frameSize, MP3SideInfo& sideInfo, unsigned& sideInfoSize, unsigned& backpointer, unsigned& aduSize); Boolean ZeroOutMP3SideInfo(unsigned char* framePtr, unsigned totFrameSize, unsigned newBackpointer); unsigned TranscodeMP3ADU(unsigned char const* fromPtr, unsigned fromSize, unsigned toBitrate, unsigned char* toPtr, unsigned toMaxSize, unsigned& availableBytesForBackpointer); // returns the size of the resulting ADU (0 on failure) #endif live/liveMedia/BitVector.cpp000444 001751 000000 00000011716 12265042432 016251 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Bit Vector data structure // Implementation #include "BitVector.hh" BitVector::BitVector(unsigned char* baseBytePtr, unsigned baseBitOffset, unsigned totNumBits) { setup(baseBytePtr, baseBitOffset, totNumBits); } void BitVector::setup(unsigned char* baseBytePtr, unsigned baseBitOffset, unsigned totNumBits) { fBaseBytePtr = baseBytePtr; fBaseBitOffset = baseBitOffset; fTotNumBits = totNumBits; fCurBitIndex = 0; } static unsigned char const singleBitMask[8] = {0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01}; #define MAX_LENGTH 32 void BitVector::putBits(unsigned from, unsigned numBits) { if (numBits == 0) return; unsigned char tmpBuf[4]; unsigned overflowingBits = 0; if (numBits > MAX_LENGTH) { numBits = MAX_LENGTH; } if (numBits > fTotNumBits - fCurBitIndex) { overflowingBits = numBits - (fTotNumBits - fCurBitIndex); } tmpBuf[0] = (unsigned char)(from>>24); tmpBuf[1] = (unsigned char)(from>>16); tmpBuf[2] = (unsigned char)(from>>8); tmpBuf[3] = (unsigned char)from; shiftBits(fBaseBytePtr, fBaseBitOffset + fCurBitIndex, /* to */ tmpBuf, MAX_LENGTH - numBits, /* from */ numBits - overflowingBits /* num bits */); fCurBitIndex += numBits - overflowingBits; } void BitVector::put1Bit(unsigned bit) { // The following is equivalent to "putBits(..., 1)", except faster: if (fCurBitIndex >= fTotNumBits) { /* overflow */ return; } else { unsigned totBitOffset = fBaseBitOffset + fCurBitIndex++; unsigned char mask = singleBitMask[totBitOffset%8]; if (bit) { fBaseBytePtr[totBitOffset/8] |= mask; } else { fBaseBytePtr[totBitOffset/8] &=~ mask; } } } unsigned BitVector::getBits(unsigned numBits) { if (numBits == 0) return 0; unsigned char tmpBuf[4]; unsigned overflowingBits = 0; if (numBits > MAX_LENGTH) { numBits = MAX_LENGTH; } if (numBits > fTotNumBits - fCurBitIndex) { overflowingBits = numBits - (fTotNumBits - fCurBitIndex); } shiftBits(tmpBuf, 0, /* to */ fBaseBytePtr, fBaseBitOffset + fCurBitIndex, /* from */ numBits - overflowingBits /* num bits */); fCurBitIndex += numBits - overflowingBits; unsigned result = (tmpBuf[0]<<24) | (tmpBuf[1]<<16) | (tmpBuf[2]<<8) | tmpBuf[3]; result >>= (MAX_LENGTH - numBits); // move into low-order part of word result &= (0xFFFFFFFF << overflowingBits); // so any overflow bits are 0 return result; } unsigned BitVector::get1Bit() { // The following is equivalent to "getBits(1)", except faster: if (fCurBitIndex >= fTotNumBits) { /* overflow */ return 0; } else { unsigned totBitOffset = fBaseBitOffset + fCurBitIndex++; unsigned char curFromByte = fBaseBytePtr[totBitOffset/8]; unsigned result = (curFromByte >> (7-(totBitOffset%8))) & 0x01; return result; } } void BitVector::skipBits(unsigned numBits) { if (numBits > fTotNumBits - fCurBitIndex) { /* overflow */ fCurBitIndex = fTotNumBits; } else { fCurBitIndex += numBits; } } unsigned BitVector::get_expGolomb() { unsigned numLeadingZeroBits = 0; unsigned codeStart = 1; while (get1Bit() == 0 && fCurBitIndex < fTotNumBits) { ++numLeadingZeroBits; codeStart *= 2; } return codeStart - 1 + 
getBits(numLeadingZeroBits); } void shiftBits(unsigned char* toBasePtr, unsigned toBitOffset, unsigned char const* fromBasePtr, unsigned fromBitOffset, unsigned numBits) { if (numBits == 0) return; /* Note that from and to may overlap, if from>to */ unsigned char const* fromBytePtr = fromBasePtr + fromBitOffset/8; unsigned fromBitRem = fromBitOffset%8; unsigned char* toBytePtr = toBasePtr + toBitOffset/8; unsigned toBitRem = toBitOffset%8; while (numBits-- > 0) { unsigned char fromBitMask = singleBitMask[fromBitRem]; unsigned char fromBit = (*fromBytePtr)&fromBitMask; unsigned char toBitMask = singleBitMask[toBitRem]; if (fromBit != 0) { *toBytePtr |= toBitMask; } else { *toBytePtr &=~ toBitMask; } if (++fromBitRem == 8) { ++fromBytePtr; fromBitRem = 0; } if (++toBitRem == 8) { ++toBytePtr; toBitRem = 0; } } } live/liveMedia/MPEG1or2AudioStreamFramer.cpp000444 001751 000000 00000015243 12265042432 021076 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up an MPEG (1,2) audio elementary stream into frames // Implementation #include "MPEG1or2AudioStreamFramer.hh" #include "StreamParser.hh" #include "MP3Internals.hh" #include ////////// MPEG1or2AudioStreamParser definition ////////// class MPEG1or2AudioStreamParser: public StreamParser { public: MPEG1or2AudioStreamParser(MPEG1or2AudioStreamFramer* usingSource, FramedSource* inputSource); virtual ~MPEG1or2AudioStreamParser(); public: unsigned parse(unsigned& numTruncatedBytes); // returns the size of the frame that was acquired, or 0 if none was void registerReadInterest(unsigned char* to, unsigned maxSize); MP3FrameParams const& currentFrame() const { return fCurrentFrame; } private: unsigned char* fTo; unsigned fMaxSize; // Parameters of the most recently read frame: MP3FrameParams fCurrentFrame; // also works for layer I or II }; ////////// MPEG1or2AudioStreamFramer implementation ////////// MPEG1or2AudioStreamFramer ::MPEG1or2AudioStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean syncWithInputSource) : FramedFilter(env, inputSource), fSyncWithInputSource(syncWithInputSource) { reset(); fParser = new MPEG1or2AudioStreamParser(this, inputSource); } MPEG1or2AudioStreamFramer::~MPEG1or2AudioStreamFramer() { delete fParser; } MPEG1or2AudioStreamFramer* MPEG1or2AudioStreamFramer::createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean syncWithInputSource) { // Need to add source type checking here??? 
##### return new MPEG1or2AudioStreamFramer(env, inputSource, syncWithInputSource); } void MPEG1or2AudioStreamFramer::flushInput() { reset(); fParser->flushInput(); } void MPEG1or2AudioStreamFramer::reset() { // Use the current wallclock time as the initial 'presentation time': struct timeval timeNow; gettimeofday(&timeNow, NULL); resetPresentationTime(timeNow); } void MPEG1or2AudioStreamFramer ::resetPresentationTime(struct timeval newPresentationTime) { fNextFramePresentationTime = newPresentationTime; } void MPEG1or2AudioStreamFramer::doGetNextFrame() { fParser->registerReadInterest(fTo, fMaxSize); continueReadProcessing(); } #define MILLION 1000000 static unsigned const numSamplesByLayer[4] = {0, 384, 1152, 1152}; struct timeval MPEG1or2AudioStreamFramer::currentFramePlayTime() const { MP3FrameParams const& fr = fParser->currentFrame(); unsigned const numSamples = numSamplesByLayer[fr.layer]; struct timeval result; unsigned const freq = fr.samplingFreq*(1 + fr.isMPEG2); if (freq == 0) { result.tv_sec = 0; result.tv_usec = 0; return result; } // result is numSamples/freq unsigned const uSeconds = ((numSamples*2*MILLION)/freq + 1)/2; // rounds to nearest integer result.tv_sec = uSeconds/MILLION; result.tv_usec = uSeconds%MILLION; return result; } void MPEG1or2AudioStreamFramer ::continueReadProcessing(void* clientData, unsigned char* /*ptr*/, unsigned /*size*/, struct timeval presentationTime) { MPEG1or2AudioStreamFramer* framer = (MPEG1or2AudioStreamFramer*)clientData; if (framer->fSyncWithInputSource) { framer->resetPresentationTime(presentationTime); } framer->continueReadProcessing(); } void MPEG1or2AudioStreamFramer::continueReadProcessing() { unsigned acquiredFrameSize = fParser->parse(fNumTruncatedBytes); if (acquiredFrameSize > 0) { // We were able to acquire a frame from the input. // It has already been copied to the reader's space. fFrameSize = acquiredFrameSize; // Also set the presentation time, and increment it for next time, // based on the length of this frame: fPresentationTime = fNextFramePresentationTime; struct timeval framePlayTime = currentFramePlayTime(); fDurationInMicroseconds = framePlayTime.tv_sec*MILLION + framePlayTime.tv_usec; fNextFramePresentationTime.tv_usec += framePlayTime.tv_usec; fNextFramePresentationTime.tv_sec += framePlayTime.tv_sec + fNextFramePresentationTime.tv_usec/MILLION; fNextFramePresentationTime.tv_usec %= MILLION; // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. afterGetting(this); } else { // We were unable to parse a complete frame from the input, because: // - we had to read more data from the source stream, or // - the source stream has ended. 
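// (Editor's worked example for "currentFramePlayTime()" above, assuming a
// typical MPEG-1 Layer III frame: numSamples = 1152, samplingFreq = 44100,
// isMPEG2 = 0, so freq = 44100 and
//   uSeconds = ((1152*2*1000000)/44100 + 1)/2 = 26122,
// i.e. each frame advances the presentation time by about 26.1 ms.)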
} } ////////// MPEG1or2AudioStreamParser implementation ////////// MPEG1or2AudioStreamParser ::MPEG1or2AudioStreamParser(MPEG1or2AudioStreamFramer* usingSource, FramedSource* inputSource) : StreamParser(inputSource, FramedSource::handleClosure, usingSource, &MPEG1or2AudioStreamFramer::continueReadProcessing, usingSource) { } MPEG1or2AudioStreamParser::~MPEG1or2AudioStreamParser() { } void MPEG1or2AudioStreamParser::registerReadInterest(unsigned char* to, unsigned maxSize) { fTo = to; fMaxSize = maxSize; } unsigned MPEG1or2AudioStreamParser::parse(unsigned& numTruncatedBytes) { try { saveParserState(); // We expect a MPEG audio header (first 11 bits set to 1) at the start: while (((fCurrentFrame.hdr = test4Bytes())&0xFFE00000) != 0xFFE00000) { skipBytes(1); saveParserState(); } fCurrentFrame.setParamsFromHeader(); // Copy the frame to the requested destination: unsigned frameSize = fCurrentFrame.frameSize + 4; // include header if (frameSize > fMaxSize) { numTruncatedBytes = frameSize - fMaxSize; frameSize = fMaxSize; } else { numTruncatedBytes = 0; } getBytes(fTo, frameSize); skipBytes(numTruncatedBytes); return frameSize; } catch (int /*e*/) { #ifdef DEBUG fprintf(stderr, "MPEG1or2AudioStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n"); #endif return 0; // the parsing got interrupted } } live/liveMedia/MP3ADUTranscoder.cpp000444 001751 000000 00000006463 12265042432 017331 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
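// (Editor's addition: a minimal standalone sketch of the resynchronization
// test used by "MPEG1or2AudioStreamParser::parse()" above; the helper name is
// hypothetical and is not part of the library.)
static inline bool looksLikeMPEGAudioSyncWord(unsigned hdr) {
  // An MPEG audio frame header begins with an 11-bit syncword of one-bits;
  // the parser skips bytes until a 4-byte window satisfies this test:
  return (hdr & 0xFFE00000) == 0xFFE00000;
}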
// Transcoder for ADUized MP3 frames // Implementation #include "MP3ADUTranscoder.hh" #include "MP3Internals.hh" #include MP3ADUTranscoder::MP3ADUTranscoder(UsageEnvironment& env, unsigned outBitrate /* in kbps */, FramedSource* inputSource) : FramedFilter(env, inputSource), fOutBitrate(outBitrate), fAvailableBytesForBackpointer(0), fOrigADU(new unsigned char[MAX_MP3_FRAME_SIZE]) { } MP3ADUTranscoder::~MP3ADUTranscoder() { delete[] fOrigADU; } MP3ADUTranscoder* MP3ADUTranscoder::createNew(UsageEnvironment& env, unsigned outBitrate /* in kbps */, FramedSource* inputSource) { // The source must be an MP3 ADU source: if (strcmp(inputSource->MIMEtype(), "audio/MPA-ROBUST") != 0) { env.setResultMsg(inputSource->name(), " is not an MP3 ADU source"); return NULL; } return new MP3ADUTranscoder(env, outBitrate, inputSource); } void MP3ADUTranscoder::getAttributes() const { // Begin by getting the attributes from our input source: fInputSource->getAttributes(); // Then modify them by appending the corrected bandwidth char buffer[30]; sprintf(buffer, " bandwidth %d", outBitrate()); envir().appendToResultMsg(buffer); } void MP3ADUTranscoder::doGetNextFrame() { fInputSource->getNextFrame(fOrigADU, MAX_MP3_FRAME_SIZE, afterGettingFrame, this, handleClosure, this); } void MP3ADUTranscoder::afterGettingFrame(void* clientData, unsigned numBytesRead, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { MP3ADUTranscoder* transcoder = (MP3ADUTranscoder*)clientData; transcoder->afterGettingFrame1(numBytesRead, numTruncatedBytes, presentationTime, durationInMicroseconds); } void MP3ADUTranscoder::afterGettingFrame1(unsigned numBytesRead, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { fNumTruncatedBytes = numTruncatedBytes; // but can we handle this being >0? ##### fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; fFrameSize = TranscodeMP3ADU(fOrigADU, numBytesRead, fOutBitrate, fTo, fMaxSize, fAvailableBytesForBackpointer); if (fFrameSize == 0) { // internal error - bad ADU data? handleClosure(this); return; } // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. afterGetting(this); } live/liveMedia/MPEG1or2VideoRTPSink.cpp000444 001751 000000 00000015165 12265042432 020010 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
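// (Editor's note, summarizing the RFC 2250 MPEG video-specific header that
// "doSpecialFrameHandling()" below assembles into "videoSpecificHeader":
//   bits 16-25: TR (temporal_reference)
//   bit  13:    S  (a sequence header is present)
//   bit  12:    B  (this packet begins a slice)
//   bit  11:    E  (this packet ends a slice)
//   bits 8-10:  P  (picture_coding_type)
//   bits 0-7:   FBV/BFC/FFV/FFC motion-vector code bits
// which matches the shift amounts used in that code.)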
// RTP sink for MPEG video (RFC 2250) // Implementation #include "MPEG1or2VideoRTPSink.hh" #include "MPEG1or2VideoStreamFramer.hh" MPEG1or2VideoRTPSink::MPEG1or2VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs) : VideoRTPSink(env, RTPgs, 32, 90000, "MPV") { fPictureState.temporal_reference = 0; fPictureState.picture_coding_type = fPictureState.vector_code_bits = 0; } MPEG1or2VideoRTPSink::~MPEG1or2VideoRTPSink() { } MPEG1or2VideoRTPSink* MPEG1or2VideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) { return new MPEG1or2VideoRTPSink(env, RTPgs); } Boolean MPEG1or2VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) { // Our source must be an appropriate framer: return source.isMPEG1or2VideoStreamFramer(); } Boolean MPEG1or2VideoRTPSink::allowFragmentationAfterStart() const { return True; } Boolean MPEG1or2VideoRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const { // A 'frame' (which in this context can mean a header or a slice as well as a // complete picture) can appear at other than the first position in a packet // in all situations, EXCEPT when it follows the end of (i.e., the last slice // of) a picture. I.e., the headers at the beginning of a picture must // appear at the start of a RTP packet. if (!fPreviousFrameWasSlice) return True; // A slice is already packed into this packet. We allow this new 'frame' // to be packed after it, provided that it is also a slice: return numBytesInFrame >= 4 && frameStart[0] == 0 && frameStart[1] == 0 && frameStart[2] == 1 && frameStart[3] >= 1 && frameStart[3] <= 0xAF; } #define VIDEO_SEQUENCE_HEADER_START_CODE 0x000001B3 #define PICTURE_START_CODE 0x00000100 void MPEG1or2VideoRTPSink ::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { Boolean thisFrameIsASlice = False; // until we learn otherwise if (isFirstFrameInPacket()) { fSequenceHeaderPresent = fPacketBeginsSlice = fPacketEndsSlice = False; } if (fragmentationOffset == 0) { // Begin by inspecting the 4-byte code at the start of the frame: if (numBytesInFrame < 4) return; // shouldn't happen unsigned startCode = (frameStart[0]<<24) | (frameStart[1]<<16) | (frameStart[2]<<8) | frameStart[3]; if (startCode == VIDEO_SEQUENCE_HEADER_START_CODE) { // This is a video sequence header fSequenceHeaderPresent = True; } else if (startCode == PICTURE_START_CODE) { // This is a picture header // Record the parameters of this picture: if (numBytesInFrame < 8) return; // shouldn't happen unsigned next4Bytes = (frameStart[4]<<24) | (frameStart[5]<<16) | (frameStart[6]<<8) | frameStart[7]; unsigned char byte8 = numBytesInFrame == 8 ? 
0 : frameStart[8]; fPictureState.temporal_reference = (next4Bytes&0xFFC00000)>>(32-10); fPictureState.picture_coding_type = (next4Bytes&0x00380000)>>(32-(10+3)); unsigned char FBV, BFC, FFV, FFC; FBV = BFC = FFV = FFC = 0; switch (fPictureState.picture_coding_type) { case 3: FBV = (byte8&0x40)>>6; BFC = (byte8&0x38)>>3; // fall through to: case 2: FFV = (next4Bytes&0x00000004)>>2; FFC = ((next4Bytes&0x00000003)<<1) | ((byte8&0x80)>>7); } fPictureState.vector_code_bits = (FBV<<7) | (BFC<<4) | (FFV<<3) | FFC; } else if ((startCode&0xFFFFFF00) == 0x00000100) { unsigned char lastCodeByte = startCode&0xFF; if (lastCodeByte <= 0xAF) { // This is (the start of) a slice thisFrameIsASlice = True; } else { // This is probably a GOP header; we don't do anything with this } } else { // The first 4 bytes aren't a code that we recognize. envir() << "Warning: MPEG1or2VideoRTPSink::doSpecialFrameHandling saw strange first 4 bytes " << (void*)startCode << ", but we're not a fragment\n"; } } else { // We're a fragment (other than the first) of a slice. thisFrameIsASlice = True; } if (thisFrameIsASlice) { // This packet begins a slice iff there's no fragmentation offset: fPacketBeginsSlice = (fragmentationOffset == 0); // This packet also ends a slice iff there are no fragments remaining: fPacketEndsSlice = (numRemainingBytes == 0); } // Set the video-specific header based on the parameters that we've seen. // Note that this may get done more than once, if several frames appear // in the packet. That's OK, because this situation happens infrequently, // and we want the video-specific header to reflect the most up-to-date // information (in particular, from a Picture Header) anyway. unsigned videoSpecificHeader = // T == 0 (fPictureState.temporal_reference<<16) | // AN == N == 0 (fSequenceHeaderPresent<<13) | (fPacketBeginsSlice<<12) | (fPacketEndsSlice<<11) | (fPictureState.picture_coding_type<<8) | fPictureState.vector_code_bits; setSpecialHeaderWord(videoSpecificHeader); // Also set the RTP timestamp. (As above, we do this for each frame // in the packet.) setTimestamp(framePresentationTime); // Set the RTP 'M' (marker) bit iff this frame ends (i.e., is the last // slice of) a picture (and there are no fragments remaining). // This relies on the source being a "MPEG1or2VideoStreamFramer". MPEG1or2VideoStreamFramer* framerSource = (MPEG1or2VideoStreamFramer*)fSource; if (framerSource != NULL && framerSource->pictureEndMarker() && numRemainingBytes == 0) { setMarkerBit(); framerSource->pictureEndMarker() = False; } fPreviousFrameWasSlice = thisFrameIsASlice; } unsigned MPEG1or2VideoRTPSink::specialHeaderSize() const { // There's a 4 byte special video header: return 4; } live/liveMedia/MP3InternalsHuffman.hh000444 001751 000000 00000005141 12265042432 017744 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // MP3 internal implementation details (Huffman encoding) // C++ header #ifndef _MP3_INTERNALS_HUFFMAN_HH #define _MP3_INTERNALS_HUFFMAN_HH #ifndef _MP3_INTERNALS_HH #include "MP3Internals.hh" #endif void updateSideInfoForHuffman(MP3SideInfo& sideInfo, Boolean isMPEG2, unsigned char const* mainDataPtr, unsigned p23L0, unsigned p23L1, unsigned& part23Length0a, unsigned& part23Length0aTruncation, unsigned& part23Length0b, unsigned& part23Length0bTruncation, unsigned& part23Length1a, unsigned& part23Length1aTruncation, unsigned& part23Length1b, unsigned& part23Length1bTruncation); #define SSLIMIT 18 class MP3HuffmanEncodingInfo { public: MP3HuffmanEncodingInfo(Boolean includeDecodedValues = False); ~MP3HuffmanEncodingInfo(); public: unsigned numSamples; unsigned allBitOffsets[SBLIMIT*SSLIMIT + 1]; unsigned reg1Start, reg2Start, bigvalStart; /* special bit offsets */ unsigned* decodedValues; }; /* forward */ void MP3HuffmanDecode(MP3SideInfo::gr_info_s_t* gr, Boolean isMPEG2, unsigned char const* fromBasePtr, unsigned fromBitOffset, unsigned fromLength, unsigned& scaleFactorsLength, MP3HuffmanEncodingInfo& hei); extern unsigned char huffdec[]; // huffman table data // The following are used if we process Huffman-decoded values #ifdef FOUR_BYTE_SAMPLES #define BYTES_PER_SAMPLE_VALUE 4 #else #ifdef TWO_BYTE_SAMPLES #define BYTES_PER_SAMPLE_VALUE 2 #else // ONE_BYTE_SAMPLES #define BYTES_PER_SAMPLE_VALUE 1 #endif #endif #ifdef DO_HUFFMAN_ENCODING unsigned MP3HuffmanEncode(MP3SideInfo::gr_info_s_t const* gr, unsigned char const* fromPtr, unsigned char* toPtr, unsigned toBitOffset, unsigned numHuffBits); #endif #endif live/liveMedia/MP3InternalsHuffman.cpp000444 001751 000000 00000070565 12265042432 020143 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
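// (Editor's addition: a standalone sketch of how the "allBitOffsets[]" table
// declared in the header above is typically consumed - finding the largest
// recorded sample boundary that fits within a bit budget, much as the
// truncation logic below does. The function name is hypothetical.)
static unsigned lastSampleBoundaryWithin(unsigned const* allBitOffsets,
                                         unsigned numSamples,
                                         unsigned bitBudget) {
  unsigned fit = 0;
  for (unsigned i = 0; i <= numSamples; ++i) { // the table has numSamples+1 entries
    if (allBitOffsets[i] > bitBudget) break;
    fit = allBitOffsets[i]; // largest offset not exceeding "bitBudget" so far
  }
  return fit;
}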
// MP3 internal implementation details (Huffman encoding) // Implementation #include "MP3InternalsHuffman.hh" #include #include #include MP3HuffmanEncodingInfo ::MP3HuffmanEncodingInfo(Boolean includeDecodedValues) { if (includeDecodedValues) { decodedValues = new unsigned[(SBLIMIT*SSLIMIT + 1)*4]; } else { decodedValues = NULL; } } MP3HuffmanEncodingInfo::~MP3HuffmanEncodingInfo() { delete[] decodedValues; } // This is crufty old code that needs to be cleaned up ##### static unsigned debugCount = 0; /* for debugging */ #define TRUNC_FAVORa void updateSideInfoForHuffman(MP3SideInfo& sideInfo, Boolean isMPEG2, unsigned char const* mainDataPtr, unsigned p23L0, unsigned p23L1, unsigned& part23Length0a, unsigned& part23Length0aTruncation, unsigned& part23Length0b, unsigned& part23Length0bTruncation, unsigned& part23Length1a, unsigned& part23Length1aTruncation, unsigned& part23Length1b, unsigned& part23Length1bTruncation) { int i, j; unsigned sfLength, origTotABsize, adjustment; MP3SideInfo::gr_info_s_t* gr; /* First, Huffman-decode each part of the segment's main data, to see at which bit-boundaries the samples appear: */ MP3HuffmanEncodingInfo hei; ++debugCount; #ifdef DEBUG fprintf(stderr, "usifh-start: p23L0: %d, p23L1: %d\n", p23L0, p23L1); #endif /* Process granule 0 */ { gr = &(sideInfo.ch[0].gr[0]); origTotABsize = gr->part2_3_length; MP3HuffmanDecode(gr, isMPEG2, mainDataPtr, 0, origTotABsize, sfLength, hei); /* Begin by computing new sizes for parts a & b (& their truncations) */ #ifdef DEBUG fprintf(stderr, "usifh-0: %d, %d:%d, %d:%d, %d:%d, %d:%d, %d:%d\n", hei.numSamples, sfLength/8, sfLength%8, hei.reg1Start/8, hei.reg1Start%8, hei.reg2Start/8, hei.reg2Start%8, hei.bigvalStart/8, hei.bigvalStart%8, origTotABsize/8, origTotABsize%8); #endif if (p23L0 < sfLength) { /* We can't use this, so give it all to the next granule: */ p23L1 += p23L0; p23L0 = 0; } part23Length0a = hei.bigvalStart; part23Length0b = origTotABsize - hei.bigvalStart; part23Length0aTruncation = part23Length0bTruncation = 0; if (origTotABsize > p23L0) { /* We need to shorten one or both of fields a & b */ unsigned truncation = origTotABsize - p23L0; #ifdef TRUNC_FAIRLY part23Length0aTruncation = (truncation*(part23Length0a-sfLength)) /(origTotABsize-sfLength); part23Length0bTruncation = truncation - part23Length0aTruncation; #endif #ifdef TRUNC_FAVORa part23Length0bTruncation = (truncation > part23Length0b) ? part23Length0b : truncation; part23Length0aTruncation = truncation - part23Length0bTruncation; #endif #ifdef TRUNC_FAVORb part23Length0aTruncation = (truncation > part23Length0a-sfLength) ? 
(part23Length0a-sfLength) : truncation; part23Length0bTruncation = truncation - part23Length0aTruncation; #endif } /* ASSERT: part23Length0xTruncation <= part23Length0x */ part23Length0a -= part23Length0aTruncation; part23Length0b -= part23Length0bTruncation; #ifdef DEBUG fprintf(stderr, "usifh-0: interim sizes: %d (%d), %d (%d)\n", part23Length0a, part23Length0aTruncation, part23Length0b, part23Length0bTruncation); #endif /* Adjust these new lengths so they end on sample bit boundaries: */ for (i = 0; i < (int)hei.numSamples; ++i) { if (hei.allBitOffsets[i] == part23Length0a) break; else if (hei.allBitOffsets[i] > part23Length0a) {--i; break;} } if (i < 0) { /* should happen only if we couldn't fit sfLength */ i = 0; adjustment = 0; } else { adjustment = part23Length0a - hei.allBitOffsets[i]; } #ifdef DEBUG fprintf(stderr, "%d usifh-0: adjustment 1: %d\n", debugCount, adjustment); #endif part23Length0a -= adjustment; part23Length0aTruncation += adjustment; /* Assign the bits we just shaved to field b and granule 1: */ if (part23Length0bTruncation < adjustment) { p23L1 += (adjustment - part23Length0bTruncation); adjustment = part23Length0bTruncation; } part23Length0b += adjustment; part23Length0bTruncation -= adjustment; for (j = i; j < (int)hei.numSamples; ++j) { if (hei.allBitOffsets[j] == part23Length0a + part23Length0aTruncation + part23Length0b) break; else if (hei.allBitOffsets[j] > part23Length0a + part23Length0aTruncation + part23Length0b) {--j; break;} } if (j < 0) { /* should happen only if we couldn't fit sfLength */ j = 0; adjustment = 0; } else { adjustment = part23Length0a+part23Length0aTruncation+part23Length0b - hei.allBitOffsets[j]; } #ifdef DEBUG fprintf(stderr, "%d usifh-0: adjustment 2: %d\n", debugCount, adjustment); #endif if (adjustment > part23Length0b) adjustment = part23Length0b; /*sanity*/ part23Length0b -= adjustment; part23Length0bTruncation += adjustment; /* Assign the bits we just shaved to granule 1 */ p23L1 += adjustment; if (part23Length0aTruncation > 0) { /* Change the granule's 'big_values' field to reflect the truncation */ gr->big_values = i; } } /* Process granule 1 (MPEG-1 only) */ if (isMPEG2) { part23Length1a = part23Length1b = 0; part23Length1aTruncation = part23Length1bTruncation = 0; } else { unsigned granule1Offset = origTotABsize + sideInfo.ch[1].gr[0].part2_3_length; gr = &(sideInfo.ch[0].gr[1]); origTotABsize = gr->part2_3_length; MP3HuffmanDecode(gr, isMPEG2, mainDataPtr, granule1Offset, origTotABsize, sfLength, hei); /* Begin by computing new sizes for parts a & b (& their truncations) */ #ifdef DEBUG fprintf(stderr, "usifh-1: %d, %d:%d, %d:%d, %d:%d, %d:%d, %d:%d\n", hei.numSamples, sfLength/8, sfLength%8, hei.reg1Start/8, hei.reg1Start%8, hei.reg2Start/8, hei.reg2Start%8, hei.bigvalStart/8, hei.bigvalStart%8, origTotABsize/8, origTotABsize%8); #endif if (p23L1 < sfLength) { /* We can't use this, so give up on this granule: */ p23L1 = 0; } part23Length1a = hei.bigvalStart; part23Length1b = origTotABsize - hei.bigvalStart; part23Length1aTruncation = part23Length1bTruncation = 0; if (origTotABsize > p23L1) { /* We need to shorten one or both of fields a & b */ unsigned truncation = origTotABsize - p23L1; #ifdef TRUNC_FAIRLY part23Length1aTruncation = (truncation*(part23Length1a-sfLength)) /(origTotABsize-sfLength); part23Length1bTruncation = truncation - part23Length1aTruncation; #endif #ifdef TRUNC_FAVORa part23Length1bTruncation = (truncation > part23Length1b) ? 
part23Length1b : truncation; part23Length1aTruncation = truncation - part23Length1bTruncation; #endif #ifdef TRUNC_FAVORb part23Length1aTruncation = (truncation > part23Length1a-sfLength) ? (part23Length1a-sfLength) : truncation; part23Length1bTruncation = truncation - part23Length1aTruncation; #endif } /* ASSERT: part23Length1xTruncation <= part23Length1x */ part23Length1a -= part23Length1aTruncation; part23Length1b -= part23Length1bTruncation; #ifdef DEBUG fprintf(stderr, "usifh-1: interim sizes: %d (%d), %d (%d)\n", part23Length1a, part23Length1aTruncation, part23Length1b, part23Length1bTruncation); #endif /* Adjust these new lengths so they end on sample bit boundaries: */ for (i = 0; i < (int)hei.numSamples; ++i) { if (hei.allBitOffsets[i] == part23Length1a) break; else if (hei.allBitOffsets[i] > part23Length1a) {--i; break;} } if (i < 0) { /* should happen only if we couldn't fit sfLength */ i = 0; adjustment = 0; } else { adjustment = part23Length1a - hei.allBitOffsets[i]; } #ifdef DEBUG fprintf(stderr, "%d usifh-1: adjustment 0: %d\n", debugCount, adjustment); #endif part23Length1a -= adjustment; part23Length1aTruncation += adjustment; /* Assign the bits we just shaved to field b: */ if (part23Length1bTruncation < adjustment) { adjustment = part23Length1bTruncation; } part23Length1b += adjustment; part23Length1bTruncation -= adjustment; for (j = i; j < (int)hei.numSamples; ++j) { if (hei.allBitOffsets[j] == part23Length1a + part23Length1aTruncation + part23Length1b) break; else if (hei.allBitOffsets[j] > part23Length1a + part23Length1aTruncation + part23Length1b) {--j; break;} } if (j < 0) { /* should happen only if we couldn't fit sfLength */ j = 0; adjustment = 0; } else { adjustment = part23Length1a+part23Length1aTruncation+part23Length1b - hei.allBitOffsets[j]; } #ifdef DEBUG fprintf(stderr, "%d usifh-1: adjustment 1: %d\n", debugCount, adjustment); #endif if (adjustment > part23Length1b) adjustment = part23Length1b; /*sanity*/ part23Length1b -= adjustment; part23Length1bTruncation += adjustment; if (part23Length1aTruncation > 0) { /* Change the granule's 'big_values' field to reflect the truncation */ gr->big_values = i; } } #ifdef DEBUG fprintf(stderr, "usifh-end, new vals: %d (%d), %d (%d), %d (%d), %d (%d)\n", part23Length0a, part23Length0aTruncation, part23Length0b, part23Length0bTruncation, part23Length1a, part23Length1aTruncation, part23Length1b, part23Length1bTruncation); #endif } static void rsf_getline(char* line, unsigned max, unsigned char**fi) { unsigned i; for (i = 0; i < max; ++i) { line[i] = *(*fi)++; if (line[i] == '\n') { line[i++] = '\0'; return; } } line[i] = '\0'; } static void rsfscanf(unsigned char **fi, unsigned int* v) { while (sscanf((char*)*fi, "%x", v) == 0) { /* skip past the next '\0' */ while (*(*fi)++ != '\0') {} } /* skip past any white-space before the value: */ while (*(*fi) <= ' ') ++(*fi); /* skip past the value: */ while (*(*fi) > ' ') ++(*fi); } #define HUFFBITS unsigned long int #define SIZEOF_HUFFBITS 4 #define HTN 34 #define MXOFF 250 struct huffcodetab { char tablename[3]; /*string, containing table_description */ unsigned int xlen; /*max. x-index+ */ unsigned int ylen; /*max. 
y-index+ */ unsigned int linbits; /*number of linbits */ unsigned int linmax; /*max number to be stored in linbits */ int ref; /*a positive value indicates a reference*/ HUFFBITS *table; /*pointer to array[xlen][ylen] */ unsigned char *hlen; /*pointer to array[xlen][ylen] */ unsigned char(*val)[2];/*decoder tree */ unsigned int treelen; /*length of decoder tree */ }; static struct huffcodetab rsf_ht[HTN]; // array of all huffcodetable headers /* 0..31 Huffman code table 0..31 */ /* 32,33 count1-tables */ /* read the huffman decoder table */ static int read_decoder_table(unsigned char* fi) { int n,i,nn,t; unsigned int v0,v1; char command[100],line[100]; for (n=0;nscalefac_compress]; int num1 = slen[1][gr_info->scalefac_compress]; if (gr_info->block_type == 2) { numbits = (num0 + num1) * 18; if (gr_info->mixed_block_flag) { numbits -= num0; /* num0 * 17 + num1 * 18 */ } } else { int scfsi = gr_info->scfsi; if(scfsi < 0) { /* scfsi < 0 => granule == 0 */ numbits = (num0 + num1) * 10 + num0; } else { numbits = 0; if(!(scfsi & 0x8)) { numbits += num0 * 6; } else { } if(!(scfsi & 0x4)) { numbits += num0 * 5; } else { } if(!(scfsi & 0x2)) { numbits += num1 * 5; } else { } if(!(scfsi & 0x1)) { numbits += num1 * 5; } else { } } } return numbits; } extern unsigned n_slen2[]; extern unsigned i_slen2[]; static unsigned rsf_get_scale_factors_2(MP3SideInfo::gr_info_s_t *gr_info) { unsigned char const* pnt; int i; unsigned int slen; int n = 0; int numbits = 0; slen = n_slen2[gr_info->scalefac_compress]; gr_info->preflag = (slen>>15) & 0x1; n = 0; if( gr_info->block_type == 2 ) { n++; if(gr_info->mixed_block_flag) n++; } pnt = stab[n][(slen>>12)&0x7]; for(i=0;i<4;i++) { int num = slen & 0x7; slen >>= 3; numbits += pnt[i] * num; } return numbits; } static unsigned getScaleFactorsLength(MP3SideInfo::gr_info_s_t* gr, Boolean isMPEG2) { return isMPEG2 ? rsf_get_scale_factors_2(gr) : rsf_get_scale_factors_1(gr); } static int rsf_huffman_decoder(BitVector& bv, struct huffcodetab const* h, int* x, int* y, int* v, int* w); // forward void MP3HuffmanDecode(MP3SideInfo::gr_info_s_t* gr, Boolean isMPEG2, unsigned char const* fromBasePtr, unsigned fromBitOffset, unsigned fromLength, unsigned& scaleFactorsLength, MP3HuffmanEncodingInfo& hei) { unsigned i; int x, y, v, w; struct huffcodetab *h; BitVector bv((unsigned char*)fromBasePtr, fromBitOffset, fromLength); /* Compute the size of the scale factors (& also advance bv): */ scaleFactorsLength = getScaleFactorsLength(gr, isMPEG2); bv.skipBits(scaleFactorsLength); initialize_huffman(); hei.reg1Start = hei.reg2Start = hei.numSamples = 0; /* Read bigvalues area. */ if (gr->big_values < gr->region1start + gr->region2start) { gr->big_values = gr->region1start + gr->region2start; /* sanity check */ } for (i = 0; i < gr->big_values; ++i) { if (i < gr->region1start) { /* in region 0 */ h = &rsf_ht[gr->table_select[0]]; } else if (i < gr->region2start) { /* in region 1 */ h = &rsf_ht[gr->table_select[1]]; if (hei.reg1Start == 0) { hei.reg1Start = bv.curBitIndex(); } } else { /* in region 2 */ h = &rsf_ht[gr->table_select[2]]; if (hei.reg2Start == 0) { hei.reg2Start = bv.curBitIndex(); } } hei.allBitOffsets[i] = bv.curBitIndex(); rsf_huffman_decoder(bv, h, &x, &y, &v, &w); if (hei.decodedValues != NULL) { // Record the decoded values: unsigned* ptr = &hei.decodedValues[4*i]; ptr[0] = x; ptr[1] = y; ptr[2] = v; ptr[3] = w; } } hei.bigvalStart = bv.curBitIndex(); /* Read count1 area. 
*/ h = &rsf_ht[gr->count1table_select+32]; while (bv.curBitIndex() < bv.totNumBits() && i < SSLIMIT*SBLIMIT) { hei.allBitOffsets[i] = bv.curBitIndex(); rsf_huffman_decoder(bv, h, &x, &y, &v, &w); if (hei.decodedValues != NULL) { // Record the decoded values: unsigned* ptr = &hei.decodedValues[4*i]; ptr[0] = x; ptr[1] = y; ptr[2] = v; ptr[3] = w; } ++i; } hei.allBitOffsets[i] = bv.curBitIndex(); hei.numSamples = i; } HUFFBITS dmask = 1 << (SIZEOF_HUFFBITS*8-1); unsigned int hs = SIZEOF_HUFFBITS*8; /* do the huffman-decoding */ static int rsf_huffman_decoder(BitVector& bv, struct huffcodetab const* h, // ptr to huffman code record /* unsigned */ int *x, // returns decoded x value /* unsigned */ int *y, // returns decoded y value int* v, int* w) { HUFFBITS level; unsigned point = 0; int error = 1; level = dmask; *x = *y = *v = *w = 0; if (h->val == NULL) return 2; /* table 0 needs no bits */ if (h->treelen == 0) return 0; /* Lookup in Huffman table. */ do { if (h->val[point][0]==0) { /*end of tree*/ *x = h->val[point][1] >> 4; *y = h->val[point][1] & 0xf; error = 0; break; } if (bv.get1Bit()) { while (h->val[point][1] >= MXOFF) point += h->val[point][1]; point += h->val[point][1]; } else { while (h->val[point][0] >= MXOFF) point += h->val[point][0]; point += h->val[point][0]; } level >>= 1; } while (level || (point < h->treelen) ); ///// } while (level || (point < rsf_ht->treelen) ); /* Check for error. */ if (error) { /* set x and y to a medium value as a simple concealment */ printf("Illegal Huffman code in data.\n"); *x = ((h->xlen-1) << 1); *y = ((h->ylen-1) << 1); } /* Process sign encodings for quadruples tables. */ if (h->tablename[0] == '3' && (h->tablename[1] == '2' || h->tablename[1] == '3')) { *v = (*y>>3) & 1; *w = (*y>>2) & 1; *x = (*y>>1) & 1; *y = *y & 1; if (*v) if (bv.get1Bit() == 1) *v = -*v; if (*w) if (bv.get1Bit() == 1) *w = -*w; if (*x) if (bv.get1Bit() == 1) *x = -*x; if (*y) if (bv.get1Bit() == 1) *y = -*y; } /* Process sign and escape encodings for dual tables. 
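   (For these 'dual' tables, a decoded magnitude equal to xlen-1 is an escape
   code: "linbits" extra bits are read and added to the magnitude, and a sign
   bit then follows any non-zero x or y value.)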
*/ else { if (h->linbits) if ((h->xlen-1) == (unsigned)*x) *x += bv.getBits(h->linbits); if (*x) if (bv.get1Bit() == 1) *x = -*x; if (h->linbits) if ((h->ylen-1) == (unsigned)*y) *y += bv.getBits(h->linbits); if (*y) if (bv.get1Bit() == 1) *y = -*y; } return error; } #ifdef DO_HUFFMAN_ENCODING inline int getNextSample(unsigned char const*& fromPtr) { int sample #ifdef FOUR_BYTE_SAMPLES = (fromPtr[0]<<24) | (fromPtr[1]<<16) | (fromPtr[2]<<8) | fromPtr[3]; #else #ifdef TWO_BYTE_SAMPLES = (fromPtr[0]<<8) | fromPtr[1]; #else // ONE_BYTE_SAMPLES = fromPtr[0]; #endif #endif fromPtr += BYTES_PER_SAMPLE_VALUE; return sample; } static void rsf_huffman_encoder(BitVector& bv, struct huffcodetab* h, int x, int y, int v, int w); // forward unsigned MP3HuffmanEncode(MP3SideInfo::gr_info_s_t const* gr, unsigned char const* fromPtr, unsigned char* toPtr, unsigned toBitOffset, unsigned numHuffBits) { unsigned i; struct huffcodetab *h; int x, y, v, w; BitVector bv(toPtr, toBitOffset, numHuffBits); initialize_huffman(); // Encode big_values area: unsigned big_values = gr->big_values; if (big_values < gr->region1start + gr->region2start) { big_values = gr->region1start + gr->region2start; /* sanity check */ } for (i = 0; i < big_values; ++i) { if (i < gr->region1start) { /* in region 0 */ h = &rsf_ht[gr->table_select[0]]; } else if (i < gr->region2start) { /* in region 1 */ h = &rsf_ht[gr->table_select[1]]; } else { /* in region 2 */ h = &rsf_ht[gr->table_select[2]]; } x = getNextSample(fromPtr); y = getNextSample(fromPtr); v = getNextSample(fromPtr); w = getNextSample(fromPtr); rsf_huffman_encoder(bv, h, x, y, v, w); } // Encode count1 area: h = &rsf_ht[gr->count1table_select+32]; while (bv.curBitIndex() < bv.totNumBits() && i < SSLIMIT*SBLIMIT) { x = getNextSample(fromPtr); y = getNextSample(fromPtr); v = getNextSample(fromPtr); w = getNextSample(fromPtr); rsf_huffman_encoder(bv, h, x, y, v, w); ++i; } return i; } static Boolean lookupHuffmanTableEntry(struct huffcodetab const* h, HUFFBITS bits, unsigned bitsLength, unsigned char& xy) { unsigned point = 0; unsigned mask = 1; unsigned numBitsTestedSoFar = 0; do { if (h->val[point][0]==0) { // end of tree xy = h->val[point][1]; if (h->hlen[xy] == 0) { // this entry hasn't already been used h->table[xy] = bits; h->hlen[xy] = bitsLength; return True; } else { // this entry has already been seen return False; } } if (numBitsTestedSoFar++ == bitsLength) { // We don't yet have enough bits for this prefix return False; } if (bits&mask) { while (h->val[point][1] >= MXOFF) point += h->val[point][1]; point += h->val[point][1]; } else { while (h->val[point][0] >= MXOFF) point += h->val[point][0]; point += h->val[point][0]; } mask <<= 1; } while (mask || (point < h->treelen)); return False; } static void buildHuffmanEncodingTable(struct huffcodetab* h) { h->table = new unsigned long[256]; h->hlen = new unsigned char[256]; if (h->table == NULL || h->hlen == NULL) { h->table = NULL; return; } for (unsigned i = 0; i < 256; ++i) { h->table[i] = 0; h->hlen[i] = 0; } // Look up entries for each possible bit sequence length: unsigned maxNumEntries = h->xlen * h->ylen; unsigned numEntries = 0; unsigned powerOf2 = 1; for (unsigned bitsLength = 1; bitsLength <= 8*SIZEOF_HUFFBITS; ++bitsLength) { powerOf2 *= 2; for (HUFFBITS bits = 0; bits < powerOf2; ++bits) { // Find the table value - if any - for 'bits' (length 'bitsLength'): unsigned char xy; if (lookupHuffmanTableEntry(h, bits, bitsLength, xy)) { ++numEntries; if (numEntries == maxNumEntries) return; // we're done } } } 
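  // (The loops above enumerate candidate codewords in order of increasing bit
  // length, walking the decoder tree for each one; the first codeword that
  // lands on a given (x,y) leaf has its bits and length recorded by
  // "lookupHuffmanTableEntry()".  Falling through to here means that some
  // table entry was never matched:)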
#ifdef DEBUG fprintf(stderr, "Didn't find enough entries!\n"); // shouldn't happen #endif } static void lookupXYandPutBits(BitVector& bv, struct huffcodetab const* h, unsigned char xy) { HUFFBITS bits = h->table[xy]; unsigned bitsLength = h->hlen[xy]; // Note that "bits" is in reverse order, so read them from right-to-left: while (bitsLength-- > 0) { bv.put1Bit(bits&0x00000001); bits >>= 1; } } static void putLinbits(BitVector& bv, struct huffcodetab const* h, HUFFBITS bits) { bv.putBits(bits, h->linbits); } static void rsf_huffman_encoder(BitVector& bv, struct huffcodetab* h, int x, int y, int v, int w) { if (h->val == NULL) return; /* table 0 produces no bits */ if (h->treelen == 0) return; if (h->table == NULL) { // We haven't yet built the encoding array for this table; do it now: buildHuffmanEncodingTable(h); if (h->table == NULL) return; } Boolean xIsNeg = False, yIsNeg = False, vIsNeg = False, wIsNeg = False; unsigned char xy; #ifdef FOUR_BYTE_SAMPLES #else #ifdef TWO_BYTE_SAMPLES // Convert 2-byte negative numbers to their 4-byte equivalents: if (x&0x8000) x |= 0xFFFF0000; if (y&0x8000) y |= 0xFFFF0000; if (v&0x8000) v |= 0xFFFF0000; if (w&0x8000) w |= 0xFFFF0000; #else // ONE_BYTE_SAMPLES // Convert 1-byte negative numbers to their 4-byte equivalents: if (x&0x80) x |= 0xFFFFFF00; if (y&0x80) y |= 0xFFFFFF00; if (v&0x80) v |= 0xFFFFFF00; if (w&0x80) w |= 0xFFFFFF00; #endif #endif if (h->tablename[0] == '3' && (h->tablename[1] == '2' || h->tablename[1] == '3')) {// quad tables if (x < 0) { xIsNeg = True; x = -x; } if (y < 0) { yIsNeg = True; y = -y; } if (v < 0) { vIsNeg = True; v = -v; } if (w < 0) { wIsNeg = True; w = -w; } // Sanity check: x,y,v,w must all be 0 or 1: if (x>1 || y>1 || v>1 || w>1) { #ifdef DEBUG fprintf(stderr, "rsf_huffman_encoder quad sanity check fails: %x,%x,%x,%x\n", x, y, v, w); #endif } xy = (v<<3)|(w<<2)|(x<<1)|y; lookupXYandPutBits(bv, h, xy); if (v) bv.put1Bit(vIsNeg); if (w) bv.put1Bit(wIsNeg); if (x) bv.put1Bit(xIsNeg); if (y) bv.put1Bit(yIsNeg); } else { // dual tables // Sanity check: v and w must be 0: if (v != 0 || w != 0) { #ifdef DEBUG fprintf(stderr, "rsf_huffman_encoder dual sanity check 1 fails: %x,%x,%x,%x\n", x, y, v, w); #endif } if (x < 0) { xIsNeg = True; x = -x; } if (y < 0) { yIsNeg = True; y = -y; } // Sanity check: x and y must be <= 255: if (x > 255 || y > 255) { #ifdef DEBUG fprintf(stderr, "rsf_huffman_encoder dual sanity check 2 fails: %x,%x,%x,%x\n", x, y, v, w); #endif } int xl1 = h->xlen-1; int yl1 = h->ylen-1; unsigned linbitsX = 0; unsigned linbitsY = 0; if (((x < xl1) || (xl1 == 0)) && (y < yl1)) { // normal case xy = (x<<4)|y; lookupXYandPutBits(bv, h, xy); if (x) bv.put1Bit(xIsNeg); if (y) bv.put1Bit(yIsNeg); } else if (x >= xl1) { linbitsX = (unsigned)(x - xl1); if (linbitsX > h->linmax) { #ifdef DEBUG fprintf(stderr,"warning: Huffman X table overflow\n"); #endif linbitsX = h->linmax; }; if (y >= yl1) { xy = (xl1<<4)|yl1; lookupXYandPutBits(bv, h, xy); linbitsY = (unsigned)(y - yl1); if (linbitsY > h->linmax) { #ifdef DEBUG fprintf(stderr,"warning: Huffman Y table overflow\n"); #endif linbitsY = h->linmax; }; if (h->linbits) putLinbits(bv, h, linbitsX); if (x) bv.put1Bit(xIsNeg); if (h->linbits) putLinbits(bv, h, linbitsY); if (y) bv.put1Bit(yIsNeg); } else { /* x >= h->xlen, y < h->ylen */ xy = (xl1<<4)|y; lookupXYandPutBits(bv, h, xy); if (h->linbits) putLinbits(bv, h, linbitsX); if (x) bv.put1Bit(xIsNeg); if (y) bv.put1Bit(yIsNeg); } } else { /* ((x < h->xlen) && (y >= h->ylen)) */ xy = (x<<4)|yl1; 
lookupXYandPutBits(bv, h, xy); linbitsY = y-yl1; if (linbitsY > h->linmax) { #ifdef DEBUG fprintf(stderr,"warning: Huffman Y table overflow\n"); #endif linbitsY = h->linmax; }; if (x) bv.put1Bit(xIsNeg); if (h->linbits) putLinbits(bv, h, linbitsY); if (y) bv.put1Bit(yIsNeg); } } } #endif live/liveMedia/MP3ADURTPSink.cpp000444 001751 000000 00000010313 12265042432 016504 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for 'ADUized' MP3 frames ("mpa-robust") // Implementation #include "MP3ADURTPSink.hh" MP3ADURTPSink::MP3ADURTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char RTPPayloadType) : AudioRTPSink(env, RTPgs, RTPPayloadType, 90000, "MPA-ROBUST") { } MP3ADURTPSink::~MP3ADURTPSink() { } MP3ADURTPSink* MP3ADURTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char RTPPayloadType) { return new MP3ADURTPSink(env, RTPgs, RTPPayloadType); } static void badDataSize(UsageEnvironment& env, unsigned numBytesInFrame) { env << "MP3ADURTPSink::doSpecialFrameHandling(): invalid size (" << numBytesInFrame << ") of non-fragmented input ADU!\n"; } void MP3ADURTPSink::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { // If this is the first (or only) fragment of an ADU, then // check the "ADU descriptor" (that should be at the front) for validity: if (fragmentationOffset == 0) { unsigned aduDescriptorSize; if (numBytesInFrame < 1) { badDataSize(envir(), numBytesInFrame); return; } if (frameStart[0]&0x40) { // We have a 2-byte ADU descriptor aduDescriptorSize = 2; if (numBytesInFrame < 2) { badDataSize(envir(), numBytesInFrame); return; } fCurADUSize = ((frameStart[0]&~0xC0)<<8) | frameStart[1]; } else { // We have a 1-byte ADU descriptor aduDescriptorSize = 1; fCurADUSize = frameStart[0]&~0x80; } if (frameStart[0]&0x80) { envir() << "Unexpected \"C\" bit seen on non-fragment input ADU!\n"; return; } // Now, check whether the ADU size in the ADU descriptor is consistent // with the total data size of (all fragments of) the input frame: unsigned expectedADUSize = fragmentationOffset + numBytesInFrame + numRemainingBytes - aduDescriptorSize; if (fCurADUSize != expectedADUSize) { envir() << "MP3ADURTPSink::doSpecialFrameHandling(): Warning: Input ADU size " << expectedADUSize << " (=" << fragmentationOffset << "+" << numBytesInFrame << "+" << numRemainingBytes << "-" << aduDescriptorSize << ") did not match the value (" << fCurADUSize << ") in the ADU descriptor!\n"; fCurADUSize = expectedADUSize; } } else { // This is the second (or subsequent) fragment. 
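    // (Per the "mpa-robust" payload format, every fragment after the first
    // must carry its own ADU descriptor, with the continuation bit (0x80)
    // and the two-byte-form bit (0x40) both set - hence the 0xC0 below.)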
// Insert a new ADU descriptor: unsigned char aduDescriptor[2]; aduDescriptor[0] = 0xC0|(fCurADUSize>>8); aduDescriptor[1] = fCurADUSize&0xFF; setSpecialHeaderBytes(aduDescriptor, 2); } // Important: Also call our base class's doSpecialFrameHandling(), // to set the packet's timestamp: MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset, frameStart, numBytesInFrame, framePresentationTime, numRemainingBytes); } unsigned MP3ADURTPSink::specialHeaderSize() const { // Normally there's no special header. // (The "ADU descriptor" is already present in the data.) unsigned specialHeaderSize = 0; // However, if we're about to output the second (or subsequent) fragment // of a fragmented ADU, then we need to insert a new ADU descriptor at // the front of the packet: if (curFragmentationOffset() > 0) { specialHeaderSize = 2; } return specialHeaderSize; } live/liveMedia/JPEGVideoRTPSource.cpp000444 001751 000000 00000040465 12265042432 017636 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
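// Illustrative sketch - not part of the library - of the "mpa-robust" ADU
// descriptor handled by "MP3ADURTPSink" above.  "parseADUDescriptor" is a
// hypothetical name, used only to document the layout: bit 7 is the "C"
// (continuation) flag, bit 6 selects the 2-byte form carrying a 14-bit ADU
// size; otherwise the descriptor is a single byte with a 7-bit size.
inline unsigned parseADUDescriptor(unsigned char const* p,
                                   unsigned& descriptorSize) {
  if (p[0]&0x40) { // 2-byte descriptor: 14-bit ADU size
    descriptorSize = 2;
    return ((p[0]&0x3F)<<8) | p[1];
  } else { // 1-byte descriptor: 7-bit ADU size
    descriptorSize = 1;
    return p[0]&0x7F;
  }
}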
// JPEG Video (RFC 2435) RTP Sources // Implementation #include "JPEGVideoRTPSource.hh" ////////// JPEGBufferedPacket and JPEGBufferedPacketFactory ////////// class JPEGBufferedPacket: public BufferedPacket { public: Boolean completesFrame; private: // Redefined virtual functions: virtual void reset(); virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize); }; class JPEGBufferedPacketFactory: public BufferedPacketFactory { private: // redefined virtual functions virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource); }; ////////// JPEGVideoRTPSource implementation ////////// #define BYTE unsigned char #define WORD unsigned #define DWORD unsigned long JPEGVideoRTPSource* JPEGVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, unsigned defaultWidth, unsigned defaultHeight) { return new JPEGVideoRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, defaultWidth, defaultHeight); } JPEGVideoRTPSource::JPEGVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, unsigned defaultWidth, unsigned defaultHeight) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, new JPEGBufferedPacketFactory), fDefaultWidth(defaultWidth), fDefaultHeight(defaultHeight) { } JPEGVideoRTPSource::~JPEGVideoRTPSource() { } enum { MARKER_SOF0 = 0xc0, // start-of-frame, baseline scan MARKER_SOI = 0xd8, // start of image MARKER_EOI = 0xd9, // end of image MARKER_SOS = 0xda, // start of scan MARKER_DRI = 0xdd, // restart interval MARKER_DQT = 0xdb, // define quantization tables MARKER_DHT = 0xc4, // huffman tables MARKER_APP_FIRST = 0xe0, MARKER_APP_LAST = 0xef, MARKER_COMMENT = 0xfe, }; static unsigned char const lum_dc_codelens[] = { 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, }; static unsigned char const lum_dc_symbols[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, }; static unsigned char const lum_ac_codelens[] = { 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d, }; static unsigned char const lum_ac_symbols[] = { 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08, 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, }; static unsigned char const chm_dc_codelens[] = { 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, }; static unsigned char const chm_dc_symbols[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, }; static unsigned char const chm_ac_codelens[] = { 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77, }; static unsigned char const chm_ac_symbols[] = { 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 
0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, }; static void createHuffmanHeader(unsigned char*& p, unsigned char const* codelens, int ncodes, unsigned char const* symbols, int nsymbols, int tableNo, int tableClass) { *p++ = 0xff; *p++ = MARKER_DHT; *p++ = 0; /* length msb */ *p++ = 3 + ncodes + nsymbols; /* length lsb */ *p++ = (tableClass << 4) | tableNo; memcpy(p, codelens, ncodes); p += ncodes; memcpy(p, symbols, nsymbols); p += nsymbols; } static unsigned computeJPEGHeaderSize(unsigned qtlen, unsigned dri) { unsigned qtlen_half = qtlen/2; // in case qtlen is odd; shouldn't happen qtlen = qtlen_half*2; unsigned numQtables = qtlen > 64 ? 2 : 1; return 485 + numQtables*5 + qtlen + (dri > 0 ? 6 : 0); } static void createJPEGHeader(unsigned char* buf, unsigned type, unsigned w, unsigned h, unsigned char const* qtables, unsigned qtlen, unsigned dri) { unsigned char *ptr = buf; unsigned numQtables = qtlen > 64 ? 2 : 1; // MARKER_SOI: *ptr++ = 0xFF; *ptr++ = MARKER_SOI; // MARKER_APP_FIRST: *ptr++ = 0xFF; *ptr++ = MARKER_APP_FIRST; *ptr++ = 0x00; *ptr++ = 0x10; // size of chunk *ptr++ = 'J'; *ptr++ = 'F'; *ptr++ = 'I'; *ptr++ = 'F'; *ptr++ = 0x00; *ptr++ = 0x01; *ptr++ = 0x01; // JFIF format version (1.1) *ptr++ = 0x00; // no units *ptr++ = 0x00; *ptr++ = 0x01; // Horizontal pixel aspect ratio *ptr++ = 0x00; *ptr++ = 0x01; // Vertical pixel aspect ratio *ptr++ = 0x00; *ptr++ = 0x00; // no thumbnail // MARKER_DRI: if (dri > 0) { *ptr++ = 0xFF; *ptr++ = MARKER_DRI; *ptr++ = 0x00; *ptr++ = 0x04; // size of chunk *ptr++ = (BYTE)(dri >> 8); *ptr++ = (BYTE)(dri); // restart interval } // MARKER_DQT (luma): unsigned tableSize = numQtables == 1 ? qtlen : qtlen/2; *ptr++ = 0xFF; *ptr++ = MARKER_DQT; *ptr++ = 0x00; *ptr++ = tableSize + 3; // size of chunk *ptr++ = 0x00; // precision(0), table id(0) memcpy(ptr, qtables, tableSize); qtables += tableSize; ptr += tableSize; if (numQtables > 1) { unsigned tableSize = qtlen - qtlen/2; // MARKER_DQT (chroma): *ptr++ = 0xFF; *ptr++ = MARKER_DQT; *ptr++ = 0x00; *ptr++ = tableSize + 3; // size of chunk *ptr++ = 0x01; // precision(0), table id(1) memcpy(ptr, qtables, tableSize); qtables += tableSize; ptr += tableSize; } // MARKER_SOF0: *ptr++ = 0xFF; *ptr++ = MARKER_SOF0; *ptr++ = 0x00; *ptr++ = 0x11; // size of chunk *ptr++ = 0x08; // sample precision *ptr++ = (BYTE)(h >> 8); *ptr++ = (BYTE)(h); // number of lines (must be a multiple of 8) *ptr++ = (BYTE)(w >> 8); *ptr++ = (BYTE)(w); // number of columns (must be a multiple of 8) *ptr++ = 0x03; // number of components *ptr++ = 0x01; // id of component *ptr++ = type ? 
0x22 : 0x21; // sampling ratio (h,v) *ptr++ = 0x00; // quant table id *ptr++ = 0x02; // id of component *ptr++ = 0x11; // sampling ratio (h,v) *ptr++ = numQtables == 1 ? 0x00 : 0x01; // quant table id *ptr++ = 0x03; // id of component *ptr++ = 0x11; // sampling ratio (h,v) *ptr++ = numQtables == 1 ? 0x00 : 0x01; // quant table id createHuffmanHeader(ptr, lum_dc_codelens, sizeof lum_dc_codelens, lum_dc_symbols, sizeof lum_dc_symbols, 0, 0); createHuffmanHeader(ptr, lum_ac_codelens, sizeof lum_ac_codelens, lum_ac_symbols, sizeof lum_ac_symbols, 0, 1); createHuffmanHeader(ptr, chm_dc_codelens, sizeof chm_dc_codelens, chm_dc_symbols, sizeof chm_dc_symbols, 1, 0); createHuffmanHeader(ptr, chm_ac_codelens, sizeof chm_ac_codelens, chm_ac_symbols, sizeof chm_ac_symbols, 1, 1); // MARKER_SOS: *ptr++ = 0xFF; *ptr++ = MARKER_SOS; *ptr++ = 0x00; *ptr++ = 0x0C; // size of chunk *ptr++ = 0x03; // number of components *ptr++ = 0x01; // id of component *ptr++ = 0x00; // huffman table id (DC, AC) *ptr++ = 0x02; // id of component *ptr++ = 0x11; // huffman table id (DC, AC) *ptr++ = 0x03; // id of component *ptr++ = 0x11; // huffman table id (DC, AC) *ptr++ = 0x00; // start of spectral *ptr++ = 0x3F; // end of spectral *ptr++ = 0x00; // successive approximation bit position (high, low) } // The default 'luma' and 'chroma' quantizer tables, in zigzag order: static unsigned char const defaultQuantizers[128] = { // luma table: 16, 11, 12, 14, 12, 10, 16, 14, 13, 14, 18, 17, 16, 19, 24, 40, 26, 24, 22, 22, 24, 49, 35, 37, 29, 40, 58, 51, 61, 60, 57, 51, 56, 55, 64, 72, 92, 78, 64, 68, 87, 69, 55, 56, 80, 109, 81, 87, 95, 98, 103, 104, 103, 62, 77, 113, 121, 112, 100, 120, 92, 101, 103, 99, // chroma table: 17, 18, 18, 24, 21, 24, 47, 26, 26, 47, 99, 66, 56, 66, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99 }; static void makeDefaultQtables(unsigned char* resultTables, unsigned Q) { int factor = Q; int q; if (Q < 1) factor = 1; else if (Q > 99) factor = 99; if (Q < 50) { q = 5000 / factor; } else { q = 200 - factor*2; } for (int i = 0; i < 128; ++i) { int newVal = (defaultQuantizers[i]*q + 50)/100; if (newVal < 1) newVal = 1; else if (newVal > 255) newVal = 255; resultTables[i] = newVal; } } Boolean JPEGVideoRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { unsigned char* headerStart = packet->data(); unsigned packetSize = packet->dataSize(); unsigned char* qtables = NULL; unsigned qtlen = 0; unsigned dri = 0; // There's at least 8-byte video-specific header /* 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type-specific | Fragment Offset | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Q | Width | Height | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ if (packetSize < 8) return False; resultSpecialHeaderSize = 8; unsigned Offset = (unsigned)((DWORD)headerStart[1] << 16 | (DWORD)headerStart[2] << 8 | (DWORD)headerStart[3]); unsigned Type = (unsigned)headerStart[4]; unsigned type = Type & 1; unsigned Q = (unsigned)headerStart[5]; unsigned width = (unsigned)headerStart[6] * 8; unsigned height = (unsigned)headerStart[7] * 8; if ((width == 0 || height == 0) && fDefaultWidth != 0 && fDefaultHeight != 0) { // Use the default width and height parameters instead: width = fDefaultWidth; 
height = fDefaultHeight; } if (width == 0) width = 256*8; // special case if (height == 0) height = 256*8; // special case if (Type > 63) { // Restart Marker header present /* 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Restart Interval |F|L| Restart Count | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ if (packetSize < resultSpecialHeaderSize + 4) return False; unsigned RestartInterval = (unsigned)((WORD)headerStart[resultSpecialHeaderSize] << 8 | (WORD)headerStart[resultSpecialHeaderSize + 1]); dri = RestartInterval; resultSpecialHeaderSize += 4; } if (Offset == 0) { if (Q > 127) { // Quantization Table header present /* 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | MBZ | Precision | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Quantization Table Data | | ... | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ if (packetSize < resultSpecialHeaderSize + 4) return False; unsigned MBZ = (unsigned)headerStart[resultSpecialHeaderSize]; if (MBZ == 0) { // unsigned Precision = (unsigned)headerStart[resultSpecialHeaderSize + 1]; unsigned Length = (unsigned)((WORD)headerStart[resultSpecialHeaderSize + 2] << 8 | (WORD)headerStart[resultSpecialHeaderSize + 3]); //ASSERT(Length == 128); resultSpecialHeaderSize += 4; if (packetSize < resultSpecialHeaderSize + Length) return False; qtlen = Length; qtables = &headerStart[resultSpecialHeaderSize]; resultSpecialHeaderSize += Length; } } } // If this is the first (or only) fragment of a JPEG frame, then we need // to synthesize a JPEG header, and prepend it to the incoming data. // Hack: We can do this because we allowed space for it in // our special "JPEGBufferedPacket" subclass. We also adjust // "resultSpecialHeaderSize" to compensate for this, by subtracting // the size of the synthesized header. Note that this will cause // "resultSpecialHeaderSize" to become negative, but the code that called // us (in "MultiFramedRTPSource") will handle this properly. if (Offset == 0) { unsigned char newQtables[128]; if (qtlen == 0) { // A quantization table was not present in the RTP JPEG header, // so use the default tables, scaled according to the "Q" factor: makeDefaultQtables(newQtables, Q); qtables = newQtables; qtlen = sizeof newQtables; } unsigned hdrlen = computeJPEGHeaderSize(qtlen, dri); resultSpecialHeaderSize -= hdrlen; // goes negative headerStart += (int)resultSpecialHeaderSize; // goes backward createJPEGHeader(headerStart, type, width, height, qtables, qtlen, dri); } fCurrentPacketBeginsFrame = (Offset == 0); // The RTP "M" (marker) bit indicates the last fragment of a frame: ((JPEGBufferedPacket*)packet)->completesFrame = fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); return True; } char const* JPEGVideoRTPSource::MIMEtype() const { return "video/JPEG"; } ////////// JPEGBufferedPacket and JPEGBufferedPacketFactory implementation void JPEGBufferedPacket::reset() { BufferedPacket::reset(); // Move our "fHead" and "fTail" forward, to allow space for a synthesized // JPEG header to precede the RTP data that comes in over the network. 
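  // (RFC 2435 does not transmit the JPEG SOI/DQT/SOF0/DHT/SOS headers; they
  // are synthesized by "createJPEGHeader()" above, so each packet buffer has
  // to reserve "MAX_JPEG_HEADER_SIZE" bytes in front of the network data:)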
  unsigned offset = MAX_JPEG_HEADER_SIZE;
  if (offset > fPacketSize) offset = fPacketSize; // shouldn't happen
  fHead = fTail = offset;
}

unsigned JPEGBufferedPacket
::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
  // Normally, the enclosed frame size is just "dataSize".  If, however,
  // the frame does not end with the "EOI" marker, then add this now:
  if (completesFrame && dataSize >= 2 &&
      !(framePtr[dataSize-2] == 0xFF && framePtr[dataSize-1] == MARKER_EOI)) {
    framePtr[dataSize++] = 0xFF;
    framePtr[dataSize++] = MARKER_EOI;
  }
  return dataSize;
}

BufferedPacket* JPEGBufferedPacketFactory
::createNewPacket(MultiFramedRTPSource* /*ourSource*/) {
  return new JPEGBufferedPacket;
}
live/liveMedia/AudioInputDevice.cpp000444 001751 000000 00000003165 12265042432 017550 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// Copyright (c) 2001-2003 Live Networks, Inc. All rights reserved.
// Generic audio input device (such as a microphone, or an input sound card)
// Implementation

#include "AudioInputDevice.hh"

AudioInputDevice
::AudioInputDevice(UsageEnvironment& env, unsigned char bitsPerSample,
                   unsigned char numChannels, unsigned samplingFrequency,
                   unsigned granularityInMS)
  : FramedSource(env), fBitsPerSample(bitsPerSample),
    fNumChannels(numChannels), fSamplingFrequency(samplingFrequency),
    fGranularityInMS(granularityInMS) {
}

AudioInputDevice::~AudioInputDevice() {
}

char** AudioInputDevice::allowedDeviceNames = NULL;

////////// AudioPortNames implementation //////////

AudioPortNames::AudioPortNames()
  : numPorts(0), portName(NULL) {
}

AudioPortNames::~AudioPortNames() {
  // "portName" and the strings it points to were allocated with new[],
  // so they must be released with the array form of delete:
  for (unsigned i = 0; i < numPorts; ++i) delete[] portName[i];
  delete[] portName;
}
live/liveMedia/StreamParser.hh000444 001751 000000 00000011757 12265042432 016602 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
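// Illustrative sketch - not part of the library - of the fixed 8-byte
// RTP/JPEG payload header (RFC 2435) unpacked by "JPEGVideoRTPSource" above.
// "parseJPEGPayloadHeader" is a hypothetical name, for documentation only:
#ifndef _JPEG_PAYLOAD_HEADER_SKETCH
#define _JPEG_PAYLOAD_HEADER_SKETCH
inline bool parseJPEGPayloadHeader(unsigned char const* p, unsigned len,
                                   unsigned& fragmentOffset, unsigned& type,
                                   unsigned& q, unsigned& width,
                                   unsigned& height) {
  if (len < 8) return false; // there's always at least an 8-byte header
  fragmentOffset = (p[1]<<16)|(p[2]<<8)|p[3]; // 24-bit offset of this fragment
  type = p[4];    // 0/64 => 4:2:2 sampling (0x21); 1/65 => 4:2:0 (0x22)
  q = p[5];       // Q > 127 => quantization tables follow in the 1st fragment
  width = p[6]*8; // width and height travel on the wire divided by 8
  height = p[7]*8;
  return true;
}
#endif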
// Abstract class for parsing a byte stream // C++ header #ifndef _STREAM_PARSER_HH #define _STREAM_PARSER_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif class StreamParser { public: virtual void flushInput(); protected: // we're a virtual base class typedef void (clientContinueFunc)(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime); StreamParser(FramedSource* inputSource, FramedSource::onCloseFunc* onInputCloseFunc, void* onInputCloseClientData, clientContinueFunc* clientContinueFunc, void* clientContinueClientData); virtual ~StreamParser(); void saveParserState(); virtual void restoreSavedParserState(); u_int32_t get4Bytes() { // byte-aligned; returned in big-endian order u_int32_t result = test4Bytes(); fCurParserIndex += 4; fRemainingUnparsedBits = 0; return result; } u_int32_t test4Bytes() { // as above, but doesn't advance ptr ensureValidBytes(4); unsigned char const* ptr = nextToParse(); return (ptr[0]<<24)|(ptr[1]<<16)|(ptr[2]<<8)|ptr[3]; } u_int16_t get2Bytes() { ensureValidBytes(2); unsigned char const* ptr = nextToParse(); u_int16_t result = (ptr[0]<<8)|ptr[1]; fCurParserIndex += 2; fRemainingUnparsedBits = 0; return result; } u_int8_t get1Byte() { // byte-aligned ensureValidBytes(1); fRemainingUnparsedBits = 0; return curBank()[fCurParserIndex++]; } u_int8_t test1Byte(unsigned numBytes) { // as above, but doesn't advance ptr ensureValidBytes(1); return nextToParse()[0]; } void getBytes(u_int8_t* to, unsigned numBytes) { testBytes(to, numBytes); fCurParserIndex += numBytes; fRemainingUnparsedBits = 0; } void testBytes(u_int8_t* to, unsigned numBytes) { // as above, but doesn't advance ptr ensureValidBytes(numBytes); memmove(to, nextToParse(), numBytes); } void skipBytes(unsigned numBytes) { ensureValidBytes(numBytes); fCurParserIndex += numBytes; } void skipBits(unsigned numBits); unsigned getBits(unsigned numBits); // numBits <= 32; returns data into low-order bits of result unsigned curOffset() const { return fCurParserIndex; } unsigned& totNumValidBytes() { return fTotNumValidBytes; } Boolean haveSeenEOF() const { return fHaveSeenEOF; } unsigned bankSize() const; private: unsigned char* curBank() { return fCurBank; } unsigned char* nextToParse() { return &curBank()[fCurParserIndex]; } unsigned char* lastParsed() { return &curBank()[fCurParserIndex-1]; } // makes sure that at least "numBytes" valid bytes remain: void ensureValidBytes(unsigned numBytesNeeded) { // common case: inlined: if (fCurParserIndex + numBytesNeeded <= fTotNumValidBytes) return; ensureValidBytes1(numBytesNeeded); } void ensureValidBytes1(unsigned numBytesNeeded); static void afterGettingBytes(void* clientData, unsigned numBytesRead, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingBytes1(unsigned numBytesRead, struct timeval presentationTime); static void onInputClosure(void* clientData); void onInputClosure1(); private: FramedSource* fInputSource; // should be a byte-stream source?? 
FramedSource::onCloseFunc* fClientOnInputCloseFunc; void* fClientOnInputCloseClientData; clientContinueFunc* fClientContinueFunc; void* fClientContinueClientData; // Use a pair of 'banks', and swap between them as they fill up: unsigned char* fBank[2]; unsigned char fCurBankNum; unsigned char* fCurBank; // The most recent 'saved' parse position: unsigned fSavedParserIndex; // <= fCurParserIndex unsigned char fSavedRemainingUnparsedBits; // The current position of the parser within the current bank: unsigned fCurParserIndex; // <= fTotNumValidBytes unsigned char fRemainingUnparsedBits; // in previous byte: [0,7] // The total number of valid bytes stored in the current bank: unsigned fTotNumValidBytes; // <= BANK_SIZE // Whether we have seen EOF on the input source: Boolean fHaveSeenEOF; struct timeval fLastSeenPresentationTime; // hack used for EOF handling }; #endif live/liveMedia/StreamParser.cpp000444 001751 000000 00000016653 12265042432 016765 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
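// Illustrative sketch - not part of the library - of the masking idiom that
// "StreamParser::getBits()" below relies on: keeping only the low "numBits"
// bits of a word via  value &~ ((~0)<<numBits).  "lowBits" is a hypothetical
// name for this example:
inline unsigned lowBits(unsigned value, unsigned numBits) {
  if (numBits >= 32) return value; // shifting by >= the word width is undefined
  return value &~ ((~0u)<<numBits); // clear every bit above bit numBits-1
}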
// Abstract class for parsing a byte stream
// Implementation

#include "StreamParser.hh"

#include <string.h>
#include <stdlib.h>

#define BANK_SIZE 150000

void StreamParser::flushInput() {
  fCurParserIndex = fSavedParserIndex = 0;
  fSavedRemainingUnparsedBits = fRemainingUnparsedBits = 0;
  fTotNumValidBytes = 0;
}

StreamParser::StreamParser(FramedSource* inputSource,
                           FramedSource::onCloseFunc* onInputCloseFunc,
                           void* onInputCloseClientData,
                           clientContinueFunc* clientContinueFunc,
                           void* clientContinueClientData)
  : fInputSource(inputSource), fClientOnInputCloseFunc(onInputCloseFunc),
    fClientOnInputCloseClientData(onInputCloseClientData),
    fClientContinueFunc(clientContinueFunc),
    fClientContinueClientData(clientContinueClientData),
    fSavedParserIndex(0), fSavedRemainingUnparsedBits(0),
    fCurParserIndex(0), fRemainingUnparsedBits(0),
    fTotNumValidBytes(0), fHaveSeenEOF(False) {
  fBank[0] = new unsigned char[BANK_SIZE];
  fBank[1] = new unsigned char[BANK_SIZE];
  fCurBankNum = 0;
  fCurBank = fBank[fCurBankNum];

  fLastSeenPresentationTime.tv_sec = 0; fLastSeenPresentationTime.tv_usec = 0;
}

StreamParser::~StreamParser() {
  delete[] fBank[0]; delete[] fBank[1];
}

void StreamParser::saveParserState() {
  fSavedParserIndex = fCurParserIndex;
  fSavedRemainingUnparsedBits = fRemainingUnparsedBits;
}

void StreamParser::restoreSavedParserState() {
  fCurParserIndex = fSavedParserIndex;
  fRemainingUnparsedBits = fSavedRemainingUnparsedBits;
}

void StreamParser::skipBits(unsigned numBits) {
  if (numBits <= fRemainingUnparsedBits) {
    fRemainingUnparsedBits -= numBits;
  } else {
    numBits -= fRemainingUnparsedBits;

    unsigned numBytesToExamine = (numBits+7)/8; // round up
    ensureValidBytes(numBytesToExamine);
    fCurParserIndex += numBytesToExamine;

    fRemainingUnparsedBits = 8*numBytesToExamine - numBits;
  }
}

unsigned StreamParser::getBits(unsigned numBits) {
  if (numBits <= fRemainingUnparsedBits) {
    unsigned char lastByte = *lastParsed();
    lastByte >>= (fRemainingUnparsedBits - numBits);
    fRemainingUnparsedBits -= numBits;

    return (unsigned)lastByte &~ ((~0)<<numBits); // keep the low numBits bits
  } else {
    unsigned char lastByte;
    if (fRemainingUnparsedBits > 0) {
      lastByte = *lastParsed();
    } else {
      lastByte = 0;
    }

    unsigned remainingBits = numBits - fRemainingUnparsedBits; // > 0

    // For simplicity, read the next 4 bytes, even though we might not
    // need all of them here:
    unsigned result = test4Bytes();

    result >>= (32 - remainingBits);
    result |= (lastByte << remainingBits);
    if (numBits < 32) result &=~ ((~0)<<numBits);

    fRemainingUnparsedBits = 0;
    fCurParserIndex += (remainingBits + 7)/8;

    return result;
  }
}

unsigned StreamParser::bankSize() const {
  return BANK_SIZE;
}

void StreamParser::ensureValidBytes1(unsigned numBytesNeeded) {
  // We need to read some more bytes from the input source.
  // Decide how much data to ask for:
  unsigned maxInputFrameSize = fInputSource->maxFrameSize();
  if (maxInputFrameSize > numBytesNeeded) numBytesNeeded = maxInputFrameSize;

  // First, check whether these new bytes would overflow the current
  // bank.  If so, start using a new bank now.
  if (fCurParserIndex + numBytesNeeded > BANK_SIZE) {
    // Swap banks, but save any still-needed bytes from the old bank:
    unsigned numBytesToSave = fTotNumValidBytes - fSavedParserIndex;
    unsigned char const* from = &curBank()[fSavedParserIndex];

    fCurBankNum = (fCurBankNum + 1)%2;
    fCurBank = fBank[fCurBankNum];
    memmove(curBank(), from, numBytesToSave);

    fCurParserIndex = fCurParserIndex - fSavedParserIndex;
    fSavedParserIndex = 0;
    fTotNumValidBytes = numBytesToSave;
  }

  // ASSERT: fCurParserIndex + numBytesNeeded > fTotNumValidBytes
  //      && fCurParserIndex + numBytesNeeded <= BANK_SIZE
  if (fCurParserIndex + numBytesNeeded > BANK_SIZE) {
    // If this happens, it means that we have too much saved parser state.
    // To fix this, increase BANK_SIZE as appropriate.
fInputSource->envir() << "StreamParser internal error (" << fCurParserIndex << " + " << numBytesNeeded << " > " << BANK_SIZE << ")\n"; fInputSource->envir().internalError(); } // Try to read as many new bytes as will fit in the current bank: unsigned maxNumBytesToRead = BANK_SIZE - fTotNumValidBytes; fInputSource->getNextFrame(&curBank()[fTotNumValidBytes], maxNumBytesToRead, afterGettingBytes, this, onInputClosure, this); throw NO_MORE_BUFFERED_INPUT; } void StreamParser::afterGettingBytes(void* clientData, unsigned numBytesRead, unsigned /*numTruncatedBytes*/, struct timeval presentationTime, unsigned /*durationInMicroseconds*/){ StreamParser* parser = (StreamParser*)clientData; if (parser != NULL) parser->afterGettingBytes1(numBytesRead, presentationTime); } void StreamParser::afterGettingBytes1(unsigned numBytesRead, struct timeval presentationTime) { // Sanity check: Make sure we didn't get too many bytes for our bank: if (fTotNumValidBytes + numBytesRead > BANK_SIZE) { fInputSource->envir() << "StreamParser::afterGettingBytes() warning: read " << numBytesRead << " bytes; expected no more than " << BANK_SIZE - fTotNumValidBytes << "\n"; } fLastSeenPresentationTime = presentationTime; unsigned char* ptr = &curBank()[fTotNumValidBytes]; fTotNumValidBytes += numBytesRead; // Continue our original calling source where it left off: restoreSavedParserState(); // Sigh... this is a crock; things would have been a lot simpler // here if we were using threads, with synchronous I/O... fClientContinueFunc(fClientContinueClientData, ptr, numBytesRead, presentationTime); } void StreamParser::onInputClosure(void* clientData) { StreamParser* parser = (StreamParser*)clientData; if (parser != NULL) parser->onInputClosure1(); } void StreamParser::onInputClosure1() { if (!fHaveSeenEOF) { // We're hitting EOF for the first time. Set our 'EOF' flag, and continue parsing, as if we'd just read 0 bytes of data. // This allows the parser to re-parse any remaining unparsed data (perhaps while testing for EOF at the end): fHaveSeenEOF = True; afterGettingBytes1(0, fLastSeenPresentationTime); } else { // We're hitting EOF for the second time. Now, we handle the source input closure: fHaveSeenEOF = False; if (fClientOnInputCloseFunc != NULL) (*fClientOnInputCloseFunc)(fClientOnInputCloseClientData); } } live/liveMedia/MPEG1or2AudioRTPSink.cpp000444 001751 000000 00000004473 12265042432 020003 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
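// Illustrative sketch - not part of the library - of the bank swap done by
// "StreamParser::ensureValidBytes1()" above: bytes at or after the saved
// parse point are copied to the front of the other bank, and the parse
// cursor is re-based.  "swapParserBanks" and its parameters are hypothetical
// names for this example:
#include <string.h>
inline unsigned swapParserBanks(unsigned char* toBank,
                                unsigned char const* fromBank,
                                unsigned savedIndex, unsigned totNumValidBytes,
                                unsigned& curParserIndex) {
  unsigned numBytesToSave = totNumValidBytes - savedIndex; // not yet consumed
  memmove(toBank, &fromBank[savedIndex], numBytesToSave);
  curParserIndex -= savedIndex; // same position, relative to the new bank
  return numBytesToSave; // becomes the new count of valid bytes in the bank
}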
// RTP sink for MPEG audio (RFC 2250) // Implementation #include "MPEG1or2AudioRTPSink.hh" MPEG1or2AudioRTPSink::MPEG1or2AudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs) : AudioRTPSink(env, RTPgs, 14, 90000, "MPA") { } MPEG1or2AudioRTPSink::~MPEG1or2AudioRTPSink() { } MPEG1or2AudioRTPSink* MPEG1or2AudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) { return new MPEG1or2AudioRTPSink(env, RTPgs); } void MPEG1or2AudioRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { // If this is the 1st frame in the 1st packet, set the RTP 'M' (marker) // bit (because this is considered the start of a talk spurt): if (isFirstPacket() && isFirstFrameInPacket()) { setMarkerBit(); } // If this is the first frame in the packet, set the lower half of the // audio-specific header (to the "fragmentationOffset"): if (isFirstFrameInPacket()) { setSpecialHeaderWord(fragmentationOffset&0xFFFF); } // Important: Also call our base class's doSpecialFrameHandling(), // to set the packet's timestamp: MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset, frameStart, numBytesInFrame, framePresentationTime, numRemainingBytes); } unsigned MPEG1or2AudioRTPSink::specialHeaderSize() const { // There's a 4 byte special audio header: return 4; } live/liveMedia/MPEG4VideoStreamFramer.cpp000444 001751 000000 00000056607 12265042432 020474 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
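// Illustrative sketch - not part of the library - of the 4-byte MPEG audio
// header (RFC 2250) that "MPEG1or2AudioRTPSink" above prepends to each
// packet: 16 "must be zero" bits followed by a 16-bit fragmentation offset.
// "writeMPAHeader" is a hypothetical name for this example:
inline void writeMPAHeader(unsigned char* p, unsigned fragmentationOffset) {
  p[0] = 0; p[1] = 0;                   // MBZ
  p[2] = (fragmentationOffset>>8)&0xFF; // offset of this fragment within
  p[3] = fragmentationOffset&0xFF;      // the audio frame, in bytes
}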
// A filter that breaks up an MPEG-4 video elementary stream into // frames for: // - Visual Object Sequence (VS) Header + Visual Object (VO) Header // + Video Object Layer (VOL) Header // - Group of VOP (GOV) Header // - VOP frame // Implementation #include "MPEG4VideoStreamFramer.hh" #include "MPEGVideoStreamParser.hh" #include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()" #include ////////// MPEG4VideoStreamParser definition ////////// // An enum representing the current state of the parser: enum MPEGParseState { PARSING_VISUAL_OBJECT_SEQUENCE, PARSING_VISUAL_OBJECT_SEQUENCE_SEEN_CODE, PARSING_VISUAL_OBJECT, PARSING_VIDEO_OBJECT_LAYER, PARSING_GROUP_OF_VIDEO_OBJECT_PLANE, PARSING_VIDEO_OBJECT_PLANE, PARSING_VISUAL_OBJECT_SEQUENCE_END_CODE }; class MPEG4VideoStreamParser: public MPEGVideoStreamParser { public: MPEG4VideoStreamParser(MPEG4VideoStreamFramer* usingSource, FramedSource* inputSource); virtual ~MPEG4VideoStreamParser(); private: // redefined virtual functions: virtual void flushInput(); virtual unsigned parse(); private: MPEG4VideoStreamFramer* usingSource() { return (MPEG4VideoStreamFramer*)fUsingSource; } void setParseState(MPEGParseState parseState); unsigned parseVisualObjectSequence(Boolean haveSeenStartCode = False); unsigned parseVisualObject(); unsigned parseVideoObjectLayer(); unsigned parseGroupOfVideoObjectPlane(); unsigned parseVideoObjectPlane(); unsigned parseVisualObjectSequenceEndCode(); // These are used for parsing within an already-read frame: Boolean getNextFrameBit(u_int8_t& result); Boolean getNextFrameBits(unsigned numBits, u_int32_t& result); // Which are used by: void analyzeVOLHeader(); private: MPEGParseState fCurrentParseState; unsigned fNumBitsSeenSoFar; // used by the getNextFrameBit*() routines u_int32_t vop_time_increment_resolution; unsigned fNumVTIRBits; // # of bits needed to count to "vop_time_increment_resolution" u_int8_t fixed_vop_rate; unsigned fixed_vop_time_increment; // used if 'fixed_vop_rate' is set unsigned fSecondsSinceLastTimeCode, fTotalTicksSinceLastTimeCode, fPrevNewTotalTicks; unsigned fPrevPictureCountDelta; Boolean fJustSawTimeCode; }; ////////// MPEG4VideoStreamFramer implementation ////////// MPEG4VideoStreamFramer* MPEG4VideoStreamFramer::createNew(UsageEnvironment& env, FramedSource* inputSource) { // Need to add source type checking here??? ##### return new MPEG4VideoStreamFramer(env, inputSource); } unsigned char* MPEG4VideoStreamFramer ::getConfigBytes(unsigned& numBytes) const { numBytes = fNumConfigBytes; return fConfigBytes; } void MPEG4VideoStreamFramer ::setConfigInfo(u_int8_t profileAndLevelIndication, char const* configStr) { fProfileAndLevelIndication = profileAndLevelIndication; delete[] fConfigBytes; fConfigBytes = parseGeneralConfigStr(configStr, fNumConfigBytes); } MPEG4VideoStreamFramer::MPEG4VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean createParser) : MPEGVideoStreamFramer(env, inputSource), fProfileAndLevelIndication(0), fConfigBytes(NULL), fNumConfigBytes(0), fNewConfigBytes(NULL), fNumNewConfigBytes(0) { fParser = createParser ? 
new MPEG4VideoStreamParser(this, inputSource) : NULL; } MPEG4VideoStreamFramer::~MPEG4VideoStreamFramer() { delete[] fConfigBytes; delete[] fNewConfigBytes; } void MPEG4VideoStreamFramer::startNewConfig() { delete[] fNewConfigBytes; fNewConfigBytes = NULL; fNumNewConfigBytes = 0; } void MPEG4VideoStreamFramer ::appendToNewConfig(unsigned char* newConfigBytes, unsigned numNewBytes) { // Allocate a new block of memory for the new config bytes: unsigned char* configNew = new unsigned char[fNumNewConfigBytes + numNewBytes]; // Copy the old, then the new, config bytes there: memmove(configNew, fNewConfigBytes, fNumNewConfigBytes); memmove(&configNew[fNumNewConfigBytes], newConfigBytes, numNewBytes); delete[] fNewConfigBytes; fNewConfigBytes = configNew; fNumNewConfigBytes += numNewBytes; } void MPEG4VideoStreamFramer::completeNewConfig() { delete[] fConfigBytes; fConfigBytes = fNewConfigBytes; fNewConfigBytes = NULL; fNumConfigBytes = fNumNewConfigBytes; fNumNewConfigBytes = 0; } Boolean MPEG4VideoStreamFramer::isMPEG4VideoStreamFramer() const { return True; } ////////// MPEG4VideoStreamParser implementation ////////// MPEG4VideoStreamParser ::MPEG4VideoStreamParser(MPEG4VideoStreamFramer* usingSource, FramedSource* inputSource) : MPEGVideoStreamParser(usingSource, inputSource), fCurrentParseState(PARSING_VISUAL_OBJECT_SEQUENCE), vop_time_increment_resolution(0), fNumVTIRBits(0), fixed_vop_rate(0), fixed_vop_time_increment(0), fSecondsSinceLastTimeCode(0), fTotalTicksSinceLastTimeCode(0), fPrevNewTotalTicks(0), fPrevPictureCountDelta(1), fJustSawTimeCode(False) { } MPEG4VideoStreamParser::~MPEG4VideoStreamParser() { } void MPEG4VideoStreamParser::setParseState(MPEGParseState parseState) { fCurrentParseState = parseState; MPEGVideoStreamParser::setParseState(); } void MPEG4VideoStreamParser::flushInput() { fSecondsSinceLastTimeCode = 0; fTotalTicksSinceLastTimeCode = 0; fPrevNewTotalTicks = 0; fPrevPictureCountDelta = 1; StreamParser::flushInput(); if (fCurrentParseState != PARSING_VISUAL_OBJECT_SEQUENCE) { setParseState(PARSING_VISUAL_OBJECT_SEQUENCE); // later, change to GOV or VOP? 
##### } } unsigned MPEG4VideoStreamParser::parse() { try { switch (fCurrentParseState) { case PARSING_VISUAL_OBJECT_SEQUENCE: { return parseVisualObjectSequence(); } case PARSING_VISUAL_OBJECT_SEQUENCE_SEEN_CODE: { return parseVisualObjectSequence(True); } case PARSING_VISUAL_OBJECT: { return parseVisualObject(); } case PARSING_VIDEO_OBJECT_LAYER: { return parseVideoObjectLayer(); } case PARSING_GROUP_OF_VIDEO_OBJECT_PLANE: { return parseGroupOfVideoObjectPlane(); } case PARSING_VIDEO_OBJECT_PLANE: { return parseVideoObjectPlane(); } case PARSING_VISUAL_OBJECT_SEQUENCE_END_CODE: { return parseVisualObjectSequenceEndCode(); } default: { return 0; // shouldn't happen } } } catch (int /*e*/) { #ifdef DEBUG fprintf(stderr, "MPEG4VideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n"); #endif return 0; // the parsing got interrupted } } #define VISUAL_OBJECT_SEQUENCE_START_CODE 0x000001B0 #define VISUAL_OBJECT_SEQUENCE_END_CODE 0x000001B1 #define GROUP_VOP_START_CODE 0x000001B3 #define VISUAL_OBJECT_START_CODE 0x000001B5 #define VOP_START_CODE 0x000001B6 unsigned MPEG4VideoStreamParser ::parseVisualObjectSequence(Boolean haveSeenStartCode) { #ifdef DEBUG fprintf(stderr, "parsing VisualObjectSequence\n"); #endif usingSource()->startNewConfig(); u_int32_t first4Bytes; if (!haveSeenStartCode) { while ((first4Bytes = test4Bytes()) != VISUAL_OBJECT_SEQUENCE_START_CODE) { #ifdef DEBUG fprintf(stderr, "ignoring non VS header: 0x%08x\n", first4Bytes); #endif get1Byte(); setParseState(PARSING_VISUAL_OBJECT_SEQUENCE); // ensures we progress over bad data } first4Bytes = get4Bytes(); } else { // We've already seen the start code first4Bytes = VISUAL_OBJECT_SEQUENCE_START_CODE; } save4Bytes(first4Bytes); // The next byte is the "profile_and_level_indication": u_int8_t pali = get1Byte(); #ifdef DEBUG fprintf(stderr, "profile_and_level_indication: %02x\n", pali); #endif saveByte(pali); usingSource()->fProfileAndLevelIndication = pali; // Now, copy all bytes that we see, up until we reach // a VISUAL_OBJECT_START_CODE: u_int32_t next4Bytes = get4Bytes(); while (next4Bytes != VISUAL_OBJECT_START_CODE) { saveToNextCode(next4Bytes); } setParseState(PARSING_VISUAL_OBJECT); // Compute this frame's presentation time: usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode); // This header forms part of the 'configuration' information: usingSource()->appendToNewConfig(fStartOfFrame, curFrameSize()); return curFrameSize(); } static inline Boolean isVideoObjectStartCode(u_int32_t code) { return code >= 0x00000100 && code <= 0x0000011F; } unsigned MPEG4VideoStreamParser::parseVisualObject() { #ifdef DEBUG fprintf(stderr, "parsing VisualObject\n"); #endif // Note that we've already read the VISUAL_OBJECT_START_CODE save4Bytes(VISUAL_OBJECT_START_CODE); // Next, extract the "visual_object_type" from the next 1 or 2 bytes: u_int8_t nextByte = get1Byte(); saveByte(nextByte); Boolean is_visual_object_identifier = (nextByte&0x80) != 0; u_int8_t visual_object_type; if (is_visual_object_identifier) { #ifdef DEBUG fprintf(stderr, "visual_object_verid: 0x%x; visual_object_priority: 0x%x\n", (nextByte&0x78)>>3, (nextByte&0x07)); #endif nextByte = get1Byte(); saveByte(nextByte); visual_object_type = (nextByte&0xF0)>>4; } else { visual_object_type = (nextByte&0x78)>>3; } #ifdef DEBUG fprintf(stderr, "visual_object_type: 0x%x\n", visual_object_type); #endif // At present, we support only the "Video ID" "visual_object_type" (1) if (visual_object_type != 1) { usingSource()->envir() << 
"MPEG4VideoStreamParser::parseVisualObject(): Warning: We don't handle visual_object_type " << visual_object_type << "\n"; } // Now, copy all bytes that we see, up until we reach // a video_object_start_code u_int32_t next4Bytes = get4Bytes(); while (!isVideoObjectStartCode(next4Bytes)) { saveToNextCode(next4Bytes); } save4Bytes(next4Bytes); #ifdef DEBUG fprintf(stderr, "saw a video_object_start_code: 0x%08x\n", next4Bytes); #endif setParseState(PARSING_VIDEO_OBJECT_LAYER); // Compute this frame's presentation time: usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode); // This header forms part of the 'configuration' information: usingSource()->appendToNewConfig(fStartOfFrame, curFrameSize()); return curFrameSize(); } static inline Boolean isVideoObjectLayerStartCode(u_int32_t code) { return code >= 0x00000120 && code <= 0x0000012F; } Boolean MPEG4VideoStreamParser::getNextFrameBit(u_int8_t& result) { if (fNumBitsSeenSoFar/8 >= curFrameSize()) return False; u_int8_t nextByte = fStartOfFrame[fNumBitsSeenSoFar/8]; result = (nextByte>>(7-fNumBitsSeenSoFar%8))&1; ++fNumBitsSeenSoFar; return True; } Boolean MPEG4VideoStreamParser::getNextFrameBits(unsigned numBits, u_int32_t& result) { result = 0; for (unsigned i = 0; i < numBits; ++i) { u_int8_t nextBit; if (!getNextFrameBit(nextBit)) return False; result = (result<<1)|nextBit; } return True; } void MPEG4VideoStreamParser::analyzeVOLHeader() { // Extract timing information (in particular, // "vop_time_increment_resolution") from the VOL Header: fNumBitsSeenSoFar = 41; do { u_int8_t is_object_layer_identifier; if (!getNextFrameBit(is_object_layer_identifier)) break; if (is_object_layer_identifier) fNumBitsSeenSoFar += 7; u_int32_t aspect_ratio_info; if (!getNextFrameBits(4, aspect_ratio_info)) break; if (aspect_ratio_info == 15 /*extended_PAR*/) fNumBitsSeenSoFar += 16; u_int8_t vol_control_parameters; if (!getNextFrameBit(vol_control_parameters)) break; if (vol_control_parameters) { fNumBitsSeenSoFar += 3; // chroma_format; low_delay u_int8_t vbw_parameters; if (!getNextFrameBit(vbw_parameters)) break; if (vbw_parameters) fNumBitsSeenSoFar += 79; } fNumBitsSeenSoFar += 2; // video_object_layer_shape u_int8_t marker_bit; if (!getNextFrameBit(marker_bit)) break; if (marker_bit != 1) { // sanity check usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): marker_bit 1 not set!\n"; break; } if (!getNextFrameBits(16, vop_time_increment_resolution)) break; #ifdef DEBUG fprintf(stderr, "vop_time_increment_resolution: %d\n", vop_time_increment_resolution); #endif if (vop_time_increment_resolution == 0) { usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): vop_time_increment_resolution is zero!\n"; break; } // Compute how many bits are necessary to represent this: fNumVTIRBits = 0; for (unsigned test = vop_time_increment_resolution; test>0; test /= 2) { ++fNumVTIRBits; } if (!getNextFrameBit(marker_bit)) break; if (marker_bit != 1) { // sanity check usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): marker_bit 2 not set!\n"; break; } if (!getNextFrameBit(fixed_vop_rate)) break; if (fixed_vop_rate) { // Get the following "fixed_vop_time_increment": if (!getNextFrameBits(fNumVTIRBits, fixed_vop_time_increment)) break; #ifdef DEBUG fprintf(stderr, "fixed_vop_time_increment: %d\n", fixed_vop_time_increment); if (fixed_vop_time_increment == 0) { usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): fixed_vop_time_increment is zero!\n"; } #endif } // Use 
"vop_time_increment_resolution" as the 'frame rate' // (really, 'tick rate'): usingSource()->fFrameRate = (double)vop_time_increment_resolution; #ifdef DEBUG fprintf(stderr, "fixed_vop_rate: %d; 'frame' (really tick) rate: %f\n", fixed_vop_rate, usingSource()->fFrameRate); #endif return; } while (0); if (fNumBitsSeenSoFar/8 >= curFrameSize()) { char errMsg[200]; sprintf(errMsg, "Not enough bits in VOL header: %d/8 >= %d\n", fNumBitsSeenSoFar, curFrameSize()); usingSource()->envir() << errMsg; } } unsigned MPEG4VideoStreamParser::parseVideoObjectLayer() { #ifdef DEBUG fprintf(stderr, "parsing VideoObjectLayer\n"); #endif // The first 4 bytes must be a "video_object_layer_start_code". // If not, this is a 'short video header', which we currently // don't support: u_int32_t next4Bytes = get4Bytes(); if (!isVideoObjectLayerStartCode(next4Bytes)) { usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectLayer(): This appears to be a 'short video header', which we current don't support\n"; } // Now, copy all bytes that we see, up until we reach // a GROUP_VOP_START_CODE or a VOP_START_CODE: do { saveToNextCode(next4Bytes); } while (next4Bytes != GROUP_VOP_START_CODE && next4Bytes != VOP_START_CODE); analyzeVOLHeader(); setParseState((next4Bytes == GROUP_VOP_START_CODE) ? PARSING_GROUP_OF_VIDEO_OBJECT_PLANE : PARSING_VIDEO_OBJECT_PLANE); // Compute this frame's presentation time: usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode); // This header ends the 'configuration' information: usingSource()->appendToNewConfig(fStartOfFrame, curFrameSize()); usingSource()->completeNewConfig(); return curFrameSize(); } unsigned MPEG4VideoStreamParser::parseGroupOfVideoObjectPlane() { #ifdef DEBUG fprintf(stderr, "parsing GroupOfVideoObjectPlane\n"); #endif // Note that we've already read the GROUP_VOP_START_CODE save4Bytes(GROUP_VOP_START_CODE); // Next, extract the (18-bit) time code from the next 3 bytes: u_int8_t next3Bytes[3]; getBytes(next3Bytes, 3); saveByte(next3Bytes[0]);saveByte(next3Bytes[1]);saveByte(next3Bytes[2]); unsigned time_code = (next3Bytes[0]<<10)|(next3Bytes[1]<<2)|(next3Bytes[2]>>6); unsigned time_code_hours = (time_code&0x0003E000)>>13; unsigned time_code_minutes = (time_code&0x00001F80)>>7; #if defined(DEBUG) || defined(DEBUG_TIMESTAMPS) Boolean marker_bit = (time_code&0x00000040) != 0; #endif unsigned time_code_seconds = (time_code&0x0000003F); #if defined(DEBUG) || defined(DEBUG_TIMESTAMPS) fprintf(stderr, "time_code: 0x%05x, hours %d, minutes %d, marker_bit %d, seconds %d\n", time_code, time_code_hours, time_code_minutes, marker_bit, time_code_seconds); #endif fJustSawTimeCode = True; // Now, copy all bytes that we see, up until we reach a VOP_START_CODE: u_int32_t next4Bytes = get4Bytes(); while (next4Bytes != VOP_START_CODE) { saveToNextCode(next4Bytes); } // Compute this frame's presentation time: usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode); // Record the time code: usingSource()->setTimeCode(time_code_hours, time_code_minutes, time_code_seconds, 0, 0); // Note: Because the GOV header can appear anywhere (not just at a 1s point), we // don't pass "fTotalTicksSinceLastTimeCode" as the "picturesSinceLastGOP" parameter. 
unsigned MPEG4VideoStreamParser::parseVideoObjectLayer() {
#ifdef DEBUG
  fprintf(stderr, "parsing VideoObjectLayer\n");
#endif
  // The first 4 bytes must be a "video_object_layer_start_code".
  // If not, this is a 'short video header', which we currently
  // don't support:
  u_int32_t next4Bytes = get4Bytes();
  if (!isVideoObjectLayerStartCode(next4Bytes)) {
    usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectLayer(): This appears to be a 'short video header', which we currently don't support\n";
  }

  // Now, copy all bytes that we see, up until we reach
  // a GROUP_VOP_START_CODE or a VOP_START_CODE:
  do {
    saveToNextCode(next4Bytes);
  } while (next4Bytes != GROUP_VOP_START_CODE && next4Bytes != VOP_START_CODE);

  analyzeVOLHeader();

  setParseState((next4Bytes == GROUP_VOP_START_CODE)
                ? PARSING_GROUP_OF_VIDEO_OBJECT_PLANE
                : PARSING_VIDEO_OBJECT_PLANE);

  // Compute this frame's presentation time:
  usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);

  // This header ends the 'configuration' information:
  usingSource()->appendToNewConfig(fStartOfFrame, curFrameSize());
  usingSource()->completeNewConfig();

  return curFrameSize();
}

unsigned MPEG4VideoStreamParser::parseGroupOfVideoObjectPlane() {
#ifdef DEBUG
  fprintf(stderr, "parsing GroupOfVideoObjectPlane\n");
#endif
  // Note that we've already read the GROUP_VOP_START_CODE
  save4Bytes(GROUP_VOP_START_CODE);

  // Next, extract the (18-bit) time code from the next 3 bytes:
  u_int8_t next3Bytes[3];
  getBytes(next3Bytes, 3);
  saveByte(next3Bytes[0]); saveByte(next3Bytes[1]); saveByte(next3Bytes[2]);
  unsigned time_code
    = (next3Bytes[0]<<10)|(next3Bytes[1]<<2)|(next3Bytes[2]>>6);
  unsigned time_code_hours    = (time_code&0x0003E000)>>13;
  unsigned time_code_minutes  = (time_code&0x00001F80)>>7;
#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
  Boolean marker_bit          = (time_code&0x00000040) != 0;
#endif
  unsigned time_code_seconds  = (time_code&0x0000003F);
#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
  fprintf(stderr, "time_code: 0x%05x, hours %d, minutes %d, marker_bit %d, seconds %d\n", time_code, time_code_hours, time_code_minutes, marker_bit, time_code_seconds);
#endif
  fJustSawTimeCode = True;

  // Now, copy all bytes that we see, up until we reach a VOP_START_CODE:
  u_int32_t next4Bytes = get4Bytes();
  while (next4Bytes != VOP_START_CODE) {
    saveToNextCode(next4Bytes);
  }

  // Compute this frame's presentation time:
  usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);

  // Record the time code:
  usingSource()->setTimeCode(time_code_hours, time_code_minutes,
                             time_code_seconds, 0, 0);
      // Note: Because the GOV header can appear anywhere (not just at a 1s
      // point), we don't pass "fTotalTicksSinceLastTimeCode" as the
      // "picturesSinceLastGOP" parameter.
  fSecondsSinceLastTimeCode = 0;
  if (fixed_vop_rate) fTotalTicksSinceLastTimeCode = 0;

  setParseState(PARSING_VIDEO_OBJECT_PLANE);

  return curFrameSize();
}
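// ---- Example (not part of the library) ----
// The GOV time code above packs, per the masks in the code, 5 bits of hours,
// 6 bits of minutes, 1 marker bit, and 6 bits of seconds into 18 bits. A
// self-contained sketch of the same unpacking (input bytes are hypothetical):
#if 0 // illustrative sketch only
#include <cstdio>
#include <cstdint>

int main() {
  uint8_t b[3] = {0x12, 0x34, 0x80}; // example bytes from a GOV header
  unsigned time_code = (b[0]<<10)|(b[1]<<2)|(b[2]>>6);
  printf("hours %u, minutes %u, marker %u, seconds %u\n",
         (time_code&0x0003E000)>>13, (time_code&0x00001F80)>>7,
         (time_code&0x00000040)>>6, time_code&0x0000003F);
  return 0;
}
#endif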
unsigned MPEG4VideoStreamParser::parseVideoObjectPlane() {
#ifdef DEBUG
  fprintf(stderr, "#parsing VideoObjectPlane\n");
#endif
  // Note that we've already read the VOP_START_CODE
  save4Bytes(VOP_START_CODE);

  // Get the "vop_coding_type" from the next byte:
  u_int8_t nextByte = get1Byte(); saveByte(nextByte);
  u_int8_t vop_coding_type = nextByte>>6;

  // Next, get the "modulo_time_base" by counting the '1' bits that follow.
  // We look at the next 32-bits only.  This should be enough in most cases.
  u_int32_t next4Bytes = get4Bytes();
  u_int32_t timeInfo = (nextByte<<(32-6))|(next4Bytes>>6);
  unsigned modulo_time_base = 0;
  u_int32_t mask = 0x80000000;
  while ((timeInfo&mask) != 0) {
    ++modulo_time_base;
    mask >>= 1;
  }
  mask >>= 1;

  // Check the following marker bit:
  if ((timeInfo&mask) == 0) {
    usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectPlane(): marker bit not set!\n";
  }
  mask >>= 1;

  // Then, get the "vop_time_increment".
  // First, make sure we have enough bits left for this:
  if ((mask>>(fNumVTIRBits-1)) == 0) {
    usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectPlane(): 32-bits are not enough to get \"vop_time_increment\"!\n";
  }
  unsigned vop_time_increment = 0;
  for (unsigned i = 0; i < fNumVTIRBits; ++i) {
    vop_time_increment |= timeInfo&mask;
    mask >>= 1;
  }
  while (mask != 0) {
    vop_time_increment >>= 1;
    mask >>= 1;
  }
#ifdef DEBUG
  fprintf(stderr, "vop_coding_type: %d(%c), modulo_time_base: %d, vop_time_increment: %d\n", vop_coding_type, "IPBS"[vop_coding_type], modulo_time_base, vop_time_increment);
#endif

  // Now, copy all bytes that we see, up until we reach a code of some sort:
  saveToNextCode(next4Bytes);

  // Update our counters based on the frame timing information that we saw:
  if (fixed_vop_time_increment > 0) {
    // This is a 'fixed_vop_rate' stream.  Use 'fixed_vop_time_increment':
    usingSource()->fPictureCount += fixed_vop_time_increment;
    if (vop_time_increment > 0 || modulo_time_base > 0) {
      fTotalTicksSinceLastTimeCode += fixed_vop_time_increment;
      // Note: "fSecondsSinceLastTimeCode" and "fPrevNewTotalTicks" are not used.
    }
  } else {
    // Use 'vop_time_increment':
    unsigned newTotalTicks
      = (fSecondsSinceLastTimeCode + modulo_time_base)*vop_time_increment_resolution
      + vop_time_increment;
    if (newTotalTicks == fPrevNewTotalTicks && fPrevNewTotalTicks > 0) {
      // This is apparently a buggy MPEG-4 video stream, because
      // "vop_time_increment" did not change.  Overcome this error,
      // by pretending that it did change.
#ifdef DEBUG
      fprintf(stderr, "Buggy MPEG-4 video stream: \"vop_time_increment\" did not change!\n");
#endif
      // The following assumes that we don't have 'B' frames.  If we do, then TARFU!
      usingSource()->fPictureCount += vop_time_increment;
      fTotalTicksSinceLastTimeCode += vop_time_increment;
      fSecondsSinceLastTimeCode += modulo_time_base;
    } else {
      if (newTotalTicks < fPrevNewTotalTicks && vop_coding_type != 2/*B*/
          && modulo_time_base == 0 && vop_time_increment == 0 && !fJustSawTimeCode) {
        // This is another kind of buggy MPEG-4 video stream, in which
        // "vop_time_increment" wraps around, but without
        // "modulo_time_base" changing (or just having had a new time code).
        // Overcome this by pretending that "vop_time_increment" *did* wrap around:
#ifdef DEBUG
        fprintf(stderr, "Buggy MPEG-4 video stream: \"vop_time_increment\" wrapped around, but without \"modulo_time_base\" changing!\n");
#endif
        ++fSecondsSinceLastTimeCode;
        newTotalTicks += vop_time_increment_resolution;
      }
      fPrevNewTotalTicks = newTotalTicks;
      if (vop_coding_type != 2/*B*/) {
        int pictureCountDelta = newTotalTicks - fTotalTicksSinceLastTimeCode;
        if (pictureCountDelta <= 0) pictureCountDelta = fPrevPictureCountDelta;
            // ensures that the picture count is always increasing
        usingSource()->fPictureCount += pictureCountDelta;
        fPrevPictureCountDelta = pictureCountDelta;
        fTotalTicksSinceLastTimeCode = newTotalTicks;
        fSecondsSinceLastTimeCode += modulo_time_base;
      }
    }
  }
  fJustSawTimeCode = False; // for next time

  // The next thing to parse depends on the code that we just saw,
  // but we are assumed to have ended the current picture:
  usingSource()->fPictureEndMarker = True; // HACK #####
  switch (next4Bytes) {
  case VISUAL_OBJECT_SEQUENCE_END_CODE: {
    setParseState(PARSING_VISUAL_OBJECT_SEQUENCE_END_CODE);
    break;
  }
  case VISUAL_OBJECT_SEQUENCE_START_CODE: {
    setParseState(PARSING_VISUAL_OBJECT_SEQUENCE_SEEN_CODE);
    break;
  }
  case VISUAL_OBJECT_START_CODE: {
    setParseState(PARSING_VISUAL_OBJECT);
    break;
  }
  case GROUP_VOP_START_CODE: {
    setParseState(PARSING_GROUP_OF_VIDEO_OBJECT_PLANE);
    break;
  }
  case VOP_START_CODE: {
    setParseState(PARSING_VIDEO_OBJECT_PLANE);
    break;
  }
  default: {
    if (isVideoObjectStartCode(next4Bytes)) {
      setParseState(PARSING_VIDEO_OBJECT_LAYER);
    } else if (isVideoObjectLayerStartCode(next4Bytes)) {
      // Copy all bytes that we see, up until we reach a VOP_START_CODE:
      u_int32_t next4Bytes = get4Bytes();
      while (next4Bytes != VOP_START_CODE) {
        saveToNextCode(next4Bytes);
      }
      setParseState(PARSING_VIDEO_OBJECT_PLANE);
    } else {
      usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectPlane(): Saw unexpected code " << (void*)next4Bytes << "\n";
      setParseState(PARSING_VIDEO_OBJECT_PLANE); // the safest way to recover...
    }
    break;
  }
  }

  // Compute this frame's presentation time:
  usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);

  return curFrameSize();
}

unsigned MPEG4VideoStreamParser::parseVisualObjectSequenceEndCode() {
#ifdef DEBUG
  fprintf(stderr, "parsing VISUAL_OBJECT_SEQUENCE_END_CODE\n");
#endif
  // Note that we've already read the VISUAL_OBJECT_SEQUENCE_END_CODE
  save4Bytes(VISUAL_OBJECT_SEQUENCE_END_CODE);

  setParseState(PARSING_VISUAL_OBJECT_SEQUENCE);

  // Treat this as if we had ended a picture:
  usingSource()->fPictureEndMarker = True; // HACK #####

  return curFrameSize();
}
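// ---- Example (not part of the library) ----
// The tick arithmetic in "parseVideoObjectPlane()" above converts
// (whole seconds, modulo_time_base, vop_time_increment) into a total tick
// count on a clock of "vop_time_increment_resolution" ticks per second.
// A worked example with hypothetical values:
#if 0 // illustrative sketch only
#include <cstdio>

int main() {
  unsigned const resolution = 30000; // hypothetical tick clock
  unsigned secondsSinceTimeCode = 2, modulo_time_base = 1,
           vop_time_increment = 1001;
  unsigned newTotalTicks
    = (secondsSinceTimeCode + modulo_time_base)*resolution + vop_time_increment;
  printf("%u ticks\n", newTotalTicks); // (2+1)*30000 + 1001 = 91001
  return 0;
}
#endif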
---- File: live/liveMedia/WAVAudioFileSource.cpp ----

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A WAV audio file source
// Implementation

#include "WAVAudioFileSource.hh"
#include "InputFile.hh"
#include "GroupsockHelper.hh"

////////// WAVAudioFileSource //////////

WAVAudioFileSource*
WAVAudioFileSource::createNew(UsageEnvironment& env, char const* fileName) {
  do {
    FILE* fid = OpenInputFile(env, fileName);
    if (fid == NULL) break;

    WAVAudioFileSource* newSource = new WAVAudioFileSource(env, fid);
    if (newSource != NULL && newSource->bitsPerSample() == 0) {
      // The WAV file header was apparently invalid.
      Medium::close(newSource);
      break;
    }

    newSource->fFileSize = (unsigned)GetFileSize(fileName, fid);

    return newSource;
  } while (0);

  return NULL;
}

unsigned WAVAudioFileSource::numPCMBytes() const {
  if (fFileSize < fWAVHeaderSize) return 0;
  return fFileSize - fWAVHeaderSize;
}

void WAVAudioFileSource::setScaleFactor(int scale) {
  if (!fFidIsSeekable) return; // we can't do 'trick play' operations on non-seekable files

  fScaleFactor = scale;

  if (fScaleFactor < 0 && TellFile64(fFid) > 0) {
    // Because we're reading backwards, seek back one sample, to ensure that
    //   (i)  we start reading the last sample before the start point, and
    //   (ii) we don't hit end-of-file on the first read.
    int bytesPerSample = (fNumChannels*fBitsPerSample)/8;
    if (bytesPerSample == 0) bytesPerSample = 1;
    SeekFile64(fFid, -bytesPerSample, SEEK_CUR);
  }
}

void WAVAudioFileSource::seekToPCMByte(unsigned byteNumber, unsigned numBytesToStream) {
  byteNumber += fWAVHeaderSize;
  if (byteNumber > fFileSize) byteNumber = fFileSize;

  SeekFile64(fFid, byteNumber, SEEK_SET);

  fNumBytesToStream = numBytesToStream;
  fLimitNumBytesToStream = fNumBytesToStream > 0;
}

unsigned char WAVAudioFileSource::getAudioFormat() {
  return fAudioFormat;
}

#define nextc fgetc(fid)

static Boolean get4Bytes(FILE* fid, u_int32_t& result) { // little-endian
  int c0, c1, c2, c3;
  if ((c0 = nextc) == EOF || (c1 = nextc) == EOF
      || (c2 = nextc) == EOF || (c3 = nextc) == EOF) return False;
  result = (c3<<24)|(c2<<16)|(c1<<8)|c0;
  return True;
}

static Boolean get2Bytes(FILE* fid, u_int16_t& result) { // little-endian
  int c0, c1;
  if ((c0 = nextc) == EOF || (c1 = nextc) == EOF) return False;
  result = (c1<<8)|c0;
  return True;
}

static Boolean skipBytes(FILE* fid, int num) {
  while (num-- > 0) {
    if (nextc == EOF) return False;
  }
  return True;
}
WAVAudioFileSource::WAVAudioFileSource(UsageEnvironment& env, FILE* fid)
  : AudioInputDevice(env, 0, 0, 0, 0) /* set the real parameters later */,
    fFid(fid), fFidIsSeekable(False), fLastPlayTime(0), fHaveStartedReading(False),
    fWAVHeaderSize(0), fFileSize(0), fScaleFactor(1),
    fLimitNumBytesToStream(False), fNumBytesToStream(0), fAudioFormat(WA_UNKNOWN) {
  // Check the WAV file header for validity.
  // Note: The following web pages contain info about the WAV format:
  // http://www.ringthis.com/dev/wave_format.htm
  // http://www.lightlink.com/tjweber/StripWav/Canon.html
  // http://www.onicos.com/staff/iz/formats/wav.html

  Boolean success = False; // until we learn otherwise
  do {
    // RIFF Chunk:
    if (nextc != 'R' || nextc != 'I' || nextc != 'F' || nextc != 'F') break;
    if (!skipBytes(fid, 4)) break;
    if (nextc != 'W' || nextc != 'A' || nextc != 'V' || nextc != 'E') break;

    // Skip over any chunk that's not a FORMAT ('fmt ') chunk:
    u_int32_t tmp;
    if (!get4Bytes(fid, tmp)) break;
    if (tmp != 0x20746d66/*'fmt ', little-endian*/) {
      // Skip this chunk:
      if (!get4Bytes(fid, tmp)) break;
      if (!skipBytes(fid, tmp)) break;
    }

    // FORMAT Chunk (the 4-byte header code has already been parsed):
    unsigned formatLength;
    if (!get4Bytes(fid, formatLength)) break;
    unsigned short audioFormat;
    if (!get2Bytes(fid, audioFormat)) break;

    fAudioFormat = (unsigned char)audioFormat;
    if (fAudioFormat != WA_PCM && fAudioFormat != WA_PCMA
        && fAudioFormat != WA_PCMU && fAudioFormat != WA_IMA_ADPCM) {
      // It's a format that we don't (yet) understand
      env.setResultMsg("Audio format is not one that we handle (PCM/PCMU/PCMA or IMA ADPCM)");
      break;
    }
    unsigned short numChannels;
    if (!get2Bytes(fid, numChannels)) break;
    fNumChannels = (unsigned char)numChannels;
    if (fNumChannels < 1 || fNumChannels > 2) { // invalid # channels
      char errMsg[100];
      sprintf(errMsg, "Bad # channels: %d", fNumChannels);
      env.setResultMsg(errMsg);
      break;
    }
    if (!get4Bytes(fid, fSamplingFrequency)) break;
    if (fSamplingFrequency == 0) {
      env.setResultMsg("Bad sampling frequency: 0");
      break;
    }
    if (!skipBytes(fid, 6)) break; // "nAvgBytesPerSec" (4 bytes) + "nBlockAlign" (2 bytes)
    unsigned short bitsPerSample;
    if (!get2Bytes(fid, bitsPerSample)) break;
    fBitsPerSample = (unsigned char)bitsPerSample;
    if (fBitsPerSample == 0) {
      env.setResultMsg("Bad bits-per-sample: 0");
      break;
    }
    if (!skipBytes(fid, formatLength - 16)) break;

    // FACT chunk (optional):
    int c = nextc;
    if (c == 'f') {
      if (nextc != 'a' || nextc != 'c' || nextc != 't') break;
      unsigned factLength;
      if (!get4Bytes(fid, factLength)) break;
      if (!skipBytes(fid, factLength)) break;
      c = nextc;
    }

    // DATA Chunk:
    if (c != 'd' || nextc != 'a' || nextc != 't' || nextc != 'a') break;
    if (!skipBytes(fid, 4)) break;

    // The header is good; the remaining data are the sample bytes.
    fWAVHeaderSize = (unsigned)TellFile64(fid);
    success = True;
  } while (0);

  if (!success) {
    env.setResultMsg("Bad WAV file format");
    // Set "fBitsPerSample" to zero, to indicate failure:
    fBitsPerSample = 0;
    return;
  }

  fPlayTimePerSample = 1e6/(double)fSamplingFrequency;

  // Although PCM is a sample-based format, we group samples into
  // 'frames' for efficient delivery to clients.  Set up our preferred
  // frame size to be close to 20 ms, if possible, but always no greater
  // than 1400 bytes (to ensure that it will fit in a single RTP packet)
  unsigned maxSamplesPerFrame = (1400*8)/(fNumChannels*fBitsPerSample);
  unsigned desiredSamplesPerFrame = (unsigned)(0.02*fSamplingFrequency);
  unsigned samplesPerFrame = desiredSamplesPerFrame < maxSamplesPerFrame
    ? desiredSamplesPerFrame : maxSamplesPerFrame;
  fPreferredFrameSize = (samplesPerFrame*fNumChannels*fBitsPerSample)/8;

  fFidIsSeekable = FileIsSeekable(fFid);
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
  // Now that we've finished reading the WAV header, all future reads
  // (of audio samples) from the file will be asynchronous:
  makeSocketNonBlocking(fileno(fFid));
#endif
}
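// ---- Example (not part of the library) ----
// A worked instance of the preferred-frame-size computation at the end of
// the constructor above, for 16-bit stereo at 44100 Hz (values chosen only
// for illustration):
#if 0 // illustrative sketch only
#include <cstdio>

int main() {
  unsigned numChannels = 2, bitsPerSample = 16, samplingFrequency = 44100;
  unsigned maxSamplesPerFrame = (1400*8)/(numChannels*bitsPerSample);    // 350
  unsigned desiredSamplesPerFrame = (unsigned)(0.02*samplingFrequency); // 882 (20 ms)
  unsigned samplesPerFrame = desiredSamplesPerFrame < maxSamplesPerFrame
    ? desiredSamplesPerFrame : maxSamplesPerFrame;                      // 350
  unsigned preferredFrameSize = (samplesPerFrame*numChannels*bitsPerSample)/8;
  printf("%u samples -> %u bytes per frame\n", samplesPerFrame, preferredFrameSize);
      // 350 -> 1400: the RTP-packet cap wins over the 20 ms target
  return 0;
}
#endif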
WAVAudioFileSource::~WAVAudioFileSource() {
  if (fFid == NULL) return;

#ifndef READ_FROM_FILES_SYNCHRONOUSLY
  envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid));
#endif

  CloseInputFile(fFid);
}

void WAVAudioFileSource::doGetNextFrame() {
  if (feof(fFid) || ferror(fFid)
      || (fLimitNumBytesToStream && fNumBytesToStream == 0)) {
    handleClosure(this);
    return;
  }

  fFrameSize = 0; // until it's set later
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
  doReadFromFile();
#else
  if (!fHaveStartedReading) {
    // Await readable data from the file:
    envir().taskScheduler().turnOnBackgroundReadHandling(fileno(fFid),
        (TaskScheduler::BackgroundHandlerProc*)&fileReadableHandler, this);
    fHaveStartedReading = True;
  }
#endif
}

void WAVAudioFileSource::doStopGettingFrames() {
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
  envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid));
  fHaveStartedReading = False;
#endif
}

void WAVAudioFileSource::fileReadableHandler(WAVAudioFileSource* source, int /*mask*/) {
  if (!source->isCurrentlyAwaitingData()) {
    source->doStopGettingFrames(); // we're not ready for the data yet
    return;
  }
  source->doReadFromFile();
}
void WAVAudioFileSource::doReadFromFile() {
  // Try to read as many bytes as will fit in the buffer provided
  // (or "fPreferredFrameSize" if less):
  if (fLimitNumBytesToStream && fNumBytesToStream < fMaxSize) {
    fMaxSize = fNumBytesToStream;
  }
  if (fPreferredFrameSize < fMaxSize) {
    fMaxSize = fPreferredFrameSize;
  }
  unsigned bytesPerSample = (fNumChannels*fBitsPerSample)/8;
  if (bytesPerSample == 0) bytesPerSample = 1;
      // because we can't read less than a byte at a time

  // For 'trick play', read one sample at a time;
  // otherwise (normal case) read samples in bulk:
  unsigned bytesToRead = fScaleFactor == 1
    ? fMaxSize - fMaxSize%bytesPerSample : bytesPerSample;
  unsigned numBytesRead;
  while (1) { // loop for 'trick play' only
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
    numBytesRead = fread(fTo, 1, bytesToRead, fFid);
#else
    if (fFidIsSeekable) {
      numBytesRead = fread(fTo, 1, bytesToRead, fFid);
    } else {
      // For non-seekable files (e.g., pipes), call "read()" rather than
      // "fread()", to ensure that the read doesn't block:
      numBytesRead = read(fileno(fFid), fTo, bytesToRead);
    }
#endif
    if (numBytesRead == 0) {
      handleClosure(this);
      return;
    }
    fFrameSize += numBytesRead;
    fTo += numBytesRead;
    fMaxSize -= numBytesRead;
    fNumBytesToStream -= numBytesRead;

    // If we did an asynchronous read, and didn't read an integral number
    // of samples, then we need to wait for another read:
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
    if (fFrameSize%bytesPerSample > 0) return;
#endif

    // If we're doing 'trick play', then seek to the appropriate place for
    // reading the next sample, and keep reading until we fill the provided buffer:
    if (fScaleFactor != 1) {
      SeekFile64(fFid, (fScaleFactor-1)*bytesPerSample, SEEK_CUR);
      if (fMaxSize < bytesPerSample) break;
    } else {
      break; // from the loop (normal case)
    }
  }

  // Set the 'presentation time' and 'duration' of this frame:
  if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
    // This is the first frame, so use the current time:
    gettimeofday(&fPresentationTime, NULL);
  } else {
    // Increment by the play time of the previous data:
    unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
    fPresentationTime.tv_sec += uSeconds/1000000;
    fPresentationTime.tv_usec = uSeconds%1000000;
  }

  // Remember the play time of this data:
  fDurationInMicroseconds = fLastPlayTime
    = (unsigned)((fPlayTimePerSample*fFrameSize)/bytesPerSample);

  // Inform the reader that he has data:
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
  // To avoid possible infinite recursion, we need to return to the event loop to do this:
  nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
      (TaskFunc*)FramedSource::afterGetting, this);
#else
  // Because the file read was done from the event loop, we can call the
  // 'after getting' function directly, without risk of infinite recursion:
  FramedSource::afterGetting(this);
#endif
}

Boolean WAVAudioFileSource::setInputPort(int /*portIndex*/) {
  return True;
}

double WAVAudioFileSource::getAverageLevel() const {
  return 0.0; // ##### fix this later
}
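// ---- Example (not part of the library) ----
// A minimal sketch of how a client might create this source, assuming the
// usual BasicUsageEnvironment setup used by the "testProgs" examples and the
// accessors declared by "AudioInputDevice"; "test.wav" is a placeholder name:
#if 0 // illustrative sketch only
#include "BasicUsageEnvironment.hh"
#include "WAVAudioFileSource.hh"

int main() {
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  WAVAudioFileSource* source = WAVAudioFileSource::createNew(*env, "test.wav");
  if (source == NULL) {
    *env << "Failed to open WAV file: " << env->getResultMsg() << "\n";
    return 1;
  }
  *env << "Sampling frequency: " << (int)source->samplingFrequency()
       << " Hz, " << (int)source->numChannels() << " channel(s)\n";

  Medium::close(source);
  return 0;
}
#endif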
---- File: live/liveMedia/MPEG1or2AudioRTPSource.cpp ----

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// MPEG-1 or MPEG-2 Audio RTP Sources
// Implementation

#include "MPEG1or2AudioRTPSource.hh"

MPEG1or2AudioRTPSource*
MPEG1or2AudioRTPSource::createNew(UsageEnvironment& env,
                                  Groupsock* RTPgs,
                                  unsigned char rtpPayloadFormat,
                                  unsigned rtpTimestampFrequency) {
  return new MPEG1or2AudioRTPSource(env, RTPgs, rtpPayloadFormat,
                                    rtpTimestampFrequency);
}

MPEG1or2AudioRTPSource::MPEG1or2AudioRTPSource(UsageEnvironment& env,
                                               Groupsock* rtpGS,
                                               unsigned char rtpPayloadFormat,
                                               unsigned rtpTimestampFrequency)
  : MultiFramedRTPSource(env, rtpGS, rtpPayloadFormat, rtpTimestampFrequency) {
}

MPEG1or2AudioRTPSource::~MPEG1or2AudioRTPSource() {
}

Boolean MPEG1or2AudioRTPSource
::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) {
  // There's a 4-byte header indicating fragmentation.
  if (packet->dataSize() < 4) return False;

  // Note: This fragmentation header is actually useless to us, because
  // it doesn't tell us whether or not this RTP packet *ends* a
  // fragmented frame.  Thus, we can't use it to properly set
  // "fCurrentPacketCompletesFrame".  Instead, we assume that even
  // a partial audio frame will be usable to clients.

  resultSpecialHeaderSize = 4;
  return True;
}

char const* MPEG1or2AudioRTPSource::MIMEtype() const {
  return "audio/MPEG";
}
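// ---- Example (not part of the library) ----
// A sketch of the 4-byte MPEG audio header that is skipped above. As I read
// RFC 2250, it is 16 "must be zero" bits followed by a 16-bit fragmentation
// offset (the byte offset of this packet's payload within the audio frame);
// the input bytes here are hypothetical:
#if 0 // illustrative sketch only
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t hdr[4] = {0x00, 0x00, 0x01, 0x90}; // example packet bytes
  unsigned fragOffset = (hdr[2] << 8) | hdr[3];
  printf("fragmentation offset: %u bytes\n", fragOffset); // 0x0190 = 400
  return 0;
}
#endif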
---- File: live/liveMedia/MP3StreamState.hh ----

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A class encapsulating the state of a MP3 stream
// C++ header

#ifndef _MP3_STREAM_STATE_HH
#define _MP3_STREAM_STATE_HH

#ifndef _USAGE_ENVIRONMENT_HH
#include "UsageEnvironment.hh"
#endif
#ifndef _BOOLEAN_HH
#include "Boolean.hh"
#endif
#ifndef _MP3_INTERNALS_HH
#include "MP3Internals.hh"
#endif
#ifndef _NET_COMMON_H
#include "NetCommon.h"
#endif

#include <stdio.h>

#define XING_TOC_LENGTH 100

class MP3StreamState {
public:
  MP3StreamState(UsageEnvironment& env);
  virtual ~MP3StreamState();

  void assignStream(FILE* fid, unsigned fileSize);

  unsigned findNextHeader(struct timeval& presentationTime);
  Boolean readFrame(unsigned char* outBuf, unsigned outBufSize,
                    unsigned& resultFrameSize,
                    unsigned& resultDurationInMicroseconds);
      // called after findNextHeader()

  void getAttributes(char* buffer, unsigned bufferSize) const;

  float filePlayTime() const; // in seconds
  unsigned fileSize() const { return fFileSize; }
  void setPresentationTimeScale(unsigned scale) { fPresentationTimeScale = scale; }
  unsigned getByteNumberFromPositionFraction(float fraction); // 0.0 <= fraction <= 1.0
  void seekWithinFile(unsigned seekByteNumber);

  void checkForXingHeader(); // hack for Xing VBR files

protected: // private->protected requested by Pierre l'Hussiez
  unsigned readFromStream(unsigned char* buf, unsigned numChars);

private:
  MP3FrameParams& fr() { return fCurrentFrame; }
  MP3FrameParams const& fr() const { return fCurrentFrame; }

  struct timeval currentFramePlayTime() const;

  Boolean findNextFrame();

private:
  UsageEnvironment& fEnv;
  FILE* fFid;
  Boolean fFidIsReallyASocket;
  unsigned fFileSize;
  unsigned fNumFramesInFile;
  unsigned fPresentationTimeScale;
      // used if we're streaming at other than the normal rate
  Boolean fIsVBR, fHasXingTOC;
  u_int8_t fXingTOC[XING_TOC_LENGTH]; // set iff "fHasXingTOC" is True

  MP3FrameParams fCurrentFrame;
  struct timeval fNextFramePresentationTime;
};

#endif
---- File: live/liveMedia/H263plusVideoRTPSink.cpp ----

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// RTP sink for H.263+ video (RFC 4629)
// Implementation

#include "H263plusVideoRTPSink.hh"

H263plusVideoRTPSink
::H263plusVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
                       unsigned char rtpPayloadFormat,
                       u_int32_t rtpTimestampFrequency)
  : VideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "H263-1998") {
}

H263plusVideoRTPSink::~H263plusVideoRTPSink() {
}

H263plusVideoRTPSink*
H263plusVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
                                unsigned char rtpPayloadFormat,
                                u_int32_t rtpTimestampFrequency) {
  return new H263plusVideoRTPSink(env, RTPgs, rtpPayloadFormat,
                                  rtpTimestampFrequency);
}

Boolean H263plusVideoRTPSink
::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
                                 unsigned /*numBytesInFrame*/) const {
  // A packet can contain only one frame
  return False;
}

void H263plusVideoRTPSink
::doSpecialFrameHandling(unsigned fragmentationOffset,
                         unsigned char* frameStart,
                         unsigned numBytesInFrame,
                         struct timeval framePresentationTime,
                         unsigned numRemainingBytes) {
  if (fragmentationOffset == 0) {
    // This packet contains the first (or only) fragment of the frame.
    // Set the 'P' bit in the special header:
    unsigned short specialHeader = 0x0400;

    // Also, reuse the first two bytes of the payload for this special
    // header.  (They should both have been zero.)
    if (numBytesInFrame < 2) {
      envir() << "H263plusVideoRTPSink::doSpecialFrameHandling(): bad frame size " << numBytesInFrame << "\n";
      return;
    }
    if (frameStart[0] != 0 || frameStart[1] != 0) {
      envir() << "H263plusVideoRTPSink::doSpecialFrameHandling(): unexpected non-zero first two bytes: "
              << (void*)(frameStart[0]) << "," << (void*)(frameStart[1]) << "\n";
    }
    frameStart[0] = specialHeader>>8;
    frameStart[1] = (unsigned char)specialHeader;
  } else {
    unsigned short specialHeader = 0;
    setSpecialHeaderBytes((unsigned char*)&specialHeader, 2);
  }

  if (numRemainingBytes == 0) {
    // This packet contains the last (or only) fragment of the frame.
    // Set the RTP 'M' ('marker') bit:
    setMarkerBit();
  }

  // Also set the RTP timestamp:
  setTimestamp(framePresentationTime);
}

unsigned H263plusVideoRTPSink::specialHeaderSize() const {
  // There's a 2-byte special video header.  However, if we're the first
  // (or only) fragment of a frame, then we reuse the first 2 bytes of
  // the payload instead.
  return (curFragmentationOffset() == 0) ? 0 : 2;
}
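// ---- Example (not part of the library) ----
// The 16-bit payload header written above is 0x0400, which sets only the
// 'P' bit. As I read RFC 4629, the layout is: 5 reserved bits, P, V, a 6-bit
// PLEN, and a 3-bit PEBIT, so P is bit 10 counting from the LSB:
#if 0 // illustrative sketch only
#include <cstdint>
#include <cstdio>

int main() {
  uint16_t specialHeader = 0x0400;
  printf("P bit: %u\n", (specialHeader >> 10) & 1); // 1: picture starts here
  uint8_t bytes[2] = { (uint8_t)(specialHeader >> 8), (uint8_t)specialHeader };
  printf("on the wire: %02x %02x\n", bytes[0], bytes[1]); // 04 00
  return 0;
}
#endif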
---- File: live/liveMedia/MPEG1or2DemuxedElementaryStream.cpp ----

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A MPEG 1 or 2 Elementary Stream, demultiplexed from a Program Stream
// Implementation

#include "MPEG1or2DemuxedElementaryStream.hh"

////////// MPEG1or2DemuxedElementaryStream //////////

MPEG1or2DemuxedElementaryStream
::MPEG1or2DemuxedElementaryStream(UsageEnvironment& env, u_int8_t streamIdTag,
                                  MPEG1or2Demux& sourceDemux)
  : FramedSource(env),
    fOurStreamIdTag(streamIdTag), fOurSourceDemux(sourceDemux), fMPEGversion(0) {
  // Set our MIME type string for known media types:
  if ((streamIdTag&0xE0) == 0xC0) {
    fMIMEtype = "audio/MPEG";
  } else if ((streamIdTag&0xF0) == 0xE0) {
    fMIMEtype = "video/MPEG";
  } else {
    fMIMEtype = MediaSource::MIMEtype();
  }
}

MPEG1or2DemuxedElementaryStream::~MPEG1or2DemuxedElementaryStream() {
  fOurSourceDemux.noteElementaryStreamDeletion(this);
}

void MPEG1or2DemuxedElementaryStream::doGetNextFrame() {
  fOurSourceDemux.getNextFrame(fOurStreamIdTag, fTo, fMaxSize,
                               afterGettingFrame, this,
                               handleClosure, this);
}

void MPEG1or2DemuxedElementaryStream::doStopGettingFrames() {
  fOurSourceDemux.stopGettingFrames(fOurStreamIdTag);
}

char const* MPEG1or2DemuxedElementaryStream::MIMEtype() const {
  return fMIMEtype;
}

unsigned MPEG1or2DemuxedElementaryStream::maxFrameSize() const {
  return 6+65535; // because the MPEG spec allows for PES packets as large as
                  // (6 + 65535) bytes (header + data)
}

void MPEG1or2DemuxedElementaryStream
::afterGettingFrame(void* clientData,
                    unsigned frameSize, unsigned numTruncatedBytes,
                    struct timeval presentationTime,
                    unsigned durationInMicroseconds) {
  MPEG1or2DemuxedElementaryStream* stream
    = (MPEG1or2DemuxedElementaryStream*)clientData;
  stream->afterGettingFrame1(frameSize, numTruncatedBytes,
                             presentationTime, durationInMicroseconds);
}

void MPEG1or2DemuxedElementaryStream
::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
                     struct timeval presentationTime,
                     unsigned durationInMicroseconds) {
  fFrameSize = frameSize;
  fNumTruncatedBytes = numTruncatedBytes;
  fPresentationTime = presentationTime;
  fDurationInMicroseconds = durationInMicroseconds;

  fLastSeenSCR = fOurSourceDemux.lastSeenSCR();
  fMPEGversion = fOurSourceDemux.mpegVersion();

  FramedSource::afterGetting(this);
}
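// ---- Example (not part of the library) ----
// The constructor above classifies streams by masking the PES stream_id:
// per the masks used there, audio stream_ids are 0xC0-0xDF and video
// stream_ids are 0xE0-0xEF. A standalone restatement ("mimeTypeFor" is a
// hypothetical helper):
#if 0 // illustrative sketch only
#include <cstdio>
#include <cstdint>

static const char* mimeTypeFor(uint8_t streamIdTag) {
  if ((streamIdTag & 0xE0) == 0xC0) return "audio/MPEG";
  if ((streamIdTag & 0xF0) == 0xE0) return "video/MPEG";
  return "unknown";
}

int main() {
  printf("0xC0 -> %s\n", mimeTypeFor(0xC0)); // audio/MPEG
  printf("0xE0 -> %s\n", mimeTypeFor(0xE0)); // video/MPEG
  return 0;
}
#endif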
---- File: live/liveMedia/MPEG1or2VideoRTPSource.cpp ----

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// MPEG-1 or MPEG-2 Video RTP Sources
// Implementation

#include "MPEG1or2VideoRTPSource.hh"

MPEG1or2VideoRTPSource*
MPEG1or2VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
                                  unsigned char rtpPayloadFormat,
                                  unsigned rtpTimestampFrequency) {
  return new MPEG1or2VideoRTPSource(env, RTPgs, rtpPayloadFormat,
                                    rtpTimestampFrequency);
}

MPEG1or2VideoRTPSource::MPEG1or2VideoRTPSource(UsageEnvironment& env,
                                               Groupsock* RTPgs,
                                               unsigned char rtpPayloadFormat,
                                               unsigned rtpTimestampFrequency)
  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency) {
}

MPEG1or2VideoRTPSource::~MPEG1or2VideoRTPSource() {
}

Boolean MPEG1or2VideoRTPSource
::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) {
  // There's a 4-byte video-specific header
  if (packet->dataSize() < 4) return False;

  u_int32_t header = ntohl(*(u_int32_t*)(packet->data()));

  u_int32_t sBit = header&0x00002000; // sequence-header-present
  u_int32_t bBit = header&0x00001000; // beginning-of-slice
  u_int32_t eBit = header&0x00000800; // end-of-slice

  fCurrentPacketBeginsFrame = (sBit|bBit) != 0;
  fCurrentPacketCompletesFrame = ((sBit != 0) && (bBit == 0)) || (eBit != 0);

  resultSpecialHeaderSize = 4;
  return True;
}

Boolean MPEG1or2VideoRTPSource
::packetIsUsableInJitterCalculation(unsigned char* packet, unsigned packetSize) {
  // There's a 4-byte video-specific header
  if (packetSize < 4) return False;

  // Extract the "Picture-Type" field from this, to determine whether
  // this packet can be used in jitter calculations:
  unsigned header = ntohl(*(u_int32_t*)packet);

  unsigned short pictureType = (header>>8)&0x7;
  if (pictureType == 1) { // an I frame
    return True;
  } else { // a P, B, D, or other unknown frame type
    return False;
  }
}

char const* MPEG1or2VideoRTPSource::MIMEtype() const {
  return "video/MPEG";
}
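// ---- Example (not part of the library) ----
// Decoding the same RFC 2250 MPEG video-specific header bits that
// "processSpecialHeader()" tests above (S, B, E flags and the picture type);
// the header word here is hypothetical:
#if 0 // illustrative sketch only
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t header = 0x00002100; // example: S bit set, picture type 1 (I frame)
  unsigned sBit = (header >> 13) & 1; // sequence-header-present
  unsigned bBit = (header >> 12) & 1; // beginning-of-slice
  unsigned eBit = (header >> 11) & 1; // end-of-slice
  unsigned pictureType = (header >> 8) & 0x7;
  printf("S=%u B=%u E=%u pictureType=%u\n", sBit, bBit, eBit, pictureType);
  return 0;
}
#endif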
---- File: live/liveMedia/MPEG1or2VideoStreamFramer.cpp ----

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A filter that breaks up an MPEG 1 or 2 video elementary stream into
//   frames for: Video_Sequence_Header, GOP_Header, Picture_Header
// Implementation

#include "MPEG1or2VideoStreamFramer.hh"
#include "MPEGVideoStreamParser.hh"
#include <string.h>

////////// MPEG1or2VideoStreamParser definition //////////

// An enum representing the current state of the parser:
enum MPEGParseState {
  PARSING_VIDEO_SEQUENCE_HEADER,
  PARSING_VIDEO_SEQUENCE_HEADER_SEEN_CODE,
  PARSING_GOP_HEADER,
  PARSING_GOP_HEADER_SEEN_CODE,
  PARSING_PICTURE_HEADER,
  PARSING_SLICE
};

#define VSH_MAX_SIZE 1000

class MPEG1or2VideoStreamParser: public MPEGVideoStreamParser {
public:
  MPEG1or2VideoStreamParser(MPEG1or2VideoStreamFramer* usingSource,
                            FramedSource* inputSource,
                            Boolean iFramesOnly, double vshPeriod);
  virtual ~MPEG1or2VideoStreamParser();

private: // redefined virtual functions:
  virtual void flushInput();
  virtual unsigned parse();

private:
  void reset();

  MPEG1or2VideoStreamFramer* usingSource() {
    return (MPEG1or2VideoStreamFramer*)fUsingSource;
  }
  void setParseState(MPEGParseState parseState);

  unsigned parseVideoSequenceHeader(Boolean haveSeenStartCode);
  unsigned parseGOPHeader(Boolean haveSeenStartCode);
  unsigned parsePictureHeader();
  unsigned parseSlice();

private:
  MPEGParseState fCurrentParseState;
  unsigned fPicturesSinceLastGOP;
      // can be used to compute timestamp for a video_sequence_header
  unsigned short fCurPicTemporalReference;
      // used to compute slice timestamp
  unsigned char fCurrentSliceNumber; // set when parsing a slice

  // A saved copy of the most recently seen 'video_sequence_header',
  // in case we need to insert it into the stream periodically:
  unsigned char fSavedVSHBuffer[VSH_MAX_SIZE];
  unsigned fSavedVSHSize;
  double fSavedVSHTimestamp;
  double fVSHPeriod;

  Boolean fIFramesOnly, fSkippingCurrentPicture;

  void saveCurrentVSH();
  Boolean needToUseSavedVSH();
  unsigned useSavedVSH(); // returns the size of the saved VSH
};

////////// MPEG1or2VideoStreamFramer implementation //////////

MPEG1or2VideoStreamFramer::MPEG1or2VideoStreamFramer(UsageEnvironment& env,
                                                     FramedSource* inputSource,
                                                     Boolean iFramesOnly,
                                                     double vshPeriod,
                                                     Boolean createParser)
  : MPEGVideoStreamFramer(env, inputSource) {
  fParser = createParser
    ? new MPEG1or2VideoStreamParser(this, inputSource, iFramesOnly, vshPeriod)
    : NULL;
}

MPEG1or2VideoStreamFramer::~MPEG1or2VideoStreamFramer() {
}
MPEG1or2VideoStreamFramer*
MPEG1or2VideoStreamFramer::createNew(UsageEnvironment& env,
                                     FramedSource* inputSource,
                                     Boolean iFramesOnly, double vshPeriod) {
  // Need to add source type checking here??? #####
  return new MPEG1or2VideoStreamFramer(env, inputSource, iFramesOnly, vshPeriod);
}

double MPEG1or2VideoStreamFramer::getCurrentPTS() const {
  return fPresentationTime.tv_sec + fPresentationTime.tv_usec/1000000.0;
}

Boolean MPEG1or2VideoStreamFramer::isMPEG1or2VideoStreamFramer() const {
  return True;
}

////////// MPEG1or2VideoStreamParser implementation //////////

MPEG1or2VideoStreamParser
::MPEG1or2VideoStreamParser(MPEG1or2VideoStreamFramer* usingSource,
                            FramedSource* inputSource,
                            Boolean iFramesOnly, double vshPeriod)
  : MPEGVideoStreamParser(usingSource, inputSource),
    fCurrentParseState(PARSING_VIDEO_SEQUENCE_HEADER),
    fVSHPeriod(vshPeriod), fIFramesOnly(iFramesOnly) {
  reset();
}

MPEG1or2VideoStreamParser::~MPEG1or2VideoStreamParser() {
}

void MPEG1or2VideoStreamParser::setParseState(MPEGParseState parseState) {
  fCurrentParseState = parseState;
  MPEGVideoStreamParser::setParseState();
}

void MPEG1or2VideoStreamParser::reset() {
  fPicturesSinceLastGOP = 0;
  fCurPicTemporalReference = 0;
  fCurrentSliceNumber = 0;
  fSavedVSHSize = 0;
  fSkippingCurrentPicture = False;
}

void MPEG1or2VideoStreamParser::flushInput() {
  reset();
  StreamParser::flushInput();
  if (fCurrentParseState != PARSING_VIDEO_SEQUENCE_HEADER) {
    setParseState(PARSING_GOP_HEADER); // start from the next GOP
  }
}

unsigned MPEG1or2VideoStreamParser::parse() {
  try {
    switch (fCurrentParseState) {
    case PARSING_VIDEO_SEQUENCE_HEADER: {
      return parseVideoSequenceHeader(False);
    }
    case PARSING_VIDEO_SEQUENCE_HEADER_SEEN_CODE: {
      return parseVideoSequenceHeader(True);
    }
    case PARSING_GOP_HEADER: {
      return parseGOPHeader(False);
    }
    case PARSING_GOP_HEADER_SEEN_CODE: {
      return parseGOPHeader(True);
    }
    case PARSING_PICTURE_HEADER: {
      return parsePictureHeader();
    }
    case PARSING_SLICE: {
      return parseSlice();
    }
    default: {
      return 0; // shouldn't happen
    }
    }
  } catch (int /*e*/) {
#ifdef DEBUG
    fprintf(stderr, "MPEG1or2VideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
#endif
    return 0; // the parsing got interrupted
  }
}

void MPEG1or2VideoStreamParser::saveCurrentVSH() {
  unsigned frameSize = curFrameSize();
  if (frameSize > sizeof fSavedVSHBuffer) return; // too big to save

  memmove(fSavedVSHBuffer, fStartOfFrame, frameSize);
  fSavedVSHSize = frameSize;
  fSavedVSHTimestamp = usingSource()->getCurrentPTS();
}

Boolean MPEG1or2VideoStreamParser::needToUseSavedVSH() {
  return usingSource()->getCurrentPTS() > fSavedVSHTimestamp+fVSHPeriod
    && fSavedVSHSize > 0;
}

unsigned MPEG1or2VideoStreamParser::useSavedVSH() {
  unsigned bytesToUse = fSavedVSHSize;
  unsigned maxBytesToUse = fLimit - fStartOfFrame;
  if (bytesToUse > maxBytesToUse) bytesToUse = maxBytesToUse;

  memmove(fStartOfFrame, fSavedVSHBuffer, bytesToUse);

  // Also reset the saved timestamp:
  fSavedVSHTimestamp = usingSource()->getCurrentPTS();

#ifdef DEBUG
  fprintf(stderr, "used saved video_sequence_header (%d bytes)\n", bytesToUse);
#endif

  return bytesToUse;
}

#define VIDEO_SEQUENCE_HEADER_START_CODE 0x000001B3
#define GROUP_START_CODE                 0x000001B8
#define PICTURE_START_CODE               0x00000100
#define SEQUENCE_END_CODE                0x000001B7

static double const frameRateFromCode[] = {
  0.0,          // forbidden
  24000/1001.0, // approx 23.976
  24.0,
  25.0,
  30000/1001.0, // approx 29.97
  30.0,
  50.0,
  60000/1001.0, // approx 59.94
  60.0,
  0.0,          // reserved
  0.0,          // reserved
  0.0,          // reserved
  0.0,          // reserved
  0.0,          // reserved
  0.0,          // reserved
  0.0           // reserved
};
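// ---- Example (not part of the library) ----
// Standalone use of the "frameRateFromCode" mapping above: the 4-bit
// frame_rate_code from a video_sequence_header indexes this table
// (value 4 chosen only for illustration):
#if 0 // illustrative sketch only
#include <cstdio>

int main() {
  static double const frameRateFromCode[16] = {
    0.0, 24000/1001.0, 24.0, 25.0, 30000/1001.0, 30.0, 50.0, 60000/1001.0, 60.0
    // remaining entries are reserved; value-initialized to 0.0
  };
  unsigned char frame_rate_code = 4; // the sequence header's low nibble
  printf("frame rate: %f fps\n", frameRateFromCode[frame_rate_code]); // ~29.97
  return 0;
}
#endif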
unsigned MPEG1or2VideoStreamParser
::parseVideoSequenceHeader(Boolean haveSeenStartCode) {
#ifdef DEBUG
  fprintf(stderr, "parsing video sequence header\n");
#endif
  unsigned first4Bytes;
  if (!haveSeenStartCode) {
    while ((first4Bytes = test4Bytes()) != VIDEO_SEQUENCE_HEADER_START_CODE) {
#ifdef DEBUG
      fprintf(stderr, "ignoring non video sequence header: 0x%08x\n", first4Bytes);
#endif
      get1Byte(); setParseState(PARSING_VIDEO_SEQUENCE_HEADER);
          // ensures we progress over bad data
    }
    first4Bytes = get4Bytes();
  } else {
    // We've already seen the start code
    first4Bytes = VIDEO_SEQUENCE_HEADER_START_CODE;
  }
  save4Bytes(first4Bytes);

  // Next, extract the size and rate parameters from the next 8 bytes
  unsigned paramWord1 = get4Bytes();
  save4Bytes(paramWord1);
  unsigned next4Bytes = get4Bytes();
#ifdef DEBUG
  unsigned short horizontal_size_value   = (paramWord1&0xFFF00000)>>(32-12);
  unsigned short vertical_size_value     = (paramWord1&0x000FFF00)>>8;
  unsigned char aspect_ratio_information = (paramWord1&0x000000F0)>>4;
#endif
  unsigned char frame_rate_code          = (paramWord1&0x0000000F);
  usingSource()->fFrameRate = frameRateFromCode[frame_rate_code];
#ifdef DEBUG
  unsigned bit_rate_value        = (next4Bytes&0xFFFFC000)>>(32-18);
  unsigned vbv_buffer_size_value = (next4Bytes&0x00001FF8)>>3;
  fprintf(stderr, "horizontal_size_value: %d, vertical_size_value: %d, aspect_ratio_information: %d, frame_rate_code: %d (=>%f fps), bit_rate_value: %d (=>%d bps), vbv_buffer_size_value: %d\n", horizontal_size_value, vertical_size_value, aspect_ratio_information, frame_rate_code, usingSource()->fFrameRate, bit_rate_value, bit_rate_value*400, vbv_buffer_size_value);
#endif

  // Now, copy all bytes that we see, up until we reach a GROUP_START_CODE
  // or a PICTURE_START_CODE:
  do {
    saveToNextCode(next4Bytes);
  } while (next4Bytes != GROUP_START_CODE && next4Bytes != PICTURE_START_CODE);

  setParseState((next4Bytes == GROUP_START_CODE)
                ? PARSING_GOP_HEADER_SEEN_CODE
                : PARSING_PICTURE_HEADER);

  // Compute this frame's timestamp by noting how many pictures we've seen
  // since the last GOP header:
  usingSource()->computePresentationTime(fPicturesSinceLastGOP);

  // Save this video_sequence_header, in case we need to insert a copy
  // into the stream later:
  saveCurrentVSH();

  return curFrameSize();
}
unsigned MPEG1or2VideoStreamParser::parseGOPHeader(Boolean haveSeenStartCode) {
  // First check whether we should insert a previously-saved
  // 'video_sequence_header' here:
  if (needToUseSavedVSH()) return useSavedVSH();

#ifdef DEBUG
  fprintf(stderr, "parsing GOP header\n");
#endif
  unsigned first4Bytes;
  if (!haveSeenStartCode) {
    while ((first4Bytes = test4Bytes()) != GROUP_START_CODE) {
#ifdef DEBUG
      fprintf(stderr, "ignoring non GOP start code: 0x%08x\n", first4Bytes);
#endif
      get1Byte(); setParseState(PARSING_GOP_HEADER);
          // ensures we progress over bad data
    }
    first4Bytes = get4Bytes();
  } else {
    // We've already seen the GROUP_START_CODE
    first4Bytes = GROUP_START_CODE;
  }
  save4Bytes(first4Bytes);

  // Next, extract the (25-bit) time code from the next 4 bytes:
  unsigned next4Bytes = get4Bytes();
  unsigned time_code = (next4Bytes&0xFFFFFF80)>>(32-25);
#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
  Boolean drop_frame_flag     = (time_code&0x01000000) != 0;
#endif
  unsigned time_code_hours    = (time_code&0x00F80000)>>19;
  unsigned time_code_minutes  = (time_code&0x0007E000)>>13;
  unsigned time_code_seconds  = (time_code&0x00000FC0)>>6;
  unsigned time_code_pictures = (time_code&0x0000003F);
#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
  fprintf(stderr, "time_code: 0x%07x, drop_frame %d, hours %d, minutes %d, seconds %d, pictures %d\n", time_code, drop_frame_flag, time_code_hours, time_code_minutes, time_code_seconds, time_code_pictures);
#endif
#ifdef DEBUG
  Boolean closed_gop  = (next4Bytes&0x00000040) != 0;
  Boolean broken_link = (next4Bytes&0x00000020) != 0;
  fprintf(stderr, "closed_gop: %d, broken_link: %d\n", closed_gop, broken_link);
#endif

  // Now, copy all bytes that we see, up until we reach a PICTURE_START_CODE:
  do {
    saveToNextCode(next4Bytes);
  } while (next4Bytes != PICTURE_START_CODE);

  // Record the time code:
  usingSource()->setTimeCode(time_code_hours, time_code_minutes, time_code_seconds,
                             time_code_pictures, fPicturesSinceLastGOP);

  fPicturesSinceLastGOP = 0;

  // Compute this frame's timestamp:
  usingSource()->computePresentationTime(0);

  setParseState(PARSING_PICTURE_HEADER);

  return curFrameSize();
}

inline Boolean isSliceStartCode(unsigned fourBytes) {
  if ((fourBytes&0xFFFFFF00) != 0x00000100) return False;

  unsigned char lastByte = fourBytes&0xFF;
  return lastByte <= 0xAF && lastByte >= 1;
}

unsigned MPEG1or2VideoStreamParser::parsePictureHeader() {
#ifdef DEBUG
  fprintf(stderr, "parsing picture header\n");
#endif
  // Note that we've already read the PICTURE_START_CODE
  // Next, extract the temporal reference from the next 4 bytes:
  unsigned next4Bytes = get4Bytes();
  unsigned short temporal_reference = (next4Bytes&0xFFC00000)>>(32-10);
  unsigned char picture_coding_type = (next4Bytes&0x00380000)>>19;
#ifdef DEBUG
  unsigned short vbv_delay          = (next4Bytes&0x0007FFF8)>>3;
  fprintf(stderr, "temporal_reference: %d, picture_coding_type: %d, vbv_delay: %d\n", temporal_reference, picture_coding_type, vbv_delay);
#endif

  fSkippingCurrentPicture = fIFramesOnly && picture_coding_type != 1;
  if (fSkippingCurrentPicture) {
    // Skip all bytes that we see, up until we reach a slice_start_code:
    do {
      skipToNextCode(next4Bytes);
    } while (!isSliceStartCode(next4Bytes));
  } else {
    // Save the PICTURE_START_CODE that we've already read:
    save4Bytes(PICTURE_START_CODE);

    // Copy all bytes that we see, up until we reach a slice_start_code:
    do {
      saveToNextCode(next4Bytes);
    } while (!isSliceStartCode(next4Bytes));
  }

  setParseState(PARSING_SLICE);

  fCurrentSliceNumber = next4Bytes&0xFF;

  // Record the temporal reference:
  fCurPicTemporalReference = temporal_reference;

  // Compute this frame's timestamp:
  usingSource()->computePresentationTime(fCurPicTemporalReference);

  if (fSkippingCurrentPicture) {
    return parse(); // try again, until we get a non-skipped frame
  } else {
    return curFrameSize();
  }
}
unsigned MPEG1or2VideoStreamParser::parseSlice() {
  // Note that we've already read the slice_start_code:
  unsigned next4Bytes = PICTURE_START_CODE|fCurrentSliceNumber;
#ifdef DEBUG_SLICE
  fprintf(stderr, "parsing slice: 0x%08x\n", next4Bytes);
#endif

  if (fSkippingCurrentPicture) {
    // Skip all bytes that we see, up until we reach a code of some sort:
    skipToNextCode(next4Bytes);
  } else {
    // Copy all bytes that we see, up until we reach a code of some sort:
    saveToNextCode(next4Bytes);
  }

  // The next thing to parse depends on the code that we just saw:
  if (isSliceStartCode(next4Bytes)) { // common case
    setParseState(PARSING_SLICE);
    fCurrentSliceNumber = next4Bytes&0xFF;
  } else {
    // Because we don't see any more slices, we are assumed to have ended
    // the current picture:
    ++fPicturesSinceLastGOP;
    ++usingSource()->fPictureCount;
    usingSource()->fPictureEndMarker = True; // HACK #####
    switch (next4Bytes) {
    case SEQUENCE_END_CODE: {
      setParseState(PARSING_VIDEO_SEQUENCE_HEADER);
      break;
    }
    case VIDEO_SEQUENCE_HEADER_START_CODE: {
      setParseState(PARSING_VIDEO_SEQUENCE_HEADER_SEEN_CODE);
      break;
    }
    case GROUP_START_CODE: {
      setParseState(PARSING_GOP_HEADER_SEEN_CODE);
      break;
    }
    case PICTURE_START_CODE: {
      setParseState(PARSING_PICTURE_HEADER);
      break;
    }
    default: {
      usingSource()->envir() << "MPEG1or2VideoStreamParser::parseSlice(): Saw unexpected code " << (void*)next4Bytes << "\n";
      setParseState(PARSING_SLICE); // the safest way to recover...
      break;
    }
    }
  }

  // Compute this frame's timestamp:
  usingSource()->computePresentationTime(fCurPicTemporalReference);

  if (fSkippingCurrentPicture) {
    return parse(); // try again, until we get a non-skipped frame
  } else {
    return curFrameSize();
  }
}

---- File: live/liveMedia/MPEGVideoStreamParser.cpp ----

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// An abstract parser for MPEG video streams
// Implementation

#include "MPEGVideoStreamParser.hh"

MPEGVideoStreamParser
::MPEGVideoStreamParser(MPEGVideoStreamFramer* usingSource,
                        FramedSource* inputSource)
  : StreamParser(inputSource, FramedSource::handleClosure, usingSource,
                 &MPEGVideoStreamFramer::continueReadProcessing, usingSource),
    fUsingSource(usingSource) {
}

MPEGVideoStreamParser::~MPEGVideoStreamParser() {
}

void MPEGVideoStreamParser::restoreSavedParserState() {
  StreamParser::restoreSavedParserState();
  fTo = fSavedTo;
  fNumTruncatedBytes = fSavedNumTruncatedBytes;
}

void MPEGVideoStreamParser::registerReadInterest(unsigned char* to,
                                                 unsigned maxSize) {
  fStartOfFrame = fTo = fSavedTo = to;
  fLimit = to + maxSize;
  fNumTruncatedBytes = fSavedNumTruncatedBytes = 0;
}
---- File: live/liveMedia/FileServerMediaSubsession.cpp ----

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from a file.
// Implementation

#include "FileServerMediaSubsession.hh"

FileServerMediaSubsession
::FileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
                            Boolean reuseFirstSource)
  : OnDemandServerMediaSubsession(env, reuseFirstSource),
    fFileSize(0) {
  fFileName = strDup(fileName);
}

FileServerMediaSubsession::~FileServerMediaSubsession() {
  delete[] (char*)fFileName;
}

---- File: live/liveMedia/MPEG1or2VideoStreamDiscreteFramer.cpp ----

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A simplified version of "MPEG1or2VideoStreamFramer" that takes only
// complete, discrete frames (rather than an arbitrary byte stream) as input.
// This avoids the parsing and data copying overhead of the full
// "MPEG1or2VideoStreamFramer".
// Implementation

#include "MPEG1or2VideoStreamDiscreteFramer.hh"

MPEG1or2VideoStreamDiscreteFramer*
MPEG1or2VideoStreamDiscreteFramer::createNew(UsageEnvironment& env,
                                             FramedSource* inputSource,
                                             Boolean iFramesOnly,
                                             double vshPeriod,
                                             Boolean leavePresentationTimesUnmodified) {
  // Need to add source type checking here??? #####
  return new MPEG1or2VideoStreamDiscreteFramer(env, inputSource, iFramesOnly,
                                               vshPeriod, leavePresentationTimesUnmodified);
}

MPEG1or2VideoStreamDiscreteFramer
::MPEG1or2VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource,
                                    Boolean iFramesOnly, double vshPeriod,
                                    Boolean leavePresentationTimesUnmodified)
  : MPEG1or2VideoStreamFramer(env, inputSource, iFramesOnly, vshPeriod,
                              False/*don't create a parser*/),
    fLeavePresentationTimesUnmodified(leavePresentationTimesUnmodified),
    fLastNonBFrameTemporal_reference(0),
    fSavedVSHSize(0), fSavedVSHTimestamp(0.0),
    fIFramesOnly(iFramesOnly), fVSHPeriod(vshPeriod) {
  fLastNonBFramePresentationTime.tv_sec = 0;
  fLastNonBFramePresentationTime.tv_usec = 0;
}

MPEG1or2VideoStreamDiscreteFramer::~MPEG1or2VideoStreamDiscreteFramer() {
}
void MPEG1or2VideoStreamDiscreteFramer::doGetNextFrame() {
  // Arrange to read data (which should be a complete MPEG-1 or 2 video frame)
  // from our data source, directly into the client's input buffer.
  // After reading this, we'll do some parsing on the frame.
  fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this,
                             FramedSource::handleClosure, this);
}

void MPEG1or2VideoStreamDiscreteFramer
::afterGettingFrame(void* clientData, unsigned frameSize,
                    unsigned numTruncatedBytes,
                    struct timeval presentationTime,
                    unsigned durationInMicroseconds) {
  MPEG1or2VideoStreamDiscreteFramer* source
    = (MPEG1or2VideoStreamDiscreteFramer*)clientData;
  source->afterGettingFrame1(frameSize, numTruncatedBytes,
                             presentationTime, durationInMicroseconds);
}

static double const frameRateFromCode[] = {
  0.0,          // forbidden
  24000/1001.0, // approx 23.976
  24.0,
  25.0,
  30000/1001.0, // approx 29.97
  30.0,
  50.0,
  60000/1001.0, // approx 59.94
  60.0,
  0.0,          // reserved
  0.0,          // reserved
  0.0,          // reserved
  0.0,          // reserved
  0.0,          // reserved
  0.0,          // reserved
  0.0           // reserved
};

#define MILLION 1000000
fLastNonBFrameTemporal_reference - temporal_reference; if (trIncrement < 0) trIncrement += 1024; // field is 10 bits in size unsigned usIncrement = fFrameRate == 0.0 ? 0 : (unsigned)((trIncrement*MILLION)/fFrameRate); unsigned secondsToSubtract = usIncrement/MILLION; unsigned uSecondsToSubtract = usIncrement%MILLION; presentationTime = fLastNonBFramePresentationTime; if ((unsigned)presentationTime.tv_usec < uSecondsToSubtract) { presentationTime.tv_usec += MILLION; if (presentationTime.tv_sec > 0) --presentationTime.tv_sec; } presentationTime.tv_usec -= uSecondsToSubtract; if ((unsigned)presentationTime.tv_sec > secondsToSubtract) { presentationTime.tv_sec -= secondsToSubtract; } else { presentationTime.tv_sec = presentationTime.tv_usec = 0; } } else { fLastNonBFramePresentationTime = presentationTime; fLastNonBFrameTemporal_reference = temporal_reference; } } } // ##### Later: // - do "iFramesOnly" if requested // Complete delivery to the client: fFrameSize = frameSize; fNumTruncatedBytes = numTruncatedBytes; fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; afterGetting(this); } live/liveMedia/AC3AudioStreamFramer.cpp000444 001751 000000 00000024562 12265042432 020214 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A filter that breaks up an AC3 audio elementary stream into frames // Implementation #include "AC3AudioStreamFramer.hh" #include "StreamParser.hh" #include <GroupsockHelper.hh> ////////// AC3AudioStreamParser definition ////////// class AC3FrameParams { public: AC3FrameParams() : samplingFreq(0) {} // 8-byte header at the start of each frame: // u_int32_t hdr0, hdr1; unsigned hdr0, hdr1; // parameters derived from the headers unsigned kbps, samplingFreq, frameSize; void setParamsFromHeader(); }; class AC3AudioStreamParser: public StreamParser { public: AC3AudioStreamParser(AC3AudioStreamFramer* usingSource, FramedSource* inputSource); virtual ~AC3AudioStreamParser(); public: void testStreamCode(unsigned char ourStreamCode, unsigned char* ptr, unsigned size); unsigned parseFrame(unsigned& numTruncatedBytes); // returns the size of the frame that was acquired, or 0 if none was void registerReadInterest(unsigned char* to, unsigned maxSize); AC3FrameParams const& currentFrame() const { return fCurrentFrame; } Boolean haveParsedAFrame() const { return fHaveParsedAFrame; } void readAndSaveAFrame(); private: static void afterGettingSavedFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingSavedFrame1(unsigned frameSize); static void onSavedFrameClosure(void* clientData); void onSavedFrameClosure1(); private: AC3AudioStreamFramer* fUsingSource; unsigned char* fTo; unsigned fMaxSize; Boolean fHaveParsedAFrame; unsigned char* fSavedFrame; unsigned fSavedFrameSize; char fSavedFrameFlag; // Parameters of the most recently read frame: AC3FrameParams fCurrentFrame; }; ////////// AC3AudioStreamFramer implementation ////////// AC3AudioStreamFramer::AC3AudioStreamFramer(UsageEnvironment& env, FramedSource* inputSource, unsigned char streamCode) : FramedFilter(env, inputSource), fOurStreamCode(streamCode) { // Use the current wallclock time as the initial 'presentation time': gettimeofday(&fNextFramePresentationTime, NULL); fParser = new AC3AudioStreamParser(this, inputSource); } AC3AudioStreamFramer::~AC3AudioStreamFramer() { delete fParser; } AC3AudioStreamFramer* AC3AudioStreamFramer::createNew(UsageEnvironment& env, FramedSource* inputSource, unsigned char streamCode) { // Need to add source type checking here??? ##### return new AC3AudioStreamFramer(env, inputSource, streamCode); } unsigned AC3AudioStreamFramer::samplingRate() { if (!fParser->haveParsedAFrame()) { // Because we haven't yet parsed a frame, we don't yet know the input // stream's sampling rate. So, we first need to read a frame // (into a special buffer that we keep around for later use). fParser->readAndSaveAFrame(); } return fParser->currentFrame().samplingFreq; } void AC3AudioStreamFramer::flushInput() { fParser->flushInput(); } void AC3AudioStreamFramer::doGetNextFrame() { fParser->registerReadInterest(fTo, fMaxSize); parseNextFrame(); } #define MILLION 1000000 struct timeval AC3AudioStreamFramer::currentFramePlayTime() const { AC3FrameParams const& fr = fParser->currentFrame(); unsigned const numSamples = 1536; unsigned const freq = fr.samplingFreq; // result is numSamples/freq unsigned const uSeconds = (freq == 0) ?
0 : ((numSamples*2*MILLION)/freq + 1)/2; // rounds to nearest integer struct timeval result; result.tv_sec = uSeconds/MILLION; result.tv_usec = uSeconds%MILLION; return result; } void AC3AudioStreamFramer ::handleNewData(void* clientData, unsigned char* ptr, unsigned size, struct timeval /*presentationTime*/) { AC3AudioStreamFramer* framer = (AC3AudioStreamFramer*)clientData; framer->handleNewData(ptr, size); } void AC3AudioStreamFramer ::handleNewData(unsigned char* ptr, unsigned size) { fParser->testStreamCode(fOurStreamCode, ptr, size); parseNextFrame(); } void AC3AudioStreamFramer::parseNextFrame() { unsigned acquiredFrameSize = fParser->parseFrame(fNumTruncatedBytes); if (acquiredFrameSize > 0) { // We were able to acquire a frame from the input. // It has already been copied to the reader's space. fFrameSize = acquiredFrameSize; // Also set the presentation time, and increment it for next time, // based on the length of this frame: fPresentationTime = fNextFramePresentationTime; struct timeval framePlayTime = currentFramePlayTime(); fDurationInMicroseconds = framePlayTime.tv_sec*MILLION + framePlayTime.tv_usec; fNextFramePresentationTime.tv_usec += framePlayTime.tv_usec; fNextFramePresentationTime.tv_sec += framePlayTime.tv_sec + fNextFramePresentationTime.tv_usec/MILLION; fNextFramePresentationTime.tv_usec %= MILLION; // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. afterGetting(this); } else { // We were unable to parse a complete frame from the input, because: // - we had to read more data from the source stream, or // - the source stream has ended. } } ////////// AC3AudioStreamParser implementation ////////// static int const kbpsTable[] = {32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 576, 640}; void AC3FrameParams::setParamsFromHeader() { unsigned char byte4 = hdr1 >> 24; unsigned char kbpsIndex = (byte4&0x3E) >> 1; if (kbpsIndex > 18) kbpsIndex = 18; kbps = kbpsTable[kbpsIndex]; unsigned char samplingFreqIndex = (byte4&0xC0) >> 6; switch (samplingFreqIndex) { case 0: samplingFreq = 48000; frameSize = 4*kbps; break; case 1: samplingFreq = 44100; frameSize = 2*(320*kbps/147 + (byte4&1)); break; case 2: case 3: // not legal? samplingFreq = 32000; frameSize = 6*kbps; } } AC3AudioStreamParser ::AC3AudioStreamParser(AC3AudioStreamFramer* usingSource, FramedSource* inputSource) : StreamParser(inputSource, FramedSource::handleClosure, usingSource, &AC3AudioStreamFramer::handleNewData, usingSource), fUsingSource(usingSource), fHaveParsedAFrame(False), fSavedFrame(NULL), fSavedFrameSize(0) { } AC3AudioStreamParser::~AC3AudioStreamParser() { } void AC3AudioStreamParser::registerReadInterest(unsigned char* to, unsigned maxSize) { fTo = to; fMaxSize = maxSize; } void AC3AudioStreamParser ::testStreamCode(unsigned char ourStreamCode, unsigned char* ptr, unsigned size) { if (ourStreamCode == 0) return; // we assume that there's no stream code at the beginning of the data if (size < 4) return; unsigned char streamCode = *ptr; if (streamCode == ourStreamCode) { // Remove the first 4 bytes from the stream: memmove(ptr, ptr + 4, size - 4); totNumValidBytes() = totNumValidBytes() - 4; } else { // Discard all of the data that was just read: totNumValidBytes() = totNumValidBytes() - size; } } unsigned AC3AudioStreamParser::parseFrame(unsigned& numTruncatedBytes) { if (fSavedFrameSize > 0) { // We've already read and parsed a frame. 
Use it instead: memmove(fTo, fSavedFrame, fSavedFrameSize); delete[] fSavedFrame; fSavedFrame = NULL; unsigned frameSize = fSavedFrameSize; fSavedFrameSize = 0; return frameSize; } try { saveParserState(); // We expect an AC3 audio header (first 2 bytes == 0x0B77) at the start: while (1) { unsigned next4Bytes = test4Bytes(); if (next4Bytes>>16 == 0x0B77) break; skipBytes(1); saveParserState(); } fCurrentFrame.hdr0 = get4Bytes(); fCurrentFrame.hdr1 = test4Bytes(); fCurrentFrame.setParamsFromHeader(); fHaveParsedAFrame = True; // Copy the frame to the requested destination: unsigned frameSize = fCurrentFrame.frameSize; if (frameSize > fMaxSize) { numTruncatedBytes = frameSize - fMaxSize; frameSize = fMaxSize; } else { numTruncatedBytes = 0; } fTo[0] = fCurrentFrame.hdr0 >> 24; fTo[1] = fCurrentFrame.hdr0 >> 16; fTo[2] = fCurrentFrame.hdr0 >> 8; fTo[3] = fCurrentFrame.hdr0; getBytes(&fTo[4], frameSize-4); skipBytes(numTruncatedBytes); return frameSize; } catch (int /*e*/) { #ifdef DEBUG fUsingSource->envir() << "AC3AudioStreamParser::parseFrame() EXCEPTION (This is normal behavior - *not* an error)\n"; #endif return 0; // the parsing got interrupted } } void AC3AudioStreamParser::readAndSaveAFrame() { unsigned const maxAC3FrameSize = 4000; fSavedFrame = new unsigned char[maxAC3FrameSize]; fSavedFrameSize = 0; fSavedFrameFlag = 0; fUsingSource->getNextFrame(fSavedFrame, maxAC3FrameSize, afterGettingSavedFrame, this, onSavedFrameClosure, this); fUsingSource->envir().taskScheduler().doEventLoop(&fSavedFrameFlag); } void AC3AudioStreamParser ::afterGettingSavedFrame(void* clientData, unsigned frameSize, unsigned /*numTruncatedBytes*/, struct timeval /*presentationTime*/, unsigned /*durationInMicroseconds*/) { AC3AudioStreamParser* parser = (AC3AudioStreamParser*)clientData; parser->afterGettingSavedFrame1(frameSize); } void AC3AudioStreamParser ::afterGettingSavedFrame1(unsigned frameSize) { fSavedFrameSize = frameSize; fSavedFrameFlag = ~0; } void AC3AudioStreamParser::onSavedFrameClosure(void* clientData) { AC3AudioStreamParser* parser = (AC3AudioStreamParser*)clientData; parser->onSavedFrameClosure1(); } void AC3AudioStreamParser::onSavedFrameClosure1() { delete[] fSavedFrame; fSavedFrame = NULL; fSavedFrameSize = 0; fSavedFrameFlag = ~0; } live/liveMedia/DarwinInjector.cpp000444 001751 000000 00000030505 12265042432 017267 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // An object that redirects one or more RTP/RTCP streams - forming a single // multimedia session - into a 'Darwin Streaming Server' (for subsequent // reflection to potentially arbitrarily many remote RTSP clients). 
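// A rough usage sketch (commented out).  "env", "videoSink" (an "RTPSink"),
// "videoRTCP" (its "RTCPInstance"), and the server/file names below are
// hypothetical placeholders supplied by the caller; the remaining
// "setDestination()" parameters are assumed to take their declared defaults:
//
//   DarwinInjector* injector
//     = DarwinInjector::createNew(*env, "myApplication", 0 /*verbosity*/);
//   injector->addStream(videoSink, videoRTCP);
//   if (!injector->setDestination("darwin.example.com", "myStream.sdp",
//                                 "mySession", "session information")) {
//     *env << "setDestination() failed: " << env->getResultMsg() << "\n";
//   }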
// Implementation #include "DarwinInjector.hh" #include ////////// SubstreamDescriptor definition ////////// class SubstreamDescriptor { public: SubstreamDescriptor(RTPSink* rtpSink, RTCPInstance* rtcpInstance, unsigned trackId); ~SubstreamDescriptor(); SubstreamDescriptor*& next() { return fNext; } RTPSink* rtpSink() const { return fRTPSink; } RTCPInstance* rtcpInstance() const { return fRTCPInstance; } char const* sdpLines() const { return fSDPLines; } private: SubstreamDescriptor* fNext; RTPSink* fRTPSink; RTCPInstance* fRTCPInstance; char* fSDPLines; }; ////////// DarwinInjector implementation ////////// DarwinInjector* DarwinInjector::createNew(UsageEnvironment& env, char const* applicationName, int verbosityLevel) { return new DarwinInjector(env, applicationName, verbosityLevel); } Boolean DarwinInjector::lookupByName(UsageEnvironment& env, char const* name, DarwinInjector*& result) { result = NULL; // unless we succeed Medium* medium; if (!Medium::lookupByName(env, name, medium)) return False; if (!medium->isDarwinInjector()) { env.setResultMsg(name, " is not a 'Darwin injector'"); return False; } result = (DarwinInjector*)medium; return True; } DarwinInjector::DarwinInjector(UsageEnvironment& env, char const* applicationName, int verbosityLevel) : Medium(env), fApplicationName(strDup(applicationName)), fVerbosityLevel(verbosityLevel), fRTSPClient(NULL), fSubstreamSDPSizes(0), fHeadSubstream(NULL), fTailSubstream(NULL), fSession(NULL), fLastTrackId(0), fResultString(NULL) { } DarwinInjector::~DarwinInjector() { if (fSession != NULL) { // close down and delete the session fRTSPClient->sendTeardownCommand(*fSession, NULL); Medium::close(fSession); } delete fHeadSubstream; delete[] (char*)fApplicationName; Medium::close(fRTSPClient); } void DarwinInjector::addStream(RTPSink* rtpSink, RTCPInstance* rtcpInstance) { if (rtpSink == NULL) return; // "rtpSink" should be non-NULL SubstreamDescriptor* newDescriptor = new SubstreamDescriptor(rtpSink, rtcpInstance, ++fLastTrackId); if (fHeadSubstream == NULL) { fHeadSubstream = fTailSubstream = newDescriptor; } else { fTailSubstream->next() = newDescriptor; fTailSubstream = newDescriptor; } fSubstreamSDPSizes += strlen(newDescriptor->sdpLines()); } // Define a special subclass of "RTSPClient" that has a pointer field to a "DarwinInjector". 
We'll use this to implement RTSP ops: class RTSPClientForDarwinInjector: public RTSPClient { public: RTSPClientForDarwinInjector(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, char const* applicationName, DarwinInjector* ourDarwinInjector) : RTSPClient(env, rtspURL, verbosityLevel, applicationName, 0, -1), fOurDarwinInjector(ourDarwinInjector) {} virtual ~RTSPClientForDarwinInjector() {} DarwinInjector* fOurDarwinInjector; }; Boolean DarwinInjector ::setDestination(char const* remoteRTSPServerNameOrAddress, char const* remoteFileName, char const* sessionName, char const* sessionInfo, portNumBits remoteRTSPServerPortNumber, char const* remoteUserName, char const* remotePassword, char const* sessionAuthor, char const* sessionCopyright, int timeout) { char* sdp = NULL; char* url = NULL; Boolean success = False; // until we learn otherwise do { // Construct a RTSP URL for the remote stream: char const* const urlFmt = "rtsp://%s:%u/%s"; unsigned urlLen = strlen(urlFmt) + strlen(remoteRTSPServerNameOrAddress) + 5 /* max short len */ + strlen(remoteFileName); url = new char[urlLen]; sprintf(url, urlFmt, remoteRTSPServerNameOrAddress, remoteRTSPServerPortNumber, remoteFileName); // Begin by creating our RTSP client object: fRTSPClient = new RTSPClientForDarwinInjector(envir(), url, fVerbosityLevel, fApplicationName, this); if (fRTSPClient == NULL) break; // Get the remote RTSP server's IP address: struct in_addr addr; { NetAddressList addresses(remoteRTSPServerNameOrAddress); if (addresses.numAddresses() == 0) break; NetAddress const* address = addresses.firstAddress(); addr.s_addr = *(unsigned*)(address->data()); } AddressString remoteRTSPServerAddressStr(addr); // Construct a SDP description for the session that we'll be streaming: char const* const sdpFmt = "v=0\r\n" "o=- %u %u IN IP4 127.0.0.1\r\n" "s=%s\r\n" "i=%s\r\n" "c=IN IP4 %s\r\n" "t=0 0\r\n" "a=x-qt-text-nam:%s\r\n" "a=x-qt-text-inf:%s\r\n" "a=x-qt-text-cmt:source application:%s\r\n" "a=x-qt-text-aut:%s\r\n" "a=x-qt-text-cpy:%s\r\n"; // plus, %s for each substream SDP unsigned sdpLen = strlen(sdpFmt) + 20 /* max int len */ + 20 /* max int len */ + strlen(sessionName) + strlen(sessionInfo) + strlen(remoteRTSPServerAddressStr.val()) + strlen(sessionName) + strlen(sessionInfo) + strlen(fApplicationName) + strlen(sessionAuthor) + strlen(sessionCopyright) + fSubstreamSDPSizes; unsigned const sdpSessionId = our_random32(); unsigned const sdpVersion = sdpSessionId; sdp = new char[sdpLen]; sprintf(sdp, sdpFmt, sdpSessionId, sdpVersion, // o= line sessionName, // s= line sessionInfo, // i= line remoteRTSPServerAddressStr.val(), // c= line sessionName, // a=x-qt-text-nam: line sessionInfo, // a=x-qt-text-inf: line fApplicationName, // a=x-qt-text-cmt: line sessionAuthor, // a=x-qt-text-aut: line sessionCopyright // a=x-qt-text-cpy: line ); char* p = &sdp[strlen(sdp)]; SubstreamDescriptor* ss; for (ss = fHeadSubstream; ss != NULL; ss = ss->next()) { sprintf(p, "%s", ss->sdpLines()); p += strlen(p); } // Do a RTSP "ANNOUNCE" with this SDP description: Authenticator auth; Authenticator* authToUse = NULL; if (remoteUserName[0] != '\0' || remotePassword[0] != '\0') { auth.setUsernameAndPassword(remoteUserName, remotePassword); authToUse = &auth; } fWatchVariable = 0; (void)fRTSPClient->sendAnnounceCommand(sdp, genericResponseHandler, authToUse); // Now block (but handling events) until we get a response: envir().taskScheduler().doEventLoop(&fWatchVariable); delete[] fResultString; if (fResultCode != 0) break; // an error occurred 
with the RTSP "ANNOUNCE" command // Next, tell the remote server to start receiving the stream from us. // (To do this, we first create a "MediaSession" object from the SDP description.) fSession = MediaSession::createNew(envir(), sdp); if (fSession == NULL) break; ss = fHeadSubstream; MediaSubsessionIterator iter(*fSession); MediaSubsession* subsession; ss = fHeadSubstream; unsigned streamChannelId = 0; while ((subsession = iter.next()) != NULL) { if (!subsession->initiate()) break; fWatchVariable = 0; (void)fRTSPClient->sendSetupCommand(*subsession, genericResponseHandler, True /*streamOutgoing*/, True /*streamUsingTCP*/); // Now block (but handling events) until we get a response: envir().taskScheduler().doEventLoop(&fWatchVariable); delete[] fResultString; if (fResultCode != 0) break; // an error occurred with the RTSP "SETUP" command // Tell this subsession's RTPSink and RTCPInstance to use // the RTSP TCP connection: ss->rtpSink()->setStreamSocket(fRTSPClient->socketNum(), streamChannelId++); if (ss->rtcpInstance() != NULL) { ss->rtcpInstance()->setStreamSocket(fRTSPClient->socketNum(), streamChannelId++); } ss = ss->next(); } if (subsession != NULL) break; // an error occurred above // Tell the RTSP server to start: fWatchVariable = 0; (void)fRTSPClient->sendPlayCommand(*fSession, genericResponseHandler); // Now block (but handling events) until we get a response: envir().taskScheduler().doEventLoop(&fWatchVariable); delete[] fResultString; if (fResultCode != 0) break; // an error occurred with the RTSP "PLAY" command // Finally, make sure that the output TCP buffer is a reasonable size: increaseSendBufferTo(envir(), fRTSPClient->socketNum(), 100*1024); success = True; } while (0); delete[] sdp; delete[] url; return success; } Boolean DarwinInjector::isDarwinInjector() const { return True; } void DarwinInjector::genericResponseHandler(RTSPClient* rtspClient, int responseCode, char* responseString) { DarwinInjector* di = ((RTSPClientForDarwinInjector*)rtspClient)-> fOurDarwinInjector; di->genericResponseHandler1(responseCode, responseString); } void DarwinInjector::genericResponseHandler1(int responseCode, char* responseString) { // Set result values: fResultCode = responseCode; fResultString = responseString; // Signal a break from the event loop (thereby returning from the blocking command): fWatchVariable = ~0; } ////////// SubstreamDescriptor implementation ////////// SubstreamDescriptor::SubstreamDescriptor(RTPSink* rtpSink, RTCPInstance* rtcpInstance, unsigned trackId) : fNext(NULL), fRTPSink(rtpSink), fRTCPInstance(rtcpInstance) { // Create the SDP description for this substream char const* mediaType = fRTPSink->sdpMediaType(); unsigned char rtpPayloadType = fRTPSink->rtpPayloadType(); char const* rtpPayloadFormatName = fRTPSink->rtpPayloadFormatName(); unsigned rtpTimestampFrequency = fRTPSink->rtpTimestampFrequency(); unsigned numChannels = fRTPSink->numChannels(); char* rtpmapLine; if (rtpPayloadType >= 96) { char* encodingParamsPart; if (numChannels != 1) { encodingParamsPart = new char[1 + 20 /* max int len */]; sprintf(encodingParamsPart, "/%d", numChannels); } else { encodingParamsPart = strDup(""); } char const* const rtpmapFmt = "a=rtpmap:%d %s/%d%s\r\n"; unsigned rtpmapFmtSize = strlen(rtpmapFmt) + 3 /* max char len */ + strlen(rtpPayloadFormatName) + 20 /* max int len */ + strlen(encodingParamsPart); rtpmapLine = new char[rtpmapFmtSize]; sprintf(rtpmapLine, rtpmapFmt, rtpPayloadType, rtpPayloadFormatName, rtpTimestampFrequency, encodingParamsPart); delete[] 
encodingParamsPart; } else { // Static payload type => no "a=rtpmap:" line rtpmapLine = strDup(""); } unsigned rtpmapLineSize = strlen(rtpmapLine); char const* auxSDPLine = fRTPSink->auxSDPLine(); if (auxSDPLine == NULL) auxSDPLine = ""; unsigned auxSDPLineSize = strlen(auxSDPLine); char const* const sdpFmt = "m=%s 0 RTP/AVP %u\r\n" "%s" // "a=rtpmap:" line (if present) "%s" // auxiliary (e.g., "a=fmtp:") line (if present) "a=control:trackID=%u\r\n"; unsigned sdpFmtSize = strlen(sdpFmt) + strlen(mediaType) + 3 /* max char len */ + rtpmapLineSize + auxSDPLineSize + 20 /* max int len */; char* sdpLines = new char[sdpFmtSize]; sprintf(sdpLines, sdpFmt, mediaType, // m= rtpPayloadType, // m= rtpmapLine, // a=rtpmap:... (if present) auxSDPLine, // optional extra SDP line trackId); // a=control: fSDPLines = strDup(sdpLines); delete[] sdpLines; delete[] rtpmapLine; } SubstreamDescriptor::~SubstreamDescriptor() { delete[] fSDPLines; // allocated via "strDup()", i.e. with new[] delete fNext; } live/liveMedia/MP3AudioFileServerMediaSubsession.cpp000444 001751 000000 00000015431 12265042432 022734 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an MP3 audio file. // (Actually, any MPEG-1 or MPEG-2 audio file should work.)
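// A rough usage sketch (commented out), in the style of the demo
// applications: "env" and "rtspServer" are hypothetical placeholders that
// the caller has already created:
//
//   ServerMediaSession* sms
//     = ServerMediaSession::createNew(*env, "mp3AudioTest");
//   sms->addSubsession(MP3AudioFileServerMediaSubsession::createNew(
//       *env, "test.mp3", False /*reuseFirstSource*/,
//       False /*generateADUs*/, NULL /*interleaving*/));
//   rtspServer->addServerMediaSession(sms);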
// Implementation #include "MP3AudioFileServerMediaSubsession.hh" #include "MPEG1or2AudioRTPSink.hh" #include "MP3ADURTPSink.hh" #include "MP3FileSource.hh" #include "MP3ADU.hh" MP3AudioFileServerMediaSubsession* MP3AudioFileServerMediaSubsession ::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource, Boolean generateADUs, Interleaving* interleaving) { return new MP3AudioFileServerMediaSubsession(env, fileName, reuseFirstSource, generateADUs, interleaving); } MP3AudioFileServerMediaSubsession ::MP3AudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource, Boolean generateADUs, Interleaving* interleaving) : FileServerMediaSubsession(env, fileName, reuseFirstSource), fGenerateADUs(generateADUs), fInterleaving(interleaving), fFileDuration(0.0) { } MP3AudioFileServerMediaSubsession ::~MP3AudioFileServerMediaSubsession() { delete fInterleaving; } FramedSource* MP3AudioFileServerMediaSubsession ::createNewStreamSourceCommon(FramedSource* baseMP3Source, unsigned mp3NumBytes, unsigned& estBitrate) { FramedSource* streamSource; do { streamSource = baseMP3Source; // by default if (streamSource == NULL) break; // Use the MP3 file size, plus the duration, to estimate the stream's bitrate: if (mp3NumBytes > 0 && fFileDuration > 0.0) { estBitrate = (unsigned)(mp3NumBytes/(125*fFileDuration) + 0.5); // kbps, rounded } else { estBitrate = 128; // kbps, estimate } if (fGenerateADUs) { // Add a filter that converts the source MP3s to ADUs: streamSource = ADUFromMP3Source::createNew(envir(), streamSource); if (streamSource == NULL) break; if (fInterleaving != NULL) { // Add another filter that interleaves the ADUs before packetizing: streamSource = MP3ADUinterleaver::createNew(envir(), *fInterleaving, streamSource); if (streamSource == NULL) break; } } else if (fFileDuration > 0.0) { // Because this is a seekable file, insert a pair of filters: one that // converts the input MP3 stream to ADUs; another that converts these // ADUs back to MP3. This allows us to seek within the input stream without // tripping over the MP3 'bit reservoir': streamSource = ADUFromMP3Source::createNew(envir(), streamSource); if (streamSource == NULL) break; streamSource = MP3FromADUSource::createNew(envir(), streamSource); if (streamSource == NULL) break; } } while (0); return streamSource; } void MP3AudioFileServerMediaSubsession::getBaseStreams(FramedSource* frontStream, FramedSource*& sourceMP3Stream, ADUFromMP3Source*& aduStream/*if any*/) { if (fGenerateADUs) { // There's an ADU stream. if (fInterleaving != NULL) { // There's an interleaving filter in front of the ADU stream. So go back one, to reach the ADU stream: aduStream = (ADUFromMP3Source*)(((FramedFilter*)frontStream)->inputSource()); } else { aduStream = (ADUFromMP3Source*)frontStream; } // Then, go back one more, to reach the MP3 source: sourceMP3Stream = (MP3FileSource*)(aduStream->inputSource()); } else if (fFileDuration > 0.0) { // There are a pair of filters - MP3->ADU and ADU->MP3 - in front of the // original MP3 source. 
So, go back one, to reach the ADU source: aduStream = (ADUFromMP3Source*)(((FramedFilter*)frontStream)->inputSource()); // Then, go back one more, to reach the MP3 source: sourceMP3Stream = (MP3FileSource*)(aduStream->inputSource()); } else { // There's no filter in front of the source MP3 stream (and there's no ADU stream): aduStream = NULL; sourceMP3Stream = frontStream; } } void MP3AudioFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& /*numBytes*/) { FramedSource* sourceMP3Stream; ADUFromMP3Source* aduStream; getBaseStreams(inputSource, sourceMP3Stream, aduStream); if (aduStream != NULL) aduStream->resetInput(); // because we're about to seek within its source ((MP3FileSource*)sourceMP3Stream)->seekWithinFile(seekNPT, streamDuration); } void MP3AudioFileServerMediaSubsession ::setStreamSourceScale(FramedSource* inputSource, float scale) { FramedSource* sourceMP3Stream; ADUFromMP3Source* aduStream; getBaseStreams(inputSource, sourceMP3Stream, aduStream); if (aduStream == NULL) return; // because, in this case, the stream's not scalable int iScale = (int)scale; aduStream->setScaleFactor(iScale); ((MP3FileSource*)sourceMP3Stream)->setPresentationTimeScale(iScale); } FramedSource* MP3AudioFileServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { MP3FileSource* mp3Source = MP3FileSource::createNew(envir(), fFileName); if (mp3Source == NULL) return NULL; fFileDuration = mp3Source->filePlayTime(); return createNewStreamSourceCommon(mp3Source, mp3Source->fileSize(), estBitrate); } RTPSink* MP3AudioFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { if (fGenerateADUs) { return MP3ADURTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } else { return MPEG1or2AudioRTPSink::createNew(envir(), rtpGroupsock); } } void MP3AudioFileServerMediaSubsession::testScaleFactor(float& scale) { if (fFileDuration <= 0.0) { // The file is non-seekable, so is probably a live input source. // We don't support scale factors other than 1 scale = 1; } else { // We support any integral scale >= 1 int iScale = (int)(scale + 0.5); // round if (iScale < 1) iScale = 1; scale = (float)iScale; } } float MP3AudioFileServerMediaSubsession::duration() const { return fFileDuration; } live/liveMedia/H264VideoRTPSink.cpp000444 001751 000000 00000011305 12265042432 017167 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// RTP sink for H.264 video (RFC 3984) // Implementation #include "H264VideoRTPSink.hh" #include "H264VideoStreamFramer.hh" #include "Base64.hh" #include "H264VideoRTPSource.hh" // for "parseSPropParameterSets()" ////////// H264VideoRTPSink implementation ////////// H264VideoRTPSink ::H264VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize) : H264or5VideoRTPSink(264, env, RTPgs, rtpPayloadFormat, NULL, 0, sps, spsSize, pps, ppsSize) { } H264VideoRTPSink::~H264VideoRTPSink() { } H264VideoRTPSink* H264VideoRTPSink ::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) { return new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat); } H264VideoRTPSink* H264VideoRTPSink ::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize) { return new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat, sps, spsSize, pps, ppsSize); } H264VideoRTPSink* H264VideoRTPSink ::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, char const* sPropParameterSetsStr) { u_int8_t* sps = NULL; unsigned spsSize = 0; u_int8_t* pps = NULL; unsigned ppsSize = 0; unsigned numSPropRecords; SPropRecord* sPropRecords = parseSPropParameterSets(sPropParameterSetsStr, numSPropRecords); for (unsigned i = 0; i < numSPropRecords; ++i) { if (sPropRecords[i].sPropLength == 0) continue; // bad data u_int8_t nal_unit_type = (sPropRecords[i].sPropBytes[0])&0x1F; if (nal_unit_type == 7/*SPS*/) { sps = sPropRecords[i].sPropBytes; spsSize = sPropRecords[i].sPropLength; } else if (nal_unit_type == 8/*PPS*/) { pps = sPropRecords[i].sPropBytes; ppsSize = sPropRecords[i].sPropLength; } } H264VideoRTPSink* result = new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat, sps, spsSize, pps, ppsSize); delete[] sPropRecords; return result; } Boolean H264VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) { // Our source must be an appropriate framer: return source.isH264VideoStreamFramer(); } char const* H264VideoRTPSink::auxSDPLine() { // Generate a new "a=fmtp:" line each time, using our SPS and PPS (if we have them), // otherwise parameters from our framer source (in case they've changed since the last time that // we were called): H264or5VideoStreamFramer* framerSource = NULL; u_int8_t* vpsDummy = NULL; unsigned vpsDummySize = 0; u_int8_t* sps = fSPS; unsigned spsSize = fSPSSize; u_int8_t* pps = fPPS; unsigned ppsSize = fPPSSize; if (sps == NULL || pps == NULL) { // We need to get SPS and PPS from our framer source: if (fOurFragmenter == NULL) return NULL; // we don't yet have a fragmenter (and therefore not a source) framerSource = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource()); if (framerSource == NULL) return NULL; // we don't yet have a source framerSource->getVPSandSPSandPPS(vpsDummy, vpsDummySize, sps, spsSize, pps, ppsSize); if (sps == NULL || pps == NULL) return NULL; // our source isn't ready } // Set up the "a=fmtp:" SDP line for this stream: char* sps_base64 = base64Encode((char*)sps, spsSize); char* pps_base64 = base64Encode((char*)pps, ppsSize); char const* fmtpFmt = "a=fmtp:%d packetization-mode=1" ";profile-level-id=%06X" ";sprop-parameter-sets=%s,%s\r\n"; unsigned fmtpFmtSize = strlen(fmtpFmt) + 3 /* max char len */ + 6 /* 3 bytes in hex */ + strlen(sps_base64) + strlen(pps_base64); char* fmtp = new char[fmtpFmtSize]; sprintf(fmtp, fmtpFmt, 
rtpPayloadType(), framerSource->profileLevelId(), sps_base64, pps_base64); delete[] sps_base64; delete[] pps_base64; delete[] fFmtpSDPLine; fFmtpSDPLine = fmtp; return fFmtpSDPLine; } live/liveMedia/AVIFileSink.cpp000444 001751 000000 00000065614 12265042432 016422 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A sink that generates an AVI file from a composite media session // Implementation #include "AVIFileSink.hh" #include "InputFile.hh" #include "OutputFile.hh" #include "GroupsockHelper.hh" #define fourChar(x,y,z,w) ( ((w)<<24)|((z)<<16)|((y)<<8)|(x) )/*little-endian*/ ////////// AVISubsessionIOState /////////// // A structure used to represent the I/O state of each input 'subsession': class SubsessionBuffer { public: SubsessionBuffer(unsigned bufferSize) : fBufferSize(bufferSize) { reset(); fData = new unsigned char[bufferSize]; } virtual ~SubsessionBuffer() { delete[] fData; } void reset() { fBytesInUse = 0; } void addBytes(unsigned numBytes) { fBytesInUse += numBytes; } unsigned char* dataStart() { return &fData[0]; } unsigned char* dataEnd() { return &fData[fBytesInUse]; } unsigned bytesInUse() const { return fBytesInUse; } unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; } void setPresentationTime(struct timeval const& presentationTime) { fPresentationTime = presentationTime; } struct timeval const& presentationTime() const {return fPresentationTime;} private: unsigned fBufferSize; struct timeval fPresentationTime; unsigned char* fData; unsigned fBytesInUse; }; class AVISubsessionIOState { public: AVISubsessionIOState(AVIFileSink& sink, MediaSubsession& subsession); virtual ~AVISubsessionIOState(); void setAVIstate(unsigned subsessionIndex); void setFinalAVIstate(); void afterGettingFrame(unsigned packetDataSize, struct timeval presentationTime); void onSourceClosure(); UsageEnvironment& envir() const { return fOurSink.envir(); } public: SubsessionBuffer *fBuffer, *fPrevBuffer; AVIFileSink& fOurSink; MediaSubsession& fOurSubsession; unsigned short fLastPacketRTPSeqNum; Boolean fOurSourceIsActive; struct timeval fPrevPresentationTime; unsigned fMaxBytesPerSecond; Boolean fIsVideo, fIsAudio, fIsByteSwappedAudio; unsigned fAVISubsessionTag; unsigned fAVICodecHandlerType; unsigned fAVISamplingFrequency; // for audio u_int16_t fWAVCodecTag; // for audio unsigned fAVIScale; unsigned fAVIRate; unsigned fAVISize; unsigned fNumFrames; unsigned fSTRHFrameCountPosition; private: void useFrame(SubsessionBuffer& buffer); }; ///////// AVIIndexRecord definition & implementation ////////// class AVIIndexRecord { public: AVIIndexRecord(unsigned chunkId, unsigned flags, unsigned offset, unsigned size) : fNext(NULL), fChunkId(chunkId), fFlags(flags), fOffset(offset), fSize(size) { } 
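// (Each such record is later written out as one 16-byte entry of the file's
// trailing "idx1" chunk - see "completeOutputFile()" below.  A "flags" value
// of 0x10 corresponds to AVIIF_KEYFRAME; "useFrame()" sets it when a frame's
// first byte is 0x67, i.e., an H.264 SPS NAL unit.)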
AVIIndexRecord*& next() { return fNext; } unsigned chunkId() const { return fChunkId; } unsigned flags() const { return fFlags; } unsigned offset() const { return fOffset; } unsigned size() const { return fSize; } private: AVIIndexRecord* fNext; unsigned fChunkId; unsigned fFlags; unsigned fOffset; unsigned fSize; }; ////////// AVIFileSink implementation ////////// AVIFileSink::AVIFileSink(UsageEnvironment& env, MediaSession& inputSession, char const* outputFileName, unsigned bufferSize, unsigned short movieWidth, unsigned short movieHeight, unsigned movieFPS, Boolean packetLossCompensate) : Medium(env), fInputSession(inputSession), fIndexRecordsHead(NULL), fIndexRecordsTail(NULL), fNumIndexRecords(0), fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate), fAreCurrentlyBeingPlayed(False), fNumSubsessions(0), fNumBytesWritten(0), fHaveCompletedOutputFile(False), fMovieWidth(movieWidth), fMovieHeight(movieHeight), fMovieFPS(movieFPS) { fOutFid = OpenOutputFile(env, outputFileName); if (fOutFid == NULL) return; // Set up I/O state for each input subsession: MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { // Ignore subsessions without a data source: FramedSource* subsessionSource = subsession->readSource(); if (subsessionSource == NULL) continue; // If "subsession's" SDP description specified screen dimension // or frame rate parameters, then use these. if (subsession->videoWidth() != 0) { fMovieWidth = subsession->videoWidth(); } if (subsession->videoHeight() != 0) { fMovieHeight = subsession->videoHeight(); } if (subsession->videoFPS() != 0) { fMovieFPS = subsession->videoFPS(); } AVISubsessionIOState* ioState = new AVISubsessionIOState(*this, *subsession); subsession->miscPtr = (void*)ioState; // Also set a 'BYE' handler for this subsession's RTCP instance: if (subsession->rtcpInstance() != NULL) { subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState); } ++fNumSubsessions; } // Begin by writing an AVI header: addFileHeader_AVI(); } AVIFileSink::~AVIFileSink() { completeOutputFile(); // Then, stop streaming and delete each active "AVISubsessionIOState": MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { subsession->readSource()->stopGettingFrames(); AVISubsessionIOState* ioState = (AVISubsessionIOState*)(subsession->miscPtr); if (ioState == NULL) continue; delete ioState; } // Then, delete the index records: AVIIndexRecord* cur = fIndexRecordsHead; while (cur != NULL) { AVIIndexRecord* next = cur->next(); delete cur; cur = next; } // Finally, close our output file: CloseOutputFile(fOutFid); } AVIFileSink* AVIFileSink ::createNew(UsageEnvironment& env, MediaSession& inputSession, char const* outputFileName, unsigned bufferSize, unsigned short movieWidth, unsigned short movieHeight, unsigned movieFPS, Boolean packetLossCompensate) { AVIFileSink* newSink = new AVIFileSink(env, inputSession, outputFileName, bufferSize, movieWidth, movieHeight, movieFPS, packetLossCompensate); if (newSink == NULL || newSink->fOutFid == NULL) { Medium::close(newSink); return NULL; } return newSink; } Boolean AVIFileSink::startPlaying(afterPlayingFunc* afterFunc, void* afterClientData) { // Make sure we're not already being played: if (fAreCurrentlyBeingPlayed) { envir().setResultMsg("This sink has already been played"); return False; } fAreCurrentlyBeingPlayed = True; fAfterFunc = afterFunc; fAfterClientData = afterClientData; return continuePlaying(); } 
Boolean AVIFileSink::continuePlaying() { // Run through each of our input session's 'subsessions', // asking for a frame from each one: Boolean haveActiveSubsessions = False; MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { FramedSource* subsessionSource = subsession->readSource(); if (subsessionSource == NULL) continue; if (subsessionSource->isCurrentlyAwaitingData()) continue; AVISubsessionIOState* ioState = (AVISubsessionIOState*)(subsession->miscPtr); if (ioState == NULL) continue; haveActiveSubsessions = True; unsigned char* toPtr = ioState->fBuffer->dataEnd(); unsigned toSize = ioState->fBuffer->bytesAvailable(); subsessionSource->getNextFrame(toPtr, toSize, afterGettingFrame, ioState, onSourceClosure, ioState); } if (!haveActiveSubsessions) { envir().setResultMsg("No subsessions are currently active"); return False; } return True; } void AVIFileSink ::afterGettingFrame(void* clientData, unsigned packetDataSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) { AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData; if (numTruncatedBytes > 0) { ioState->envir() << "AVIFileSink::afterGettingFrame(): The input frame data was too large for our buffer. " << numTruncatedBytes << " bytes of trailing data was dropped! Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call.\n"; } ioState->afterGettingFrame(packetDataSize, presentationTime); } void AVIFileSink::onSourceClosure(void* clientData) { AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData; ioState->onSourceClosure(); } void AVIFileSink::onSourceClosure1() { // Check whether *all* of the subsession sources have closed. // If not, do nothing for now: MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { AVISubsessionIOState* ioState = (AVISubsessionIOState*)(subsession->miscPtr); if (ioState == NULL) continue; if (ioState->fOurSourceIsActive) return; // this source hasn't closed } completeOutputFile(); // Call our specified 'after' function: if (fAfterFunc != NULL) { (*fAfterFunc)(fAfterClientData); } } void AVIFileSink::onRTCPBye(void* clientData) { AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData; struct timeval timeNow; gettimeofday(&timeNow, NULL); unsigned secsDiff = timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec; MediaSubsession& subsession = ioState->fOurSubsession; ioState->envir() << "Received RTCP \"BYE\" on \"" << subsession.mediumName() << "/" << subsession.codecName() << "\" subsession (after " << secsDiff << " seconds)\n"; // Handle the reception of a RTCP "BYE" as if the source had closed: ioState->onSourceClosure(); } void AVIFileSink::addIndexRecord(AVIIndexRecord* newIndexRecord) { if (fIndexRecordsHead == NULL) { fIndexRecordsHead = newIndexRecord; } else { fIndexRecordsTail->next() = newIndexRecord; } fIndexRecordsTail = newIndexRecord; ++fNumIndexRecords; } void AVIFileSink::completeOutputFile() { if (fHaveCompletedOutputFile || fOutFid == NULL) return; // Update various AVI 'size' fields to take account of the codec data that // we've now written to the file: unsigned maxBytesPerSecond = 0; unsigned numVideoFrames = 0; unsigned numAudioFrames = 0; //// Subsession-specific fields: MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { AVISubsessionIOState* ioState = 
(AVISubsessionIOState*)(subsession->miscPtr); if (ioState == NULL) continue; maxBytesPerSecond += ioState->fMaxBytesPerSecond; setWord(ioState->fSTRHFrameCountPosition, ioState->fNumFrames); if (ioState->fIsVideo) numVideoFrames = ioState->fNumFrames; else if (ioState->fIsAudio) numAudioFrames = ioState->fNumFrames; } //// Global fields: add4ByteString("idx1"); addWord(fNumIndexRecords*4*4); // the size of all of the index records, which come next: for (AVIIndexRecord* indexRecord = fIndexRecordsHead; indexRecord != NULL; indexRecord = indexRecord->next()) { addWord(indexRecord->chunkId()); addWord(indexRecord->flags()); addWord(indexRecord->offset()); addWord(indexRecord->size()); } fRIFFSizeValue += fNumBytesWritten; setWord(fRIFFSizePosition, fRIFFSizeValue); setWord(fAVIHMaxBytesPerSecondPosition, maxBytesPerSecond); setWord(fAVIHFrameCountPosition, numVideoFrames > 0 ? numVideoFrames : numAudioFrames); fMoviSizeValue += fNumBytesWritten; setWord(fMoviSizePosition, fMoviSizeValue); // We're done: fHaveCompletedOutputFile = True; } ////////// AVISubsessionIOState implementation /////////// AVISubsessionIOState::AVISubsessionIOState(AVIFileSink& sink, MediaSubsession& subsession) : fOurSink(sink), fOurSubsession(subsession), fMaxBytesPerSecond(0), fIsVideo(False), fIsAudio(False), fIsByteSwappedAudio(False), fNumFrames(0) { fBuffer = new SubsessionBuffer(fOurSink.fBufferSize); fPrevBuffer = sink.fPacketLossCompensate ? new SubsessionBuffer(fOurSink.fBufferSize) : NULL; FramedSource* subsessionSource = subsession.readSource(); fOurSourceIsActive = subsessionSource != NULL; fPrevPresentationTime.tv_sec = 0; fPrevPresentationTime.tv_usec = 0; } AVISubsessionIOState::~AVISubsessionIOState() { delete fBuffer; delete fPrevBuffer; } void AVISubsessionIOState::setAVIstate(unsigned subsessionIndex) { fIsVideo = strcmp(fOurSubsession.mediumName(), "video") == 0; fIsAudio = strcmp(fOurSubsession.mediumName(), "audio") == 0; if (fIsVideo) { fAVISubsessionTag = fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'d','c'); if (strcmp(fOurSubsession.codecName(), "JPEG") == 0) { fAVICodecHandlerType = fourChar('m','j','p','g'); } else if (strcmp(fOurSubsession.codecName(), "MP4V-ES") == 0) { fAVICodecHandlerType = fourChar('D','I','V','X'); } else if (strcmp(fOurSubsession.codecName(), "MPV") == 0) { fAVICodecHandlerType = fourChar('m','p','g','1'); // what about MPEG-2? } else if (strcmp(fOurSubsession.codecName(), "H263-1998") == 0 || strcmp(fOurSubsession.codecName(), "H263-2000") == 0) { fAVICodecHandlerType = fourChar('H','2','6','3'); } else if (strcmp(fOurSubsession.codecName(), "H264") == 0) { fAVICodecHandlerType = fourChar('H','2','6','4'); } else { fAVICodecHandlerType = fourChar('?','?','?','?'); } fAVIScale = 1; // ??? ##### fAVIRate = fOurSink.fMovieFPS; // ??? ##### fAVISize = fOurSink.fMovieWidth*fOurSink.fMovieHeight*3; // ??? ##### } else if (fIsAudio) { fIsByteSwappedAudio = False; // by default fAVISubsessionTag = fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'w','b'); fAVICodecHandlerType = 1; // ??? 
#### unsigned numChannels = fOurSubsession.numChannels(); fAVISamplingFrequency = fOurSubsession.rtpTimestampFrequency(); // default if (strcmp(fOurSubsession.codecName(), "L16") == 0) { fIsByteSwappedAudio = True; // need to byte-swap data before writing it fWAVCodecTag = 0x0001; fAVIScale = fAVISize = 2*numChannels; // 2 bytes/sample fAVIRate = fAVISize*fAVISamplingFrequency; } else if (strcmp(fOurSubsession.codecName(), "L8") == 0) { fWAVCodecTag = 0x0001; fAVIScale = fAVISize = numChannels; // 1 byte/sample fAVIRate = fAVISize*fAVISamplingFrequency; } else if (strcmp(fOurSubsession.codecName(), "PCMA") == 0) { fWAVCodecTag = 0x0006; fAVIScale = fAVISize = numChannels; // 1 byte/sample fAVIRate = fAVISize*fAVISamplingFrequency; } else if (strcmp(fOurSubsession.codecName(), "PCMU") == 0) { fWAVCodecTag = 0x0007; fAVIScale = fAVISize = numChannels; // 1 byte/sample fAVIRate = fAVISize*fAVISamplingFrequency; } else if (strcmp(fOurSubsession.codecName(), "MPA") == 0) { fWAVCodecTag = 0x0050; fAVIScale = fAVISize = 1; fAVIRate = 0; // ??? ##### } else { fWAVCodecTag = 0x0001; // ??? ##### fAVIScale = fAVISize = 1; fAVIRate = 0; // ??? ##### } } else { // unknown medium fAVISubsessionTag = fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'?','?'); fAVICodecHandlerType = 0; fAVIScale = fAVISize = 1; fAVIRate = 0; // ??? ##### } } void AVISubsessionIOState::afterGettingFrame(unsigned packetDataSize, struct timeval presentationTime) { // Begin by checking whether there was a gap in the RTP stream. // If so, try to compensate for this (if desired): unsigned short rtpSeqNum = fOurSubsession.rtpSource()->curPacketRTPSeqNum(); if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) { short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum; for (short i = 1; i < seqNumGap; ++i) { // Insert a copy of the previous frame, to compensate for the loss: useFrame(*fPrevBuffer); } } fLastPacketRTPSeqNum = rtpSeqNum; // Now, continue working with the frame that we just got if (fBuffer->bytesInUse() == 0) { fBuffer->setPresentationTime(presentationTime); } fBuffer->addBytes(packetDataSize); useFrame(*fBuffer); if (fOurSink.fPacketLossCompensate) { // Save this frame, in case we need it for recovery: SubsessionBuffer* tmp = fPrevBuffer; // assert: != NULL fPrevBuffer = fBuffer; fBuffer = tmp; } fBuffer->reset(); // for the next input // Now, try getting more frames: fOurSink.continuePlaying(); } void AVISubsessionIOState::useFrame(SubsessionBuffer& buffer) { unsigned char* const frameSource = buffer.dataStart(); unsigned const frameSize = buffer.bytesInUse(); struct timeval const& presentationTime = buffer.presentationTime(); if (fPrevPresentationTime.tv_usec != 0||fPrevPresentationTime.tv_sec != 0) { int uSecondsDiff = (presentationTime.tv_sec - fPrevPresentationTime.tv_sec)*1000000 + (presentationTime.tv_usec - fPrevPresentationTime.tv_usec); if (uSecondsDiff > 0) { unsigned bytesPerSecond = (unsigned)((frameSize*1000000.0)/uSecondsDiff); if (bytesPerSecond > fMaxBytesPerSecond) { fMaxBytesPerSecond = bytesPerSecond; } } } fPrevPresentationTime = presentationTime; if (fIsByteSwappedAudio) { // We need to swap the 16-bit audio samples from big-endian // to little-endian order, before writing them to a file: for (unsigned i = 0; i < frameSize; i += 2) { unsigned char tmp = frameSource[i]; frameSource[i] = frameSource[i+1]; frameSource[i+1] = tmp; } } // Add an index record for this frame: AVIIndexRecord* newIndexRecord = new AVIIndexRecord(fAVISubsessionTag, // chunk id frameSource[0] == 0x67 ? 
0x10 : 0, // flags fOurSink.fMoviSizePosition + 8 + fOurSink.fNumBytesWritten, // offset (note: 8 == size + 'movi') frameSize + 4); // size fOurSink.addIndexRecord(newIndexRecord); // Write the data into the file: fOurSink.fNumBytesWritten += fOurSink.addWord(fAVISubsessionTag); if (strcmp(fOurSubsession.codecName(), "H264") == 0) { // Insert a 'start code' (0x00 0x00 0x00 0x01) in front of the frame: fOurSink.fNumBytesWritten += fOurSink.addWord(4+frameSize); fOurSink.fNumBytesWritten += fOurSink.addWord(fourChar(0x00, 0x00, 0x00, 0x01));//add start code } else { fOurSink.fNumBytesWritten += fOurSink.addWord(frameSize); } fwrite(frameSource, 1, frameSize, fOurSink.fOutFid); fOurSink.fNumBytesWritten += frameSize; // Pad to an even length: if (frameSize%2 != 0) fOurSink.fNumBytesWritten += fOurSink.addByte(0); ++fNumFrames; } void AVISubsessionIOState::onSourceClosure() { fOurSourceIsActive = False; fOurSink.onSourceClosure1(); } ////////// AVI-specific implementation ////////// unsigned AVIFileSink::addWord(unsigned word) { // Add "word" to the file in little-endian order: addByte(word); addByte(word>>8); addByte(word>>16); addByte(word>>24); return 4; } unsigned AVIFileSink::addHalfWord(unsigned short halfWord) { // Add "halfWord" to the file in little-endian order: addByte((unsigned char)halfWord); addByte((unsigned char)(halfWord>>8)); return 2; } unsigned AVIFileSink::addZeroWords(unsigned numWords) { for (unsigned i = 0; i < numWords; ++i) { addWord(0); } return numWords*4; } unsigned AVIFileSink::add4ByteString(char const* str) { addByte(str[0]); addByte(str[1]); addByte(str[2]); addByte(str[3] == '\0' ? ' ' : str[3]); // e.g., for "AVI " return 4; } void AVIFileSink::setWord(unsigned filePosn, unsigned size) { do { if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break; addWord(size); if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were return; } while (0); // One of the SeekFile64()s failed, probable because we're not a seekable file envir() << "AVIFileSink::setWord(): SeekFile64 failed (err " << envir().getErrno() << ")\n"; } // Methods for writing particular file headers. Note the following macros: #define addFileHeader(tag,name) \ unsigned AVIFileSink::addFileHeader_##name() { \ add4ByteString("" #tag ""); \ unsigned headerSizePosn = (unsigned)TellFile64(fOutFid); addWord(0); \ add4ByteString("" #name ""); \ unsigned ignoredSize = 8;/*don't include size of tag or size fields*/ \ unsigned size = 12 #define addFileHeader1(name) \ unsigned AVIFileSink::addFileHeader_##name() { \ add4ByteString("" #name ""); \ unsigned headerSizePosn = (unsigned)TellFile64(fOutFid); addWord(0); \ unsigned ignoredSize = 8;/*don't include size of name or size fields*/ \ unsigned size = 8 #define addFileHeaderEnd \ setWord(headerSizePosn, size-ignoredSize); \ return size; \ } addFileHeader(RIFF,AVI); size += addFileHeader_hdrl(); size += addFileHeader_movi(); fRIFFSizePosition = headerSizePosn; fRIFFSizeValue = size-ignoredSize; addFileHeaderEnd; addFileHeader(LIST,hdrl); size += addFileHeader_avih(); // Then, add a "strl" header for each subsession (stream): // (Make the video subsession (if any) come before the audio subsession.) 
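// (A note on reading this section: the "addFileHeader" macros above expand
// each of these definitions into a member function - this code is the body
// of "AVIFileSink::addFileHeader_hdrl()".  The macro declares "size",
// "ignoredSize" and "headerSizePosn"; the matching "addFileHeaderEnd"
// patches the chunk's size field via "setWord()" and closes the function.)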
unsigned subsessionCount = 0; MediaSubsessionIterator iter(fInputSession); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { fCurrentIOState = (AVISubsessionIOState*)(subsession->miscPtr); if (fCurrentIOState == NULL) continue; if (strcmp(subsession->mediumName(), "video") != 0) continue; fCurrentIOState->setAVIstate(subsessionCount++); size += addFileHeader_strl(); } iter.reset(); while ((subsession = iter.next()) != NULL) { fCurrentIOState = (AVISubsessionIOState*)(subsession->miscPtr); if (fCurrentIOState == NULL) continue; if (strcmp(subsession->mediumName(), "video") == 0) continue; fCurrentIOState->setAVIstate(subsessionCount++); size += addFileHeader_strl(); } // Then add another JUNK entry ++fJunkNumber; size += addFileHeader_JUNK(); addFileHeaderEnd; #define AVIF_HASINDEX 0x00000010 // Index at end of file? #define AVIF_MUSTUSEINDEX 0x00000020 #define AVIF_ISINTERLEAVED 0x00000100 #define AVIF_TRUSTCKTYPE 0x00000800 // Use CKType to find key frames? #define AVIF_WASCAPTUREFILE 0x00010000 #define AVIF_COPYRIGHTED 0x00020000 addFileHeader1(avih); unsigned usecPerFrame = fMovieFPS == 0 ? 0 : 1000000/fMovieFPS; size += addWord(usecPerFrame); // dwMicroSecPerFrame fAVIHMaxBytesPerSecondPosition = (unsigned)TellFile64(fOutFid); size += addWord(0); // dwMaxBytesPerSec (fill in later) size += addWord(0); // dwPaddingGranularity size += addWord(AVIF_TRUSTCKTYPE|AVIF_HASINDEX|AVIF_ISINTERLEAVED); // dwFlags fAVIHFrameCountPosition = (unsigned)TellFile64(fOutFid); size += addWord(0); // dwTotalFrames (fill in later) size += addWord(0); // dwInitialFrame size += addWord(fNumSubsessions); // dwStreams size += addWord(fBufferSize); // dwSuggestedBufferSize size += addWord(fMovieWidth); // dwWidth size += addWord(fMovieHeight); // dwHeight size += addZeroWords(4); // dwReserved addFileHeaderEnd; addFileHeader(LIST,strl); size += addFileHeader_strh(); size += addFileHeader_strf(); fJunkNumber = 0; size += addFileHeader_JUNK(); addFileHeaderEnd; addFileHeader1(strh); size += add4ByteString(fCurrentIOState->fIsVideo ? "vids" : fCurrentIOState->fIsAudio ? "auds" : "????"); // fccType size += addWord(fCurrentIOState->fAVICodecHandlerType); // fccHandler size += addWord(0); // dwFlags size += addWord(0); // wPriority + wLanguage size += addWord(0); // dwInitialFrames size += addWord(fCurrentIOState->fAVIScale); // dwScale size += addWord(fCurrentIOState->fAVIRate); // dwRate size += addWord(0); // dwStart fCurrentIOState->fSTRHFrameCountPosition = (unsigned)TellFile64(fOutFid); size += addWord(0); // dwLength (fill in later) size += addWord(fBufferSize); // dwSuggestedBufferSize size += addWord((unsigned)-1); // dwQuality size += addWord(fCurrentIOState->fAVISize); // dwSampleSize size += addWord(0); // rcFrame (start) if (fCurrentIOState->fIsVideo) { size += addHalfWord(fMovieWidth); size += addHalfWord(fMovieHeight); } else { size += addWord(0); } addFileHeaderEnd; addFileHeader1(strf); if (fCurrentIOState->fIsVideo) { // Add a BITMAPINFO header: unsigned extraDataSize = 0; size += addWord(10*4 + extraDataSize); // size size += addWord(fMovieWidth); size += addWord(fMovieHeight); size += addHalfWord(1); // planes size += addHalfWord(24); // bits-per-sample ##### size += addWord(fCurrentIOState->fAVICodecHandlerType); // compr. type size += addWord(fCurrentIOState->fAVISize); size += addZeroWords(4); // ??? 
##### // Later, add extra data here (if any) ##### } else if (fCurrentIOState->fIsAudio) { // Add a WAVFORMATEX header: size += addHalfWord(fCurrentIOState->fWAVCodecTag); unsigned numChannels = fCurrentIOState->fOurSubsession.numChannels(); size += addHalfWord(numChannels); size += addWord(fCurrentIOState->fAVISamplingFrequency); size += addWord(fCurrentIOState->fAVIRate); // bytes per second size += addHalfWord(fCurrentIOState->fAVISize); // block alignment unsigned bitsPerSample = (fCurrentIOState->fAVISize*8)/numChannels; size += addHalfWord(bitsPerSample); if (strcmp(fCurrentIOState->fOurSubsession.codecName(), "MPA") == 0) { // Assume MPEG layer II audio (not MP3): ##### size += addHalfWord(22); // wav_extra_size size += addHalfWord(2); // fwHeadLayer size += addWord(8*fCurrentIOState->fAVIRate); // dwHeadBitrate ##### size += addHalfWord(numChannels == 2 ? 1: 8); // fwHeadMode size += addHalfWord(0); // fwHeadModeExt size += addHalfWord(1); // wHeadEmphasis size += addHalfWord(16); // fwHeadFlags size += addWord(0); // dwPTSLow size += addWord(0); // dwPTSHigh } } addFileHeaderEnd; #define AVI_MASTER_INDEX_SIZE 256 addFileHeader1(JUNK); if (fJunkNumber == 0) { size += addHalfWord(4); // wLongsPerEntry size += addHalfWord(0); // bIndexSubType + bIndexType size += addWord(0); // nEntriesInUse ##### size += addWord(fCurrentIOState->fAVISubsessionTag); // dwChunkId size += addZeroWords(2); // dwReserved size += addZeroWords(AVI_MASTER_INDEX_SIZE*4); } else { size += add4ByteString("odml"); size += add4ByteString("dmlh"); unsigned wtfCount = 248; size += addWord(wtfCount); // ??? ##### size += addZeroWords(wtfCount/4); } addFileHeaderEnd; addFileHeader(LIST,movi); fMoviSizePosition = headerSizePosn; fMoviSizeValue = size-ignoredSize; addFileHeaderEnd; live/liveMedia/MPEG4ESVideoRTPSink.cpp000444 001751 000000 00000012717 12265042432 017620 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
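// [Aside: an illustrative sketch of the "a=fmtp:" line that this sink's
//  "auxSDPLine()" (later in this file) generates; the payload type and the
//  config bytes below are example values, not taken from any real stream.]
// For dynamic payload type 96, profile-level-id 1, and config bytes
// {0x00, 0x01, 0xB0, 0x01}, the generated line would be:
//   a=fmtp:96 profile-level-id=1;config=0001B001
// since each config byte is printed as two uppercase hex characters:
#include <stdio.h>
static void printConfigHex(unsigned char const* config, unsigned len) {
  for (unsigned i = 0; i < len; ++i) printf("%02X", config[i]); // as in auxSDPLine()
}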
// RTP sink for MPEG-4 Elementary Stream video (RFC 3016) // Implementation #include "MPEG4ESVideoRTPSink.hh" #include "MPEG4VideoStreamFramer.hh" #include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()" MPEG4ESVideoRTPSink ::MPEG4ESVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency, u_int8_t profileAndLevelIndication, char const* configStr) : VideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "MP4V-ES"), fVOPIsPresent(False), fProfileAndLevelIndication(profileAndLevelIndication), fFmtpSDPLine(NULL) { fConfigBytes = parseGeneralConfigStr(configStr, fNumConfigBytes); } MPEG4ESVideoRTPSink::~MPEG4ESVideoRTPSink() { delete[] fFmtpSDPLine; delete[] fConfigBytes; } MPEG4ESVideoRTPSink* MPEG4ESVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency) { return new MPEG4ESVideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } MPEG4ESVideoRTPSink* MPEG4ESVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency, u_int8_t profileAndLevelIndication, char const* configStr) { return new MPEG4ESVideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, profileAndLevelIndication, configStr); } Boolean MPEG4ESVideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) { // Our source must be an appropriate framer: return source.isMPEG4VideoStreamFramer(); } #define VOP_START_CODE 0x000001B6 void MPEG4ESVideoRTPSink ::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { if (fragmentationOffset == 0) { // Begin by inspecting the 4-byte code at the start of the frame: if (numBytesInFrame < 4) return; // shouldn't happen unsigned startCode = (frameStart[0]<<24) | (frameStart[1]<<16) | (frameStart[2]<<8) | frameStart[3]; fVOPIsPresent = startCode == VOP_START_CODE; } // Set the RTP 'M' (marker) bit iff this frame ends a VOP // (and there are no fragments remaining). // This relies on the source being a "MPEG4VideoStreamFramer". MPEG4VideoStreamFramer* framerSource = (MPEG4VideoStreamFramer*)fSource; if (framerSource != NULL && framerSource->pictureEndMarker() && numRemainingBytes == 0) { setMarkerBit(); framerSource->pictureEndMarker() = False; } // Also set the RTP timestamp. (We do this for each frame // in the packet, to ensure that the timestamp of the VOP (if present) // gets used.) 
setTimestamp(framePresentationTime); } Boolean MPEG4ESVideoRTPSink::allowFragmentationAfterStart() const { return True; } Boolean MPEG4ESVideoRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { // Once we've packed a VOP into the packet, then no other // frame can be packed into it: return !fVOPIsPresent; } char const* MPEG4ESVideoRTPSink::auxSDPLine() { // Generate a new "a=fmtp:" line each time, using our own 'configuration' information (if we have it), // otherwise parameters from our framer source (in case they've changed since the last time that // we were called): unsigned configLength = fNumConfigBytes; unsigned char* config = fConfigBytes; if (fProfileAndLevelIndication == 0 || config == NULL) { // We need to get this information from our framer source: MPEG4VideoStreamFramer* framerSource = (MPEG4VideoStreamFramer*)fSource; if (framerSource == NULL) return NULL; // we don't yet have a source fProfileAndLevelIndication = framerSource->profile_and_level_indication(); if (fProfileAndLevelIndication == 0) return NULL; // our source isn't ready config = framerSource->getConfigBytes(configLength); if (config == NULL) return NULL; // our source isn't ready } char const* fmtpFmt = "a=fmtp:%d " "profile-level-id=%d;" "config="; unsigned fmtpFmtSize = strlen(fmtpFmt) + 3 /* max char len */ + 3 /* max char len */ + 2*configLength /* 2*, because each byte prints as 2 chars */ + 2 /* trailing \r\n */; char* fmtp = new char[fmtpFmtSize]; sprintf(fmtp, fmtpFmt, rtpPayloadType(), fProfileAndLevelIndication); char* endPtr = &fmtp[strlen(fmtp)]; for (unsigned i = 0; i < configLength; ++i) { sprintf(endPtr, "%02X", config[i]); endPtr += 2; } sprintf(endPtr, "\r\n"); delete[] fFmtpSDPLine; fFmtpSDPLine = strDup(fmtp); delete[] fmtp; return fFmtpSDPLine; } live/liveMedia/DigestAuthentication.cpp000444 001751 000000 00000011725 12265042432 020467 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class used for digest authentication. 
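// [Aside: a minimal standalone sketch of the RFC 2617 digest that
//  "computeDigestResponse()" below computes; "md5Hex" is a hypothetical
//  helper (e.g., a wrapper around "our_MD5Data()") returning the 32-char
//  hex MD5 of a string.]
//   HA1      = md5(<username>:<realm>:<password>)   (or the stored hash itself)
//   HA2      = md5(<cmd>:<url>)
//   response = md5(HA1:<nonce>:HA2)
#include <string>
static std::string md5Hex(std::string const& s); // assumed helper, defined elsewhere
static std::string digestResponse(std::string const& user, std::string const& realm,
                                  std::string const& pass, std::string const& nonce,
                                  std::string const& cmd, std::string const& url) {
  std::string const ha1 = md5Hex(user + ":" + realm + ":" + pass);
  std::string const ha2 = md5Hex(cmd + ":" + url);
  return md5Hex(ha1 + ":" + nonce + ":" + ha2); // the "response" field value
}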
// Implementation #include "DigestAuthentication.hh" #include "ourMD5.hh" #include <strDup.hh> #include <GroupsockHelper.hh> // for gettimeofday() #include <stdio.h> #include <stdlib.h> #include <string.h> Authenticator::Authenticator() { assign(NULL, NULL, NULL, NULL, False); } Authenticator::Authenticator(char const* username, char const* password, Boolean passwordIsMD5) { assign(NULL, NULL, username, password, passwordIsMD5); } Authenticator::Authenticator(const Authenticator& orig) { assign(orig.realm(), orig.nonce(), orig.username(), orig.password(), orig.fPasswordIsMD5); } Authenticator& Authenticator::operator=(const Authenticator& rightSide) { if (&rightSide != this) { reset(); assign(rightSide.realm(), rightSide.nonce(), rightSide.username(), rightSide.password(), rightSide.fPasswordIsMD5); } return *this; } Authenticator::~Authenticator() { reset(); } void Authenticator::reset() { resetRealmAndNonce(); resetUsernameAndPassword(); } void Authenticator::setRealmAndNonce(char const* realm, char const* nonce) { resetRealmAndNonce(); assignRealmAndNonce(realm, nonce); } void Authenticator::setRealmAndRandomNonce(char const* realm) { resetRealmAndNonce(); // Construct data to seed the random nonce: struct { struct timeval timestamp; unsigned counter; } seedData; gettimeofday(&seedData.timestamp, NULL); static unsigned counter = 0; seedData.counter = ++counter; // Use MD5 to compute a 'random' nonce from this seed data: char nonceBuf[33]; our_MD5Data((unsigned char*)(&seedData), sizeof seedData, nonceBuf); assignRealmAndNonce(realm, nonceBuf); } void Authenticator::setUsernameAndPassword(char const* username, char const* password, Boolean passwordIsMD5) { resetUsernameAndPassword(); assignUsernameAndPassword(username, password, passwordIsMD5); } char const* Authenticator::computeDigestResponse(char const* cmd, char const* url) const { // The "response" field is computed as: // md5(md5(<username>:<realm>:<password>):<nonce>:md5(<cmd>:<url>)) // or, if "fPasswordIsMD5" is True: // md5(<password>:<nonce>:md5(<cmd>:<url>)) char ha1Buf[33]; if (fPasswordIsMD5) { strncpy(ha1Buf, password(), 32); ha1Buf[32] = '\0'; // just in case } else { unsigned const ha1DataLen = strlen(username()) + 1 + strlen(realm()) + 1 + strlen(password()); unsigned char* ha1Data = new unsigned char[ha1DataLen+1]; sprintf((char*)ha1Data, "%s:%s:%s", username(), realm(), password()); our_MD5Data(ha1Data, ha1DataLen, ha1Buf); delete[] ha1Data; } unsigned const ha2DataLen = strlen(cmd) + 1 + strlen(url); unsigned char* ha2Data = new unsigned char[ha2DataLen+1]; sprintf((char*)ha2Data, "%s:%s", cmd, url); char ha2Buf[33]; our_MD5Data(ha2Data, ha2DataLen, ha2Buf); delete[] ha2Data; unsigned const digestDataLen = 32 + 1 + strlen(nonce()) + 1 + 32; unsigned char* digestData = new unsigned char[digestDataLen+1]; sprintf((char*)digestData, "%s:%s:%s", ha1Buf, nonce(), ha2Buf); char const* result = our_MD5Data(digestData, digestDataLen, NULL); delete[] digestData; return result; } void Authenticator::reclaimDigestResponse(char const* responseStr) const { delete[](char*)responseStr; } void Authenticator::resetRealmAndNonce() { delete[] fRealm; fRealm = NULL; delete[] fNonce; fNonce = NULL; } void Authenticator::resetUsernameAndPassword() { delete[] fUsername; fUsername = NULL; delete[] fPassword; fPassword = NULL; fPasswordIsMD5 = False; } void Authenticator::assignRealmAndNonce(char const* realm, char const* nonce) { fRealm = strDup(realm); fNonce = strDup(nonce); } void Authenticator::assignUsernameAndPassword(char const* username, char const* password, Boolean passwordIsMD5) { fUsername = strDup(username); fPassword = strDup(password); fPasswordIsMD5 =
passwordIsMD5; } void Authenticator::assign(char const* realm, char const* nonce, char const* username, char const* password, Boolean passwordIsMD5) { assignRealmAndNonce(realm, nonce); assignUsernameAndPassword(username, password, passwordIsMD5); } live/liveMedia/PassiveServerMediaSubsession.cpp000444 001751 000000 00000017425 12265042432 022172 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that represents an existing // 'RTPSink', rather than one that creates new 'RTPSink's on demand. // Implementation #include "PassiveServerMediaSubsession.hh" #include <GroupsockHelper.hh> ////////// PassiveServerMediaSubsession ////////// PassiveServerMediaSubsession* PassiveServerMediaSubsession::createNew(RTPSink& rtpSink, RTCPInstance* rtcpInstance) { return new PassiveServerMediaSubsession(rtpSink, rtcpInstance); } PassiveServerMediaSubsession ::PassiveServerMediaSubsession(RTPSink& rtpSink, RTCPInstance* rtcpInstance) : ServerMediaSubsession(rtpSink.envir()), fSDPLines(NULL), fRTPSink(rtpSink), fRTCPInstance(rtcpInstance) { fClientRTCPSourceRecords = HashTable::create(ONE_WORD_HASH_KEYS); } class RTCPSourceRecord { public: RTCPSourceRecord(netAddressBits addr, Port const& port) : addr(addr), port(port) { } netAddressBits addr; Port port; }; PassiveServerMediaSubsession::~PassiveServerMediaSubsession() { delete[] fSDPLines; // Clean out the RTCPSourceRecord table: while (1) { RTCPSourceRecord* source = (RTCPSourceRecord*)(fClientRTCPSourceRecords->RemoveNext()); if (source == NULL) break; delete source; } delete fClientRTCPSourceRecords; } char const* PassiveServerMediaSubsession::sdpLines() { if (fSDPLines == NULL) { // Construct a set of SDP lines that describe this subsession: // Use the components from "rtpSink": Groupsock const& gs = fRTPSink.groupsockBeingUsed(); AddressString groupAddressStr(gs.groupAddress()); unsigned short portNum = ntohs(gs.port().num()); unsigned char ttl = gs.ttl(); unsigned char rtpPayloadType = fRTPSink.rtpPayloadType(); char const* mediaType = fRTPSink.sdpMediaType(); unsigned estBitrate = fRTCPInstance == NULL ?
50 : fRTCPInstance->totSessionBW(); char* rtpmapLine = fRTPSink.rtpmapLine(); char const* rangeLine = rangeSDPLine(); char const* auxSDPLine = fRTPSink.auxSDPLine(); if (auxSDPLine == NULL) auxSDPLine = ""; char const* const sdpFmt = "m=%s %d RTP/AVP %d\r\n" "c=IN IP4 %s/%d\r\n" "b=AS:%u\r\n" "%s" "%s" "%s" "a=control:%s\r\n"; unsigned sdpFmtSize = strlen(sdpFmt) + strlen(mediaType) + 5 /* max short len */ + 3 /* max char len */ + strlen(groupAddressStr.val()) + 3 /* max char len */ + 20 /* max int len */ + strlen(rtpmapLine) + strlen(rangeLine) + strlen(auxSDPLine) + strlen(trackId()); char* sdpLines = new char[sdpFmtSize]; sprintf(sdpLines, sdpFmt, mediaType, // m= portNum, // m= rtpPayloadType, // m= groupAddressStr.val(), // c= ttl, // c= TTL estBitrate, // b=AS: rtpmapLine, // a=rtpmap:... (if present) rangeLine, // a=range:... (if present) auxSDPLine, // optional extra SDP line trackId()); // a=control: delete[] (char*)rangeLine; delete[] rtpmapLine; fSDPLines = strDup(sdpLines); delete[] sdpLines; } return fSDPLines; } void PassiveServerMediaSubsession ::getStreamParameters(unsigned clientSessionId, netAddressBits clientAddress, Port const& /*clientRTPPort*/, Port const& clientRTCPPort, int /*tcpSocketNum*/, unsigned char /*rtpChannelId*/, unsigned char /*rtcpChannelId*/, netAddressBits& destinationAddress, u_int8_t& destinationTTL, Boolean& isMulticast, Port& serverRTPPort, Port& serverRTCPPort, void*& streamToken) { isMulticast = True; Groupsock& gs = fRTPSink.groupsockBeingUsed(); if (destinationTTL == 255) destinationTTL = gs.ttl(); if (destinationAddress == 0) { // normal case destinationAddress = gs.groupAddress().s_addr; } else { // use the client-specified destination address instead: struct in_addr destinationAddr; destinationAddr.s_addr = destinationAddress; gs.changeDestinationParameters(destinationAddr, 0, destinationTTL); if (fRTCPInstance != NULL) { Groupsock* rtcpGS = fRTCPInstance->RTCPgs(); rtcpGS->changeDestinationParameters(destinationAddr, 0, destinationTTL); } } serverRTPPort = gs.port(); if (fRTCPInstance != NULL) { Groupsock* rtcpGS = fRTCPInstance->RTCPgs(); serverRTCPPort = rtcpGS->port(); } streamToken = NULL; // not used // Make a record of this client's source - for RTCP RR handling: RTCPSourceRecord* source = new RTCPSourceRecord(clientAddress, clientRTCPPort); fClientRTCPSourceRecords->Add((char const*)clientSessionId, source); } void PassiveServerMediaSubsession::startStream(unsigned clientSessionId, void* /*streamToken*/, TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData, unsigned short& rtpSeqNum, unsigned& rtpTimestamp, ServerRequestAlternativeByteHandler* /*serverRequestAlternativeByteHandler*/, void* /*serverRequestAlternativeByteHandlerClientData*/) { rtpSeqNum = fRTPSink.currentSeqNo(); rtpTimestamp = fRTPSink.presetNextTimestamp(); // Try to use a big send buffer for RTP - at least 0.1 second of // specified bandwidth and at least 50 KB unsigned streamBitrate = fRTCPInstance == NULL ? 
50 : fRTCPInstance->totSessionBW(); // in kbps unsigned rtpBufSize = streamBitrate * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024; increaseSendBufferTo(envir(), fRTPSink.groupsockBeingUsed().socketNum(), rtpBufSize); if (fRTCPInstance != NULL) { // Hack: Send a RTCP "SR" packet now, so that receivers will (likely) be able to // get RTCP-synchronized presentation times immediately: fRTCPInstance->sendReport(); // Set up the handler for incoming RTCP "RR" packets from this client: RTCPSourceRecord* source = (RTCPSourceRecord*)(fClientRTCPSourceRecords->Lookup((char const*)clientSessionId)); if (source != NULL) { fRTCPInstance->setSpecificRRHandler(source->addr, source->port, rtcpRRHandler, rtcpRRHandlerClientData); } } } float PassiveServerMediaSubsession::getCurrentNPT(void* streamToken) { // Return the elapsed time between our "RTPSink"s creation time, and the current time: struct timeval const& creationTime = fRTPSink.creationTime(); // alias struct timeval timeNow; gettimeofday(&timeNow, NULL); return (float)(timeNow.tv_sec - creationTime.tv_sec + (timeNow.tv_usec - creationTime.tv_usec)/1000000.0); } void PassiveServerMediaSubsession::deleteStream(unsigned clientSessionId, void*& /*streamToken*/) { // Lookup and remove the 'RTCPSourceRecord' for this client. Also turn off RTCP "RR" handling: RTCPSourceRecord* source = (RTCPSourceRecord*)(fClientRTCPSourceRecords->Lookup((char const*)clientSessionId)); if (source != NULL) { if (fRTCPInstance != NULL) { fRTCPInstance->unsetSpecificRRHandler(source->addr, source->port); } fClientRTCPSourceRecords->Remove((char const*)clientSessionId); delete source; } } live/liveMedia/AC3AudioRTPSource.cpp000444 001751 000000 00000004313 12265042432 017442 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
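// [Aside: an illustrative sketch of the 2-byte RFC 4184 payload header that
//  "processSpecialHeader()" below parses; a standalone sketch, not part of
//  the original file.]
// Byte 0 carries 6 reserved bits plus a 2-bit frame type (FT); byte 1 is the
// number of frames/fragments (NF).
//   FT == 0: the packet holds one or more complete AC-3 frames
//   FT == 1 or 2: the initial fragment of a frame
//   FT == 3: a non-initial fragment, so the packet does not begin a frame
struct AC3PayloadHeader { unsigned frameType; unsigned numFrames; };
static bool parseAC3PayloadHeader(unsigned char const* p, unsigned len,
                                  AC3PayloadHeader& hdr) {
  if (len < 2) return false;    // the payload header is always 2 bytes
  hdr.frameType = p[0] & 0x03;  // same mask as in the code below
  hdr.numFrames = p[1];
  return true;
}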
// AC3 Audio RTP Sources // Implementation #include "AC3AudioRTPSource.hh" AC3AudioRTPSource* AC3AudioRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new AC3AudioRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } AC3AudioRTPSource::AC3AudioRTPSource(UsageEnvironment& env, Groupsock* rtpGS, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, rtpGS, rtpPayloadFormat, rtpTimestampFrequency) { } AC3AudioRTPSource::~AC3AudioRTPSource() { } Boolean AC3AudioRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { unsigned char* headerStart = packet->data(); unsigned packetSize = packet->dataSize(); // There's a 2-byte payload header at the beginning: if (packetSize < 2) return False; resultSpecialHeaderSize = 2; unsigned char FT = headerStart[0]&0x03; fCurrentPacketBeginsFrame = FT != 3; // The RTP "M" (marker) bit indicates the last fragment of a frame. // In case the sender did not set the "M" bit correctly, we also test for FT == 0: fCurrentPacketCompletesFrame = packet->rtpMarkerBit() || FT == 0; return True; } char const* AC3AudioRTPSource::MIMEtype() const { return "audio/AC3"; } live/liveMedia/GSMAudioRTPSink.cpp000444 001751 000000 00000002657 12265042432 017177 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for GSM audio // Implementation #include "GSMAudioRTPSink.hh" GSMAudioRTPSink::GSMAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs) : AudioRTPSink(env, RTPgs, 3, 8000, "GSM") { } GSMAudioRTPSink::~GSMAudioRTPSink() { } GSMAudioRTPSink* GSMAudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) { return new GSMAudioRTPSink(env, RTPgs); } Boolean GSMAudioRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { // Allow at most 5 frames in a single packet: return numFramesUsedSoFar() < 5; } live/liveMedia/BasicUDPSource.cpp000444 001751 000000 00000005604 12265042432 017122 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simple UDP source, where every UDP payload is a complete frame // Implementation #include "BasicUDPSource.hh" #include <GroupsockHelper.hh> BasicUDPSource* BasicUDPSource::createNew(UsageEnvironment& env, Groupsock* inputGS) { return new BasicUDPSource(env, inputGS); } BasicUDPSource::BasicUDPSource(UsageEnvironment& env, Groupsock* inputGS) : FramedSource(env), fInputGS(inputGS), fHaveStartedReading(False) { // Try to use a large receive buffer (in the OS): increaseReceiveBufferTo(env, inputGS->socketNum(), 50*1024); // Make the socket non-blocking, even though it will be read from only asynchronously, when packets arrive. // The reason for this is that, in some OSs, reads on a blocking socket can (allegedly) sometimes block, // even if the socket was previously reported (e.g., by "select()") as having data available. // (This can supposedly happen if the UDP checksum fails, for example.) makeSocketNonBlocking(fInputGS->socketNum()); } BasicUDPSource::~BasicUDPSource(){ envir().taskScheduler().turnOffBackgroundReadHandling(fInputGS->socketNum()); } void BasicUDPSource::doGetNextFrame() { if (!fHaveStartedReading) { // Await incoming packets: envir().taskScheduler().turnOnBackgroundReadHandling(fInputGS->socketNum(), (TaskScheduler::BackgroundHandlerProc*)&incomingPacketHandler, this); fHaveStartedReading = True; } } void BasicUDPSource::doStopGettingFrames() { envir().taskScheduler().turnOffBackgroundReadHandling(fInputGS->socketNum()); fHaveStartedReading = False; } void BasicUDPSource::incomingPacketHandler(BasicUDPSource* source, int /*mask*/){ source->incomingPacketHandler1(); } void BasicUDPSource::incomingPacketHandler1() { if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet // Read the packet into our desired destination: struct sockaddr_in fromAddress; if (!fInputGS->handleRead(fTo, fMaxSize, fFrameSize, fromAddress)) return; // Tell our client that we have new data: afterGetting(this); // we're preceded by a net read; no infinite recursion } live/liveMedia/H264VideoRTPSource.cpp000444 001751 000000 00000014374 12265042432 017534 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
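// [Aside: a hedged usage sketch for "parseSPropParameterSets()", which is
//  declared in "H264VideoRTPSource.hh" and implemented later in this file;
//  the Base64 string below is only an example value, and this function is
//  illustrative only.]
// The "sprop-parameter-sets" attribute of an H.264 "a=fmtp:" SDP line is a
// comma-separated list of Base64-encoded SPS/PPS NAL units; the parser
// returns one SPropRecord per unit.
static unsigned totalSPropBytes(char const* sProp) {
  unsigned numRecords;
  SPropRecord* records = parseSPropParameterSets(sProp, numRecords);
  unsigned total = 0;
  for (unsigned i = 0; i < numRecords; ++i) {
    total += records[i].sPropLength; // length of one raw SPS or PPS NAL unit
  }
  delete[] records; // the caller owns (and reclaims) the returned array
  return total;
}
// e.g., totalSPropBytes("Z0IACpZTBYmI,aMljiA==") would decode two NAL units;
// a decoder typically wants each unit prefixed with the Annex B start code
// 0x00 0x00 0x00 0x01.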
// H.264 Video RTP Sources // Implementation #include "H264VideoRTPSource.hh" #include "Base64.hh" ////////// H264BufferedPacket and H264BufferedPacketFactory ////////// class H264BufferedPacket: public BufferedPacket { public: H264BufferedPacket(H264VideoRTPSource& ourSource); virtual ~H264BufferedPacket(); private: // redefined virtual functions virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize); private: H264VideoRTPSource& fOurSource; }; class H264BufferedPacketFactory: public BufferedPacketFactory { private: // redefined virtual functions virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource); }; ///////// H264VideoRTPSource implementation //////// H264VideoRTPSource* H264VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new H264VideoRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } H264VideoRTPSource ::H264VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, new H264BufferedPacketFactory) { } H264VideoRTPSource::~H264VideoRTPSource() { } Boolean H264VideoRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { unsigned char* headerStart = packet->data(); unsigned packetSize = packet->dataSize(); // The header has a minimum size of 0, since the NAL header is used // as a payload header unsigned expectedHeaderSize = 0; // Check if the type field is 28 (FU-A) or 29 (FU-B) fCurPacketNALUnitType = (headerStart[0]&0x1F); switch (fCurPacketNALUnitType) { case 24: { // STAP-A expectedHeaderSize = 1; // discard the type byte break; } case 25: case 26: case 27: { // STAP-B, MTAP16, or MTAP24 expectedHeaderSize = 3; // discard the type byte, and the initial DON break; } case 28: case 29: { // // FU-A or FU-B // For these NALUs, the first two bytes are the FU indicator and the FU header. 
// If the start bit is set, we reconstruct the original NAL header: unsigned char startBit = headerStart[1]&0x80; unsigned char endBit = headerStart[1]&0x40; if (startBit) { expectedHeaderSize = 1; if (packetSize < expectedHeaderSize) return False; headerStart[1] = (headerStart[0]&0xE0)+(headerStart[1]&0x1F); fCurrentPacketBeginsFrame = True; } else { // If the startbit is not set, both the FU indicator and header // can be discarded expectedHeaderSize = 2; if (packetSize < expectedHeaderSize) return False; fCurrentPacketBeginsFrame = False; } fCurrentPacketCompletesFrame = (endBit != 0); break; } default: { // This packet contains one or more complete, decodable NAL units fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame = True; break; } } resultSpecialHeaderSize = expectedHeaderSize; return True; } char const* H264VideoRTPSource::MIMEtype() const { return "video/H264"; } SPropRecord* parseSPropParameterSets(char const* sPropParameterSetsStr, // result parameter: unsigned& numSPropRecords) { // Make a copy of the input string, so we can replace the commas with '\0's: char* inStr = strDup(sPropParameterSetsStr); if (inStr == NULL) { numSPropRecords = 0; return NULL; } // Count the number of commas (and thus the number of parameter sets): numSPropRecords = 1; char* s; for (s = inStr; *s != '\0'; ++s) { if (*s == ',') { ++numSPropRecords; *s = '\0'; } } // Allocate and fill in the result array: SPropRecord* resultArray = new SPropRecord[numSPropRecords]; s = inStr; for (unsigned i = 0; i < numSPropRecords; ++i) { resultArray[i].sPropBytes = base64Decode(s, resultArray[i].sPropLength); s += strlen(s) + 1; } delete[] inStr; return resultArray; } ////////// H264BufferedPacket and H264BufferedPacketFactory implementation ////////// H264BufferedPacket::H264BufferedPacket(H264VideoRTPSource& ourSource) : fOurSource(ourSource) { } H264BufferedPacket::~H264BufferedPacket() { } unsigned H264BufferedPacket ::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) { unsigned resultNALUSize = 0; // if an error occurs switch (fOurSource.fCurPacketNALUnitType) { case 24: case 25: { // STAP-A or STAP-B // The first two bytes are NALU size: if (dataSize < 2) break; resultNALUSize = (framePtr[0]<<8)|framePtr[1]; framePtr += 2; break; } case 26: { // MTAP16 // The first two bytes are NALU size. The next three are the DOND and TS offset: if (dataSize < 5) break; resultNALUSize = (framePtr[0]<<8)|framePtr[1]; framePtr += 5; break; } case 27: { // MTAP24 // The first two bytes are NALU size. The next four are the DOND and TS offset: if (dataSize < 6) break; resultNALUSize = (framePtr[0]<<8)|framePtr[1]; framePtr += 6; break; } default: { // Common case: We use the entire packet data: return dataSize; } } return (resultNALUSize <= dataSize) ? resultNALUSize : dataSize; } BufferedPacket* H264BufferedPacketFactory ::createNewPacket(MultiFramedRTPSource* ourSource) { return new H264BufferedPacket((H264VideoRTPSource&)(*ourSource)); } live/liveMedia/MPEG4VideoStreamDiscreteFramer.cpp000444 001751 000000 00000022500 12265042432 022140 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simplified version of "MPEG4VideoStreamFramer" that takes only complete, // discrete frames (rather than an arbitrary byte stream) as input. // This avoids the parsing and data copying overhead of the full // "MPEG4VideoStreamFramer". // Implementation #include "MPEG4VideoStreamDiscreteFramer.hh" MPEG4VideoStreamDiscreteFramer* MPEG4VideoStreamDiscreteFramer::createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean leavePresentationTimesUnmodified) { // Need to add source type checking here??? ##### return new MPEG4VideoStreamDiscreteFramer(env, inputSource, leavePresentationTimesUnmodified); } MPEG4VideoStreamDiscreteFramer ::MPEG4VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean leavePresentationTimesUnmodified) : MPEG4VideoStreamFramer(env, inputSource, False/*don't create a parser*/), fLeavePresentationTimesUnmodified(leavePresentationTimesUnmodified), vop_time_increment_resolution(0), fNumVTIRBits(0), fLastNonBFrameVop_time_increment(0) { fLastNonBFramePresentationTime.tv_sec = 0; fLastNonBFramePresentationTime.tv_usec = 0; } MPEG4VideoStreamDiscreteFramer::~MPEG4VideoStreamDiscreteFramer() { } void MPEG4VideoStreamDiscreteFramer::doGetNextFrame() { // Arrange to read data (which should be a complete MPEG-4 video frame) // from our data source, directly into the client's input buffer. // After reading this, we'll do some parsing on the frame. fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this, FramedSource::handleClosure, this); } void MPEG4VideoStreamDiscreteFramer ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { MPEG4VideoStreamDiscreteFramer* source = (MPEG4VideoStreamDiscreteFramer*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void MPEG4VideoStreamDiscreteFramer ::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // Check that the first 4 bytes are a system code: if (frameSize >= 4 && fTo[0] == 0 && fTo[1] == 0 && fTo[2] == 1) { fPictureEndMarker = True; // Assume that we have a complete 'picture' here unsigned i = 3; if (fTo[i] == 0xB0) { // VISUAL_OBJECT_SEQUENCE_START_CODE // The next byte is the "profile_and_level_indication": if (frameSize >= 5) fProfileAndLevelIndication = fTo[4]; // The start of this frame - up to the first GROUP_VOP_START_CODE // or VOP_START_CODE - is stream configuration information. Save this: for (i = 7; i < frameSize; ++i) { if ((fTo[i] == 0xB3 /*GROUP_VOP_START_CODE*/ || fTo[i] == 0xB6 /*VOP_START_CODE*/) && fTo[i-1] == 1 && fTo[i-2] == 0 && fTo[i-3] == 0) { break; // The configuration information ends here } } fNumConfigBytes = i < frameSize ? 
i-3 : frameSize; delete[] fConfigBytes; fConfigBytes = new unsigned char[fNumConfigBytes]; for (unsigned j = 0; j < fNumConfigBytes; ++j) fConfigBytes[j] = fTo[j]; // This information (should) also contain a VOL header, which we need // to analyze, to get "vop_time_increment_resolution" (which we need // - along with "vop_time_increment" - in order to generate accurate // presentation times for "B" frames). analyzeVOLHeader(); } if (i < frameSize) { u_int8_t nextCode = fTo[i]; if (nextCode == 0xB3 /*GROUP_VOP_START_CODE*/) { // Skip to the following VOP_START_CODE (if any): for (i += 4; i < frameSize; ++i) { if (fTo[i] == 0xB6 /*VOP_START_CODE*/ && fTo[i-1] == 1 && fTo[i-2] == 0 && fTo[i-3] == 0) { nextCode = fTo[i]; break; } } } if (nextCode == 0xB6 /*VOP_START_CODE*/ && i+5 < frameSize) { ++i; // Get the "vop_coding_type" from the next byte: u_int8_t nextByte = fTo[i++]; u_int8_t vop_coding_type = nextByte>>6; // Next, get the "modulo_time_base" by counting the '1' bits that // follow. We look at the next 32-bits only. // This should be enough in most cases. u_int32_t next4Bytes = (fTo[i]<<24)|(fTo[i+1]<<16)|(fTo[i+2]<<8)|fTo[i+3]; i += 4; u_int32_t timeInfo = (nextByte<<(32-6))|(next4Bytes>>6); unsigned modulo_time_base = 0; u_int32_t mask = 0x80000000; while ((timeInfo&mask) != 0) { ++modulo_time_base; mask >>= 1; } mask >>= 2; // Then, get the "vop_time_increment". unsigned vop_time_increment = 0; // First, make sure we have enough bits left for this: if ((mask>>(fNumVTIRBits-1)) != 0) { for (unsigned i = 0; i < fNumVTIRBits; ++i) { vop_time_increment |= timeInfo&mask; mask >>= 1; } while (mask != 0) { vop_time_increment >>= 1; mask >>= 1; } } // If this is a "B" frame, then we have to tweak "presentationTime": if (!fLeavePresentationTimesUnmodified && vop_coding_type == 2/*B*/ && (fLastNonBFramePresentationTime.tv_usec > 0 || fLastNonBFramePresentationTime.tv_sec > 0)) { int timeIncrement = fLastNonBFrameVop_time_increment - vop_time_increment; if (timeIncrement<0) timeIncrement += vop_time_increment_resolution; unsigned const MILLION = 1000000; double usIncrement = vop_time_increment_resolution == 0 ? 
0.0 : ((double)timeIncrement*MILLION)/vop_time_increment_resolution; unsigned secondsToSubtract = (unsigned)(usIncrement/MILLION); unsigned uSecondsToSubtract = ((unsigned)usIncrement)%MILLION; presentationTime = fLastNonBFramePresentationTime; if ((unsigned)presentationTime.tv_usec < uSecondsToSubtract) { presentationTime.tv_usec += MILLION; if (presentationTime.tv_sec > 0) --presentationTime.tv_sec; } presentationTime.tv_usec -= uSecondsToSubtract; if ((unsigned)presentationTime.tv_sec > secondsToSubtract) { presentationTime.tv_sec -= secondsToSubtract; } else { presentationTime.tv_sec = presentationTime.tv_usec = 0; } } else { fLastNonBFramePresentationTime = presentationTime; fLastNonBFrameVop_time_increment = vop_time_increment; } } } } // Complete delivery to the client: fFrameSize = frameSize; fNumTruncatedBytes = numTruncatedBytes; fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; afterGetting(this); } Boolean MPEG4VideoStreamDiscreteFramer::getNextFrameBit(u_int8_t& result) { if (fNumBitsSeenSoFar/8 >= fNumConfigBytes) return False; u_int8_t nextByte = fConfigBytes[fNumBitsSeenSoFar/8]; result = (nextByte>>(7-fNumBitsSeenSoFar%8))&1; ++fNumBitsSeenSoFar; return True; } Boolean MPEG4VideoStreamDiscreteFramer::getNextFrameBits(unsigned numBits, u_int32_t& result) { result = 0; for (unsigned i = 0; i < numBits; ++i) { u_int8_t nextBit; if (!getNextFrameBit(nextBit)) return False; result = (result<<1)|nextBit; } return True; } void MPEG4VideoStreamDiscreteFramer::analyzeVOLHeader() { // Begin by moving to the VOL header: unsigned i; for (i = 3; i < fNumConfigBytes; ++i) { if (fConfigBytes[i] >= 0x20 && fConfigBytes[i] <= 0x2F && fConfigBytes[i-1] == 1 && fConfigBytes[i-2] == 0 && fConfigBytes[i-3] == 0) { ++i; break; } } fNumBitsSeenSoFar = 8*i + 9; do { u_int8_t is_object_layer_identifier; if (!getNextFrameBit(is_object_layer_identifier)) break; if (is_object_layer_identifier) fNumBitsSeenSoFar += 7; u_int32_t aspect_ratio_info; if (!getNextFrameBits(4, aspect_ratio_info)) break; if (aspect_ratio_info == 15 /*extended_PAR*/) fNumBitsSeenSoFar += 16; u_int8_t vol_control_parameters; if (!getNextFrameBit(vol_control_parameters)) break; if (vol_control_parameters) { fNumBitsSeenSoFar += 3; // chroma_format; low_delay u_int8_t vbw_parameters; if (!getNextFrameBit(vbw_parameters)) break; if (vbw_parameters) fNumBitsSeenSoFar += 79; } fNumBitsSeenSoFar += 2; // video_object_layer_shape u_int8_t marker_bit; if (!getNextFrameBit(marker_bit)) break; if (marker_bit != 1) break; // sanity check if (!getNextFrameBits(16, vop_time_increment_resolution)) break; if (vop_time_increment_resolution == 0) break; // shouldn't happen // Compute how many bits are necessary to represent this: fNumVTIRBits = 0; for (unsigned test = vop_time_increment_resolution; test>0; test /= 2) { ++fNumVTIRBits; } } while (0); } live/liveMedia/MPEG2TransportStreamFromPESSource.cpp000444 001751 000000 00000005347 12265042432 022633 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter for converting a stream of MPEG PES packets to a MPEG-2 Transport Stream // Implementation #include "MPEG2TransportStreamFromPESSource.hh" #define MAX_PES_PACKET_SIZE (6+65535) MPEG2TransportStreamFromPESSource* MPEG2TransportStreamFromPESSource ::createNew(UsageEnvironment& env, MPEG1or2DemuxedElementaryStream* inputSource) { return new MPEG2TransportStreamFromPESSource(env, inputSource); } MPEG2TransportStreamFromPESSource ::MPEG2TransportStreamFromPESSource(UsageEnvironment& env, MPEG1or2DemuxedElementaryStream* inputSource) : MPEG2TransportStreamMultiplexor(env), fInputSource(inputSource) { fInputBuffer = new unsigned char[MAX_PES_PACKET_SIZE]; } MPEG2TransportStreamFromPESSource::~MPEG2TransportStreamFromPESSource() { Medium::close(fInputSource); delete[] fInputBuffer; } void MPEG2TransportStreamFromPESSource::doStopGettingFrames() { fInputSource->stopGettingFrames(); } void MPEG2TransportStreamFromPESSource ::awaitNewBuffer(unsigned char* /*oldBuffer*/) { fInputSource->getNextFrame(fInputBuffer, MAX_PES_PACKET_SIZE, afterGettingFrame, this, FramedSource::handleClosure, this); } void MPEG2TransportStreamFromPESSource ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { MPEG2TransportStreamFromPESSource* source = (MPEG2TransportStreamFromPESSource*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void MPEG2TransportStreamFromPESSource ::afterGettingFrame1(unsigned frameSize, unsigned /*numTruncatedBytes*/, struct timeval /*presentationTime*/, unsigned /*durationInMicroseconds*/) { if (frameSize < 4) return; handleNewBuffer(fInputBuffer, frameSize, fInputSource->mpegVersion(), fInputSource->lastSeenSCR()); } live/liveMedia/H263plusVideoRTPSource.cpp000444 001751 000000 00000007221 12265042432 020430 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
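// [Aside: an illustrative breakdown of the 2-byte RFC 4629 payload header
//  parsed by "processSpecialHeader()" below; a standalone sketch only.]
// Bit layout (MSB first):  RR(5) | P(1) | V(1) | PLEN(6) | PEBIT(3)
//   P: picture start (the packet begins a frame, with 2 zero bytes elided)
//   V: an extra VRC byte follows the header
//   PLEN: length of an optional extra picture header that follows
static void splitH263plusHeader(unsigned char b0, unsigned char b1) {
  unsigned RR    = b0 >> 3;                       // 5 reserved bits
  bool     P     = (b0 & 0x04) != 0;
  bool     V     = (b0 & 0x02) != 0;
  unsigned PLEN  = ((b0 & 0x01) << 5) | (b1 >> 3);
  unsigned PEBIT = b1 & 0x07;
  (void)RR; (void)P; (void)V; (void)PLEN; (void)PEBIT; // inspect as needed
}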
// H.263+ Video RTP Sources // Implementation #include "H263plusVideoRTPSource.hh" H263plusVideoRTPSource* H263plusVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new H263plusVideoRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } H263plusVideoRTPSource ::H263plusVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency), fNumSpecialHeaders(0), fSpecialHeaderBytesLength(0) { } H263plusVideoRTPSource::~H263plusVideoRTPSource() { } Boolean H263plusVideoRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { unsigned char* headerStart = packet->data(); unsigned packetSize = packet->dataSize(); // The H.263+ payload header is at least 2 bytes in size. // Extract the known fields from the first 2 bytes: unsigned expectedHeaderSize = 2; if (packetSize < expectedHeaderSize) return False; //unsigned char RR = headerStart[0]>>3; Boolean P = (headerStart[0]&0x4) != 0; Boolean V = (headerStart[0]&0x2) != 0; unsigned char PLEN = ((headerStart[0]&0x1)<<5)|(headerStart[1]>>3); //unsigned char PEBIT = headerStart[1]&0x7; if (V) { // There's an extra VRC byte at the end of the header: ++expectedHeaderSize; if (packetSize < expectedHeaderSize) return False; } if (PLEN > 0) { // There's an extra picture header at the end: expectedHeaderSize += PLEN; if (packetSize < expectedHeaderSize) return False; } fCurrentPacketBeginsFrame = P; if (fCurrentPacketBeginsFrame) { fNumSpecialHeaders = fSpecialHeaderBytesLength = 0; } // Make a copy of the special header bytes, in case a reader // can use them: unsigned bytesAvailable = SPECIAL_HEADER_BUFFER_SIZE - fSpecialHeaderBytesLength - 1; if (expectedHeaderSize <= bytesAvailable) { fSpecialHeaderBytes[fSpecialHeaderBytesLength++] = expectedHeaderSize; for (unsigned i = 0; i < expectedHeaderSize; ++i) { fSpecialHeaderBytes[fSpecialHeaderBytesLength++] = headerStart[i]; } fPacketSizes[fNumSpecialHeaders++] = packetSize; } if (P) { // Prepend two zero bytes to the start of the payload proper. // Hack: Do this by shrinking this special header by 2 bytes: expectedHeaderSize -= 2; headerStart[expectedHeaderSize] = 0; headerStart[expectedHeaderSize+1] = 0; } // The RTP "M" (marker) bit indicates the last fragment of a frame: fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); resultSpecialHeaderSize = expectedHeaderSize; return True; } char const* H263plusVideoRTPSource::MIMEtype() const { return "video/H263-1998"; } live/liveMedia/DVVideoRTPSource.cpp000444 001751 000000 00000004267 12265042432 017422 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // DV Video RTP Sources // Implementation #include "DVVideoRTPSource.hh" DVVideoRTPSource* DVVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new DVVideoRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } DVVideoRTPSource::DVVideoRTPSource(UsageEnvironment& env, Groupsock* rtpGS, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, rtpGS, rtpPayloadFormat, rtpTimestampFrequency) { } DVVideoRTPSource::~DVVideoRTPSource() { } #define DV_DIF_BLOCK_SIZE 80 #define DV_SECTION_HEADER 0x1F Boolean DVVideoRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { unsigned const packetSize = packet->dataSize(); if (packetSize < DV_DIF_BLOCK_SIZE) return False; // TARFU! u_int8_t const* data = packet->data(); fCurrentPacketBeginsFrame = data[0] == DV_SECTION_HEADER && (data[1]&0xf8) == 0 && data[2] == 0; // thanks to Ben Hutchings // The RTP "M" (marker) bit indicates the last fragment of a frame: fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); // There is no special header resultSpecialHeaderSize = 0; return True; } char const* DVVideoRTPSource::MIMEtype() const { return "video/DV"; } live/liveMedia/DVVideoStreamFramer.cpp000444 001751 000000 00000022676 12265042432 020170 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that parses a DV input stream into DV frames to deliver to the downstream object // Implementation // (Thanks to Ben Hutchings for his help, including a prototype implementation.) 
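// [Aside: a small sketch of the arithmetic behind the profile table below;
//  standalone and illustrative, not part of the original file.]
// A DV frame is sequenceCount*channelCount sequences of 150 DIF blocks of
// 80 bytes each (150*80 == 12000 bytes per sequence), and the table's
// frameDuration is in microseconds, so 1000000/frameDuration is the frame rate.
static unsigned dvFrameSizeBytes(unsigned sequenceCount, unsigned channelCount) {
  unsigned const kBlocksPerSequence = 150; // DV_NUM_BLOCKS_PER_SEQUENCE (assumed)
  unsigned const kBlockSize = 80;          // DV_DIF_BLOCK_SIZE
  return sequenceCount * channelCount * kBlocksPerSequence * kBlockSize;
}
// e.g., for "SD-VCR/525-60": dvFrameSizeBytes(10, 1) == 120000, and a duration
// of (1000000*1001)/30000.0 us is the NTSC rate of 30000/1001 (~29.97) frames/s.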
#include "DVVideoStreamFramer.hh" #include "GroupsockHelper.hh" ////////// DVVideoStreamFramer implementation ////////// DVVideoStreamFramer::DVVideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean sourceIsSeekable, Boolean leavePresentationTimesUnmodified) : FramedFilter(env, inputSource), fLeavePresentationTimesUnmodified(leavePresentationTimesUnmodified), fOurProfile(NULL), fInitialBlocksPresent(False), fSourceIsSeekable(sourceIsSeekable) { fTo = NULL; // hack used when reading "fSavedInitialBlocks" // Use the current wallclock time as the initial 'presentation time': gettimeofday(&fNextFramePresentationTime, NULL); } DVVideoStreamFramer::~DVVideoStreamFramer() { } DVVideoStreamFramer* DVVideoStreamFramer::createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean sourceIsSeekable, Boolean leavePresentationTimesUnmodified) { return new DVVideoStreamFramer(env, inputSource, sourceIsSeekable, leavePresentationTimesUnmodified); } // Define the parameters for the profiles that we understand: struct DVVideoProfile { char const* name; unsigned apt; unsigned sType; unsigned sequenceCount; unsigned channelCount; unsigned dvFrameSize; // in bytes (== sequenceCount*channelCount*(DV_NUM_BLOCKS_PER_SEQUENCE*DV_DIF_BLOCK_SIZE i.e. 12000)) double frameDuration; // duration of the above, in microseconds. (1000000/this == frame rate) }; static DVVideoProfile const profiles[] = { { "SD-VCR/525-60", 0, 0x00, 10, 1, 120000, (1000000*1001)/30000.0 }, { "SD-VCR/625-50", 0, 0x00, 12, 1, 144000, 1000000/25.0 }, { "314M-25/525-60", 1, 0x00, 10, 1, 120000, (1000000*1001)/30000.0 }, { "314M-25/625-50", 1, 0x00, 12, 1, 144000, 1000000/25.0 }, { "314M-50/525-60", 1, 0x04, 10, 2, 240000, (1000000*1001)/30000.0 }, { "314M-50/625-50", 1, 0x04, 12, 2, 288000, 1000000/25.0 }, { "370M/1080-60i", 1, 0x14, 10, 4, 480000, (1000000*1001)/30000.0 }, { "370M/1080-50i", 1, 0x14, 12, 4, 576000, 1000000/25.0 }, { "370M/720-60p", 1, 0x18, 10, 2, 240000, (1000000*1001)/60000.0 }, { "370M/720-50p", 1, 0x18, 12, 2, 288000, 1000000/50.0 }, { NULL, 0, 0, 0, 0, 0, 0.0 } }; char const* DVVideoStreamFramer::profileName() { if (fOurProfile == NULL) getProfile(); return fOurProfile != NULL ? ((DVVideoProfile const*)fOurProfile)->name : NULL; } Boolean DVVideoStreamFramer::getFrameParameters(unsigned& frameSize, double& frameDuration) { if (fOurProfile == NULL) getProfile(); if (fOurProfile == NULL) return False; frameSize = ((DVVideoProfile const*)fOurProfile)->dvFrameSize; frameDuration = ((DVVideoProfile const*)fOurProfile)->frameDuration; return True; } void DVVideoStreamFramer::getProfile() { // To determine the stream's profile, we need to first read a chunk of data that we can parse: fInputSource->getNextFrame(fSavedInitialBlocks, DV_SAVED_INITIAL_BLOCKS_SIZE, afterGettingFrame, this, FramedSource::handleClosure, this); // Handle events until the requested data arrives: envir().taskScheduler().doEventLoop(&fInitialBlocksPresent); } Boolean DVVideoStreamFramer::isDVVideoStreamFramer() const { return True; } void DVVideoStreamFramer::doGetNextFrame() { fFrameSize = 0; // initially, until we deliver data // If we have saved initial blocks (and won't be seeking back to re-read this data), use this data first.
if (fInitialBlocksPresent && !fSourceIsSeekable) { // For simplicity, we require the downstream object's buffer to be >= this data's size: if (fMaxSize < DV_SAVED_INITIAL_BLOCKS_SIZE) { fNumTruncatedBytes = fMaxSize; afterGetting(this); return; } memmove(fTo, fSavedInitialBlocks, DV_SAVED_INITIAL_BLOCKS_SIZE); fFrameSize = DV_SAVED_INITIAL_BLOCKS_SIZE; fTo += DV_SAVED_INITIAL_BLOCKS_SIZE; fInitialBlocksPresent = False; // for the future } // Arrange to read the (rest of the) requested data. // (But first, make sure that we read an integral multiple of the DV block size.) fMaxSize -= fMaxSize%DV_DIF_BLOCK_SIZE; getAndDeliverData(); } #define DV_SMALLEST_POSSIBLE_FRAME_SIZE 120000 void DVVideoStreamFramer::getAndDeliverData() { unsigned const totFrameSize = fOurProfile != NULL ? ((DVVideoProfile const*)fOurProfile)->dvFrameSize : DV_SMALLEST_POSSIBLE_FRAME_SIZE; unsigned totBytesToDeliver = totFrameSize < fMaxSize ? totFrameSize : fMaxSize; unsigned numBytesToRead = totBytesToDeliver - fFrameSize; fInputSource->getNextFrame(fTo, numBytesToRead, afterGettingFrame, this, FramedSource::handleClosure, this); } void DVVideoStreamFramer::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) { DVVideoStreamFramer* source = (DVVideoStreamFramer*)clientData; source->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime); } #define DVSectionId(n) ptr[(n)*DV_DIF_BLOCK_SIZE + 0] #define DVData(n,i) ptr[(n)*DV_DIF_BLOCK_SIZE + 3+(i)] #define DV_SECTION_HEADER 0x1F #define DV_PACK_HEADER_10 0x3F #define DV_PACK_HEADER_12 0xBF #define DV_SECTION_VAUX_MIN 0x50 #define DV_SECTION_VAUX_MAX 0x5F #define DV_PACK_VIDEO_SOURCE 60 #ifndef MILLION #define MILLION 1000000 #endif void DVVideoStreamFramer::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime) { if (fOurProfile == NULL && frameSize >= DV_SAVED_INITIAL_BLOCKS_SIZE) { // (Try to) parse this data enough to figure out its profile. // We assume that the data begins on a (80-byte) block boundary, but not necessarily on a (150-block) sequence boundary. // We therefore scan each 80-byte block, until we find the 6-block header that begins a sequence: u_int8_t const* data = (fTo == NULL) ? fSavedInitialBlocks : fTo; for (u_int8_t const* ptr = data; ptr + 6*DV_DIF_BLOCK_SIZE <= &data[DV_SAVED_INITIAL_BLOCKS_SIZE]; ptr += DV_DIF_BLOCK_SIZE) { // Check whether "ptr" points to an appropriate header: u_int8_t const sectionHeader = DVSectionId(0); u_int8_t const sectionVAUX = DVSectionId(5); u_int8_t const packHeaderNum = DVData(0,0); if (sectionHeader == DV_SECTION_HEADER && (packHeaderNum == DV_PACK_HEADER_10 || packHeaderNum == DV_PACK_HEADER_12) && (sectionVAUX >= DV_SECTION_VAUX_MIN && sectionVAUX <= DV_SECTION_VAUX_MAX)) { // This data begins a sequence; look up the DV profile from this: u_int8_t const apt = DVData(0,1)&0x07; u_int8_t const sType = DVData(5,48)&0x1F; u_int8_t const sequenceCount = (packHeaderNum == DV_PACK_HEADER_10) ? 
10 : 12; // Use these three parameters (apt, sType, sequenceCount) to look up the DV profile: for (DVVideoProfile const* profile = profiles; profile->name != NULL; ++profile) { if (profile->apt == apt && profile->sType == sType && profile->sequenceCount == sequenceCount) { fOurProfile = profile; break; } } break; // because we found a correct sequence header (even if we don't happen to define a profile for it) } } } if (fTo != NULL) { // There is a downstream object; complete delivery to it (or read more data, if necessary) unsigned const totFrameSize = fOurProfile != NULL ? ((DVVideoProfile const*)fOurProfile)->dvFrameSize : DV_SMALLEST_POSSIBLE_FRAME_SIZE; fFrameSize += frameSize; fTo += frameSize; fPresentationTime = presentationTime; // by default; may get changed below if (fFrameSize < totFrameSize && fFrameSize < fMaxSize && numTruncatedBytes == 0) { // We have more data to deliver; get it now: getAndDeliverData(); } else { // We're done delivering this DV frame (but check for truncation): fNumTruncatedBytes = totFrameSize - fFrameSize; if (fOurProfile != NULL) { // Also set the presentation time, and increment it for next time, // based on the length of this frame: if (!fLeavePresentationTimesUnmodified) fPresentationTime = fNextFramePresentationTime; DVVideoProfile const* ourProfile =(DVVideoProfile const*)fOurProfile; double durationInMicroseconds = (fFrameSize*ourProfile->frameDuration)/ourProfile->dvFrameSize; fDurationInMicroseconds = (unsigned)durationInMicroseconds; fNextFramePresentationTime.tv_usec += fDurationInMicroseconds; fNextFramePresentationTime.tv_sec += fNextFramePresentationTime.tv_usec/MILLION; fNextFramePresentationTime.tv_usec %= MILLION; } afterGetting(this); } } else { // We read data into our special buffer; signal that it has arrived: fInitialBlocksPresent = True; } } live/liveMedia/RTSPServer.cpp000444 001751 000000 00000263662 12265042432 016340 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
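// [Aside: a hedged usage sketch for the server implemented below, following
//  the customary live555 setup pattern; the port number and stream name are
//  example values only.]
// An RTSPServer is created on a port, "ServerMediaSession"s are added to it,
// and the scheduler's event loop then services all client connections:
#include <BasicUsageEnvironment.hh>
static void runExampleRTSPServer() {
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
  RTSPServer* server = RTSPServer::createNew(*env, 8554/*port*/, NULL/*no auth*/);
  if (server == NULL) return; // setUpOurSocket() failed; see env->getResultMsg()
  ServerMediaSession* sms = ServerMediaSession::createNew(*env, "example");
  // ... add one or more "ServerMediaSubsession"s to "sms" here ...
  server->addServerMediaSession(sms);
  char* url = server->rtspURL(sms); // the URL to announce to clients
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;
  env->taskScheduler().doEventLoop(); // does not return
}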
// A RTSP server
// Implementation

#include "RTSPServer.hh"
#include "RTSPCommon.hh"
#include "RTSPRegisterSender.hh"
#include "ProxyServerMediaSession.hh"
#include "Base64.hh"
#include <GroupsockHelper.hh>

////////// RTSPServer implementation //////////

RTSPServer* RTSPServer::createNew(UsageEnvironment& env, Port ourPort, UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds) { int ourSocket = setUpOurSocket(env, ourPort); if (ourSocket == -1) return NULL; return new RTSPServer(env, ourSocket, ourPort, authDatabase, reclamationTestSeconds); } Boolean RTSPServer::lookupByName(UsageEnvironment& env, char const* name, RTSPServer*& resultServer) { resultServer = NULL; // unless we succeed
Medium* medium; if (!Medium::lookupByName(env, name, medium)) return False; if (!medium->isRTSPServer()) { env.setResultMsg(name, " is not a RTSP server"); return False; } resultServer = (RTSPServer*)medium; return True; } void RTSPServer::addServerMediaSession(ServerMediaSession* serverMediaSession) { if (serverMediaSession == NULL) return; char const* sessionName = serverMediaSession->streamName(); if (sessionName == NULL) sessionName = ""; removeServerMediaSession(sessionName); // in case an existing "ServerMediaSession" with this name already exists
fServerMediaSessions->Add(sessionName, (void*)serverMediaSession); } ServerMediaSession* RTSPServer::lookupServerMediaSession(char const* streamName) { return (ServerMediaSession*)(fServerMediaSessions->Lookup(streamName)); } void RTSPServer::removeServerMediaSession(ServerMediaSession* serverMediaSession) { if (serverMediaSession == NULL) return; fServerMediaSessions->Remove(serverMediaSession->streamName()); if (serverMediaSession->referenceCount() == 0) { Medium::close(serverMediaSession); } else { serverMediaSession->deleteWhenUnreferenced() = True; } } void RTSPServer::removeServerMediaSession(char const* streamName) { removeServerMediaSession((ServerMediaSession*)(fServerMediaSessions->Lookup(streamName))); } void RTSPServer::closeAllClientSessionsForServerMediaSession(ServerMediaSession* serverMediaSession) { if (serverMediaSession == NULL) return; HashTable::Iterator* iter = HashTable::Iterator::create(*fClientSessions); RTSPServer::RTSPClientSession* clientSession; char const* key; // dummy
while ((clientSession = (RTSPServer::RTSPClientSession*)(iter->next(key))) != NULL) { if (clientSession->fOurServerMediaSession == serverMediaSession) { delete clientSession; } } delete iter; } void RTSPServer::closeAllClientSessionsForServerMediaSession(char const* streamName) { closeAllClientSessionsForServerMediaSession((ServerMediaSession*)(fServerMediaSessions->Lookup(streamName))); } void RTSPServer::deleteServerMediaSession(ServerMediaSession* serverMediaSession) { if (serverMediaSession == NULL) return; closeAllClientSessionsForServerMediaSession(serverMediaSession); removeServerMediaSession(serverMediaSession); } void RTSPServer::deleteServerMediaSession(char const* streamName) { deleteServerMediaSession((ServerMediaSession*)(fServerMediaSessions->Lookup(streamName))); } void rtspRegisterResponseHandler(RTSPClient* rtspClient, int resultCode, char* resultString); // forward
// A class that represents the state of a "REGISTER" request in progress:
class RegisterRequestRecord: public RTSPRegisterSender { public: RegisterRequestRecord(RTSPServer& ourServer, unsigned requestId, char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToRegister, RTSPServer::responseHandlerForREGISTER* responseHandler, Authenticator*
authenticator, Boolean requestStreamingViaTCP, char const* proxyURLSuffix) : RTSPRegisterSender(ourServer.envir(), remoteClientNameOrAddress, remoteClientPortNum, rtspURLToRegister, rtspRegisterResponseHandler, authenticator, requestStreamingViaTCP, proxyURLSuffix, True/*reuseConnection*/, #ifdef DEBUG 1/*verbosityLevel*/, #else 0/*verbosityLevel*/, #endif NULL), fOurServer(ourServer), fRequestId(requestId), fResponseHandler(responseHandler) { // Add ourself to our server's 'pending REGISTER requests' table: ourServer.fPendingRegisterRequests->Add((char const*)this, this); } virtual ~RegisterRequestRecord(){ // Remove ourself from the server's 'pending REGISTER requests' hash table before we go: fOurServer.fPendingRegisterRequests->Remove((char const*)this); } void handleResponse(int resultCode, char* resultString) { if (resultCode == 0) { // The "REGISTER" request succeeded, so use the still-open RTSP socket to await incoming commands from the remote endpoint: int sock; struct sockaddr_in remoteAddress; grabConnection(sock, remoteAddress); if (sock >= 0) (void)fOurServer.createNewClientConnection(sock, remoteAddress); } if (fResponseHandler != NULL) { // Call our (REGISTER-specific) response handler now: (*fResponseHandler)(&fOurServer, fRequestId, resultCode, resultString); } else { // We need to delete[] "resultString" before we leave: delete[] resultString; } // We're completely done with the REGISTER command now, so delete ourself now: delete this; } private: RTSPServer& fOurServer; unsigned fRequestId; RTSPServer::responseHandlerForREGISTER* fResponseHandler; }; void rtspRegisterResponseHandler(RTSPClient* rtspClient, int resultCode, char* resultString) { RegisterRequestRecord* registerRequestRecord = (RegisterRequestRecord*)rtspClient; registerRequestRecord->handleResponse(resultCode, resultString); } unsigned RTSPServer::registerStream(ServerMediaSession* serverMediaSession, char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, responseHandlerForREGISTER* responseHandler, char const* username, char const* password, Boolean receiveOurStreamViaTCP, char const* proxyURLSuffix) { // Create a new "RegisterRequestRecord" that will send the "REGISTER" command. // (This object will automatically get deleted after we get a response to the "REGISTER" command, or if we're deleted.) Authenticator* authenticator = NULL; if (username != NULL) { if (password == NULL) password = ""; authenticator = new Authenticator(username, password); } unsigned requestId = ++fRegisterRequestCounter; new RegisterRequestRecord(*this, requestId, remoteClientNameOrAddress, remoteClientPortNum, rtspURL(serverMediaSession), responseHandler, authenticator, receiveOurStreamViaTCP, proxyURLSuffix); delete authenticator; // we can do this here because it was copied to the "RegisterRequestRecord" return requestId; } char* RTSPServer ::rtspURL(ServerMediaSession const* serverMediaSession, int clientSocket) const { char* urlPrefix = rtspURLPrefix(clientSocket); char const* sessionName = serverMediaSession->streamName(); char* resultURL = new char[strlen(urlPrefix) + strlen(sessionName) + 1]; sprintf(resultURL, "%s%s", urlPrefix, sessionName); delete[] urlPrefix; return resultURL; } char* RTSPServer::rtspURLPrefix(int clientSocket) const { struct sockaddr_in ourAddress; if (clientSocket < 0) { // Use our default IP address in the URL: ourAddress.sin_addr.s_addr = ReceivingInterfaceAddr != 0 ? 
ReceivingInterfaceAddr : ourIPAddress(envir()); // hack
} else { SOCKLEN_T namelen = sizeof ourAddress; getsockname(clientSocket, (struct sockaddr*)&ourAddress, &namelen); } char urlBuffer[100]; // more than big enough for "rtsp://<ip-address>:<port>/"
portNumBits portNumHostOrder = ntohs(fRTSPServerPort.num()); if (portNumHostOrder == 554 /* the default port number */) { sprintf(urlBuffer, "rtsp://%s/", AddressString(ourAddress).val()); } else { sprintf(urlBuffer, "rtsp://%s:%hu/", AddressString(ourAddress).val(), portNumHostOrder); } return strDup(urlBuffer); } UserAuthenticationDatabase* RTSPServer::setAuthenticationDatabase(UserAuthenticationDatabase* newDB) { UserAuthenticationDatabase* oldDB = fAuthDB; fAuthDB = newDB; return oldDB; } Boolean RTSPServer::setUpTunnelingOverHTTP(Port httpPort) { fHTTPServerSocket = setUpOurSocket(envir(), httpPort); if (fHTTPServerSocket >= 0) { fHTTPServerPort = httpPort; envir().taskScheduler().turnOnBackgroundReadHandling(fHTTPServerSocket, (TaskScheduler::BackgroundHandlerProc*)&incomingConnectionHandlerHTTP, this); return True; } return False; } portNumBits RTSPServer::httpServerPortNum() const { return ntohs(fHTTPServerPort.num()); }

#define LISTEN_BACKLOG_SIZE 20

int RTSPServer::setUpOurSocket(UsageEnvironment& env, Port& ourPort) { int ourSocket = -1; do { // The following statement is enabled by default.
// Don't disable it (by defining ALLOW_RTSP_SERVER_PORT_REUSE) unless you know what you're doing.
#ifndef ALLOW_RTSP_SERVER_PORT_REUSE
NoReuse dummy(env); // Don't use this socket if there's already a local server using it
#endif
ourSocket = setupStreamSocket(env, ourPort); if (ourSocket < 0) break; // Make sure we have a big send buffer:
if (!increaseSendBufferTo(env, ourSocket, 50*1024)) break; // Allow multiple simultaneous connections:
if (listen(ourSocket, LISTEN_BACKLOG_SIZE) < 0) { env.setResultErrMsg("listen() failed: "); break; } if (ourPort.num() == 0) { // bind() will have chosen a port for us; return it also:
if (!getSourcePort(env, ourSocket, ourPort)) break; } return ourSocket; } while (0); if (ourSocket != -1) ::closeSocket(ourSocket); return -1; } char const* RTSPServer::allowedCommandNames() { return "OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY, PAUSE, GET_PARAMETER, SET_PARAMETER"; } Boolean RTSPServer::weImplementREGISTER(char const* /*proxyURLSuffix*/, char*& responseStr) { // By default, servers do not implement our custom "REGISTER" command:
responseStr = NULL; return False; } void RTSPServer::implementCmd_REGISTER(char const* /*url*/, char const* /*urlSuffix*/, int /*socketToRemoteServer*/, Boolean /*deliverViaTCP*/, char const* /*proxyURLSuffix*/) { // By default, this function is a 'noop'
} UserAuthenticationDatabase* RTSPServer::getAuthenticationDatabaseForCommand(char const* /*cmdName*/) { // default implementation
return fAuthDB; } Boolean RTSPServer::specialClientAccessCheck(int /*clientSocket*/, struct sockaddr_in& /*clientAddr*/, char const* /*urlSuffix*/) { // default implementation
return True; } Boolean RTSPServer::specialClientUserAccessCheck(int /*clientSocket*/, struct sockaddr_in& /*clientAddr*/, char const* /*urlSuffix*/, char const * /*username*/) { // default implementation; no further access restrictions:
return True; } RTSPServer::RTSPServer(UsageEnvironment& env, int ourSocket, Port ourPort, UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds) : Medium(env), fRTSPServerPort(ourPort), fRTSPServerSocket(ourSocket), fHTTPServerSocket(-1), fHTTPServerPort(0),
fServerMediaSessions(HashTable::create(STRING_HASH_KEYS)), fClientConnections(HashTable::create(ONE_WORD_HASH_KEYS)), fClientConnectionsForHTTPTunneling(NULL), // will get created if needed fClientSessions(HashTable::create(STRING_HASH_KEYS)), fPendingRegisterRequests(HashTable::create(ONE_WORD_HASH_KEYS)), fRegisterRequestCounter(0), fAuthDB(authDatabase), fReclamationTestSeconds(reclamationTestSeconds) { ignoreSigPipeOnSocket(ourSocket); // so that clients on the same host that are killed don't also kill us // Arrange to handle connections from others: env.taskScheduler().turnOnBackgroundReadHandling(fRTSPServerSocket, (TaskScheduler::BackgroundHandlerProc*)&incomingConnectionHandlerRTSP, this); } RTSPServer::~RTSPServer() { // Turn off background read handling: envir().taskScheduler().turnOffBackgroundReadHandling(fRTSPServerSocket); ::closeSocket(fRTSPServerSocket); envir().taskScheduler().turnOffBackgroundReadHandling(fHTTPServerSocket); ::closeSocket(fHTTPServerSocket); // Close all client connection objects: RTSPServer::RTSPClientConnection* connection; while ((connection = (RTSPServer::RTSPClientConnection*)fClientConnections->getFirst()) != NULL) { delete connection; } delete fClientConnections; delete fClientConnectionsForHTTPTunneling; // all content was already removed as a result of the loop above // Close all client session objects: RTSPServer::RTSPClientSession* clientSession; while ((clientSession = (RTSPServer::RTSPClientSession*)fClientSessions->getFirst()) != NULL) { delete clientSession; } delete fClientSessions; // Delete all server media sessions ServerMediaSession* serverMediaSession; while ((serverMediaSession = (ServerMediaSession*)fServerMediaSessions->getFirst()) != NULL) { removeServerMediaSession(serverMediaSession); // will delete it, because it no longer has any 'client session' objects using it } delete fServerMediaSessions; // Delete any pending REGISTER requests: RegisterRequestRecord* registerRequest; while ((registerRequest = (RegisterRequestRecord*)fPendingRegisterRequests->getFirst()) != NULL) { delete registerRequest; } delete fPendingRegisterRequests; } Boolean RTSPServer::isRTSPServer() const { return True; } void RTSPServer::incomingConnectionHandlerRTSP(void* instance, int /*mask*/) { RTSPServer* server = (RTSPServer*)instance; server->incomingConnectionHandlerRTSP1(); } void RTSPServer::incomingConnectionHandlerRTSP1() { incomingConnectionHandler(fRTSPServerSocket); } void RTSPServer::incomingConnectionHandlerHTTP(void* instance, int /*mask*/) { RTSPServer* server = (RTSPServer*)instance; server->incomingConnectionHandlerHTTP1(); } void RTSPServer::incomingConnectionHandlerHTTP1() { incomingConnectionHandler(fHTTPServerSocket); } void RTSPServer::incomingConnectionHandler(int serverSocket) { struct sockaddr_in clientAddr; SOCKLEN_T clientAddrLen = sizeof clientAddr; int clientSocket = accept(serverSocket, (struct sockaddr*)&clientAddr, &clientAddrLen); if (clientSocket < 0) { int err = envir().getErrno(); if (err != EWOULDBLOCK) { envir().setResultErrMsg("accept() failed: "); } return; } makeSocketNonBlocking(clientSocket); increaseSendBufferTo(envir(), clientSocket, 50*1024); #ifdef DEBUG envir() << "accept()ed connection from " << AddressString(clientAddr).val() << "\n"; #endif // Create a new object for handling this RTSP connection: (void)createNewClientConnection(clientSocket, clientAddr); } ////////// RTSPServer::RTSPClientConnection implementation ////////// RTSPServer::RTSPClientConnection ::RTSPClientConnection(RTSPServer& ourServer, int 
clientSocket, struct sockaddr_in clientAddr) : fOurServer(ourServer), fIsActive(True), fClientInputSocket(clientSocket), fClientOutputSocket(clientSocket), fClientAddr(clientAddr), fRecursionCount(0), fOurSessionCookie(NULL) { // Add ourself to our 'client connections' table: fOurServer.fClientConnections->Add((char const*)this, this); // Arrange to handle incoming requests: resetRequestBuffer(); envir().taskScheduler().setBackgroundHandling(fClientInputSocket, SOCKET_READABLE|SOCKET_EXCEPTION, (TaskScheduler::BackgroundHandlerProc*)&incomingRequestHandler, this); } RTSPServer::RTSPClientConnection::~RTSPClientConnection() { // Remove ourself from the server's 'client connections' hash table before we go: fOurServer.fClientConnections->Remove((char const*)this); if (fOurSessionCookie != NULL) { // We were being used for RTSP-over-HTTP tunneling. Also remove ourselves from the 'session cookie' hash table before we go: fOurServer.fClientConnectionsForHTTPTunneling->Remove(fOurSessionCookie); delete[] fOurSessionCookie; } closeSockets(); } // Special mechanism for handling our custom "REGISTER" command: RTSPServer::RTSPClientConnection::ParamsForREGISTER ::ParamsForREGISTER(RTSPServer::RTSPClientConnection* ourConnection, char const* url, char const* urlSuffix, Boolean reuseConnection, Boolean deliverViaTCP, char const* proxyURLSuffix) : fOurConnection(ourConnection), fURL(strDup(url)), fURLSuffix(strDup(urlSuffix)), fReuseConnection(reuseConnection), fDeliverViaTCP(deliverViaTCP), fProxyURLSuffix(strDup(proxyURLSuffix)) { } RTSPServer::RTSPClientConnection::ParamsForREGISTER::~ParamsForREGISTER() { delete[] fURL; delete[] fURLSuffix; delete[] fProxyURLSuffix; } // Handler routines for specific RTSP commands: void RTSPServer::RTSPClientConnection::handleCmd_OPTIONS() { snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "RTSP/1.0 200 OK\r\nCSeq: %s\r\n%sPublic: %s\r\n\r\n", fCurrentCSeq, dateHeader(), fOurServer.allowedCommandNames()); } void RTSPServer::RTSPClientConnection ::handleCmd_GET_PARAMETER(char const* /*fullRequestStr*/) { // By default, we implement "GET_PARAMETER" (on the entire server) just as a 'no op', and send back a dummy response. // (If you want to handle this type of "GET_PARAMETER" differently, you can do so by defining a subclass of "RTSPServer" // and "RTSPServer::RTSPClientConnection", and then reimplement this virtual function in your subclass.) setRTSPResponse("200 OK", LIVEMEDIA_LIBRARY_VERSION_STRING); } void RTSPServer::RTSPClientConnection ::handleCmd_SET_PARAMETER(char const* /*fullRequestStr*/) { // By default, we implement "SET_PARAMETER" (on the entire server) just as a 'no op', and send back an empty response. // (If you want to handle this type of "SET_PARAMETER" differently, you can do so by defining a subclass of "RTSPServer" // and "RTSPServer::RTSPClientConnection", and then reimplement this virtual function in your subclass.) 
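// (A hedged sketch of such a subclass - "MyRTSPServer" and "MyClientConnection" are
// hypothetical names.  Note that "createNewClientConnection()" must also be overridden,
// so that the server actually instantiates the derived connection class:
//
//   class MyClientConnection: public RTSPServer::RTSPClientConnection {
//   public:
//     MyClientConnection(RTSPServer& ourServer, int clientSocket, struct sockaddr_in clientAddr)
//       : RTSPServer::RTSPClientConnection(ourServer, clientSocket, clientAddr) {}
//   protected:
//     virtual void handleCmd_SET_PARAMETER(char const* fullRequestStr) {
//       // Parse the parameter out of "fullRequestStr", apply it, then reply:
//       setRTSPResponse("200 OK");
//     }
//   };
//
//   RTSPServer::RTSPClientConnection*
//   MyRTSPServer::createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr) {
//     return new MyClientConnection(*this, clientSocket, clientAddr);
//   }
// )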
setRTSPResponse("200 OK"); } void RTSPServer::RTSPClientConnection ::handleCmd_DESCRIBE(char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr) { char* sdpDescription = NULL; char* rtspURL = NULL; do { char urlTotalSuffix[RTSP_PARAM_STRING_MAX]; if (strlen(urlPreSuffix) + strlen(urlSuffix) + 2 > sizeof urlTotalSuffix) { handleCmd_bad(); break; } urlTotalSuffix[0] = '\0'; if (urlPreSuffix[0] != '\0') { strcat(urlTotalSuffix, urlPreSuffix); strcat(urlTotalSuffix, "/"); } strcat(urlTotalSuffix, urlSuffix); if (!authenticationOK("DESCRIBE", urlTotalSuffix, fullRequestStr)) break; // We should really check that the request contains an "Accept:" ##### // for "application/sdp", because that's what we're sending back ##### // Begin by looking up the "ServerMediaSession" object for the specified "urlTotalSuffix": ServerMediaSession* session = fOurServer.lookupServerMediaSession(urlTotalSuffix); if (session == NULL) { handleCmd_notFound(); break; } // Then, assemble a SDP description for this session: sdpDescription = session->generateSDPDescription(); if (sdpDescription == NULL) { // This usually means that a file name that was specified for a // "ServerMediaSubsession" does not exist. setRTSPResponse("404 File Not Found, Or In Incorrect Format"); break; } unsigned sdpDescriptionSize = strlen(sdpDescription); // Also, generate our RTSP URL, for the "Content-Base:" header // (which is necessary to ensure that the correct URL gets used in subsequent "SETUP" requests). rtspURL = fOurServer.rtspURL(session, fClientInputSocket); snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "RTSP/1.0 200 OK\r\nCSeq: %s\r\n" "%s" "Content-Base: %s/\r\n" "Content-Type: application/sdp\r\n" "Content-Length: %d\r\n\r\n" "%s", fCurrentCSeq, dateHeader(), rtspURL, sdpDescriptionSize, sdpDescription); } while (0); delete[] sdpDescription; delete[] rtspURL; } static void lookForHeader(char const* headerName, char const* source, unsigned sourceLen, char* resultStr, unsigned resultMaxSize) { resultStr[0] = '\0'; // by default, return an empty string unsigned headerNameLen = strlen(headerName); for (int i = 0; i < (int)(sourceLen-headerNameLen); ++i) { if (strncmp(&source[i], headerName, headerNameLen) == 0 && source[i+headerNameLen] == ':') { // We found the header. Skip over any whitespace, then copy the rest of the line to "resultStr": for (i += headerNameLen+1; i < (int)sourceLen && (source[i] == ' ' || source[i] == '\t'); ++i) {} for (unsigned j = i; j < sourceLen; ++j) { if (source[j] == '\r' || source[j] == '\n') { // We've found the end of the line. Copy it to the result (if it will fit): if (j-i+1 > resultMaxSize) break; char const* resultSource = &source[i]; char const* resultSourceEnd = &source[j]; while (resultSource < resultSourceEnd) *resultStr++ = *resultSource++; *resultStr = '\0'; break; } } } } } void RTSPServer ::RTSPClientConnection::handleCmd_REGISTER(char const* url, char const* urlSuffix, char const* fullRequestStr, Boolean reuseConnection, Boolean deliverViaTCP, char const* proxyURLSuffix) { char* responseStr; if (fOurServer.weImplementREGISTER(proxyURLSuffix, responseStr)) { // The "REGISTER" command - if we implement it - may require access control: if (!authenticationOK("REGISTER", urlSuffix, fullRequestStr)) return; // We implement the "REGISTER" command by first replying to it, then actually handling it // (in a separate event-loop task, that will get called after the reply has been done): setRTSPResponse(responseStr == NULL ? 
"200 OK" : responseStr); delete[] responseStr; ParamsForREGISTER* registerParams = new ParamsForREGISTER(this, url, urlSuffix, reuseConnection, deliverViaTCP, proxyURLSuffix); envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)continueHandlingREGISTER, registerParams); } else if (responseStr != NULL) { setRTSPResponse(responseStr); delete[] responseStr; } else { handleCmd_notSupported(); } } void RTSPServer::RTSPClientConnection::handleCmd_bad() { // Don't do anything with "fCurrentCSeq", because it might be nonsense snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "RTSP/1.0 400 Bad Request\r\n%sAllow: %s\r\n\r\n", dateHeader(), fOurServer.allowedCommandNames()); } void RTSPServer::RTSPClientConnection::handleCmd_notSupported() { snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "RTSP/1.0 405 Method Not Allowed\r\nCSeq: %s\r\n%sAllow: %s\r\n\r\n", fCurrentCSeq, dateHeader(), fOurServer.allowedCommandNames()); } void RTSPServer::RTSPClientConnection::handleCmd_notFound() { setRTSPResponse("404 Stream Not Found"); } void RTSPServer::RTSPClientConnection::handleCmd_sessionNotFound() { setRTSPResponse("454 Session Not Found"); } void RTSPServer::RTSPClientConnection::handleCmd_unsupportedTransport() { setRTSPResponse("461 Unsupported Transport"); } Boolean RTSPServer::RTSPClientConnection::parseHTTPRequestString(char* resultCmdName, unsigned resultCmdNameMaxSize, char* urlSuffix, unsigned urlSuffixMaxSize, char* sessionCookie, unsigned sessionCookieMaxSize, char* acceptStr, unsigned acceptStrMaxSize) { // Check for the limited HTTP requests that we expect for specifying RTSP-over-HTTP tunneling. // This parser is currently rather dumb; it should be made smarter ##### char const* reqStr = (char const*)fRequestBuffer; unsigned const reqStrSize = fRequestBytesAlreadySeen; // Read everything up to the first space as the command name: Boolean parseSucceeded = False; unsigned i; for (i = 0; i < resultCmdNameMaxSize-1 && i < reqStrSize; ++i) { char c = reqStr[i]; if (c == ' ' || c == '\t') { parseSucceeded = True; break; } resultCmdName[i] = c; } resultCmdName[i] = '\0'; if (!parseSucceeded) return False; // Look for the string "HTTP/", before the first \r or \n: parseSucceeded = False; for (; i < reqStrSize-5 && reqStr[i] != '\r' && reqStr[i] != '\n'; ++i) { if (reqStr[i] == 'H' && reqStr[i+1] == 'T' && reqStr[i+2]== 'T' && reqStr[i+3]== 'P' && reqStr[i+4]== '/') { i += 5; // to advance past the "HTTP/" parseSucceeded = True; break; } } if (!parseSucceeded) return False; // Get the 'URL suffix' that occurred before this: unsigned k = i-6; while (k > 0 && reqStr[k] == ' ') --k; // back up over white space unsigned j = k; while (j > 0 && reqStr[j] != ' ' && reqStr[j] != '/') --j; // The URL suffix is in position (j,k]: if (k - j + 1 > urlSuffixMaxSize) return False; // there's no room> unsigned n = 0; while (++j <= k) urlSuffix[n++] = reqStr[j]; urlSuffix[n] = '\0'; // Look for various headers that we're interested in: lookForHeader("x-sessioncookie", &reqStr[i], reqStrSize-i, sessionCookie, sessionCookieMaxSize); lookForHeader("Accept", &reqStr[i], reqStrSize-i, acceptStr, acceptStrMaxSize); return True; } void RTSPServer::RTSPClientConnection::handleHTTPCmd_notSupported() { snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "HTTP/1.1 405 Method Not Allowed\r\n%s\r\n\r\n", dateHeader()); } void RTSPServer::RTSPClientConnection::handleHTTPCmd_notFound() { snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "HTTP/1.1 404 Not Found\r\n%s\r\n\r\n", dateHeader()); } void 
RTSPServer::RTSPClientConnection::handleHTTPCmd_OPTIONS() { #ifdef DEBUG fprintf(stderr, "Handled HTTP \"OPTIONS\" request\n"); #endif // Construct a response to the "OPTIONS" command that notes that our special headers (for RTSP-over-HTTP tunneling) are allowed: snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "HTTP/1.1 200 OK\r\n" "%s" "Access-Control-Allow-Origin: *\r\n" "Access-Control-Allow-Methods: POST, GET, OPTIONS\r\n" "Access-Control-Allow-Headers: x-sessioncookie, Pragma, Cache-Control\r\n" "Access-Control-Max-Age: 1728000\r\n" "\r\n", dateHeader()); } void RTSPServer::RTSPClientConnection::handleHTTPCmd_TunnelingGET(char const* sessionCookie) { // Record ourself as having this 'session cookie', so that a subsequent HTTP "POST" command (with the same 'session cookie') // can find us: if (fOurServer.fClientConnectionsForHTTPTunneling == NULL) { fOurServer.fClientConnectionsForHTTPTunneling = HashTable::create(STRING_HASH_KEYS); } delete[] fOurSessionCookie; fOurSessionCookie = strDup(sessionCookie); fOurServer.fClientConnectionsForHTTPTunneling->Add(sessionCookie, (void*)this); #ifdef DEBUG fprintf(stderr, "Handled HTTP \"GET\" request (client output socket: %d)\n", fClientOutputSocket); #endif // Construct our response: snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "HTTP/1.1 200 OK\r\n" "%s" "Cache-Control: no-cache\r\n" "Pragma: no-cache\r\n" "Content-Type: application/x-rtsp-tunnelled\r\n" "\r\n", dateHeader()); } Boolean RTSPServer::RTSPClientConnection ::handleHTTPCmd_TunnelingPOST(char const* sessionCookie, unsigned char const* extraData, unsigned extraDataSize) { // Use the "sessionCookie" string to look up the separate "RTSPClientConnection" object that should have been used to handle // an earlier HTTP "GET" request: if (fOurServer.fClientConnectionsForHTTPTunneling == NULL) { fOurServer.fClientConnectionsForHTTPTunneling = HashTable::create(STRING_HASH_KEYS); } RTSPServer::RTSPClientConnection* prevClientConnection = (RTSPServer::RTSPClientConnection*)(fOurServer.fClientConnectionsForHTTPTunneling->Lookup(sessionCookie)); if (prevClientConnection == NULL) { // There was no previous HTTP "GET" request; treat this "POST" request as bad: handleHTTPCmd_notSupported(); fIsActive = False; // triggers deletion of ourself return False; } #ifdef DEBUG fprintf(stderr, "Handled HTTP \"POST\" request (client input socket: %d)\n", fClientInputSocket); #endif // Change the previous "RTSPClientSession" object's input socket to ours. 
// It will be used for subsequent requests:
prevClientConnection->changeClientInputSocket(fClientInputSocket, extraData, extraDataSize); fClientInputSocket = fClientOutputSocket = -1; // so the socket doesn't get closed when we get deleted
return True; } void RTSPServer::RTSPClientConnection::handleHTTPCmd_StreamingGET(char const* /*urlSuffix*/, char const* /*fullRequestStr*/) { // By default, we don't support requests to access streams via HTTP:
handleHTTPCmd_notSupported(); } void RTSPServer::RTSPClientConnection::resetRequestBuffer() { fRequestBytesAlreadySeen = 0; fRequestBufferBytesLeft = sizeof fRequestBuffer; fLastCRLF = &fRequestBuffer[-3]; // hack: Ensures that we don't think we have end-of-msg if the data starts with <CR><LF>
fBase64RemainderCount = 0; } void RTSPServer::RTSPClientConnection::closeSockets() { // Turn off background handling on our input socket (and output socket, if different); then close it (or them):
if (fClientOutputSocket != fClientInputSocket) { envir().taskScheduler().disableBackgroundHandling(fClientOutputSocket); ::closeSocket(fClientOutputSocket); } envir().taskScheduler().disableBackgroundHandling(fClientInputSocket); ::closeSocket(fClientInputSocket); fClientInputSocket = fClientOutputSocket = -1; } void RTSPServer::RTSPClientConnection::incomingRequestHandler(void* instance, int /*mask*/) { RTSPClientConnection* session = (RTSPClientConnection*)instance; session->incomingRequestHandler1(); } void RTSPServer::RTSPClientConnection::incomingRequestHandler1() { struct sockaddr_in dummy; // 'from' address, meaningless in this case
int bytesRead = readSocket(envir(), fClientInputSocket, &fRequestBuffer[fRequestBytesAlreadySeen], fRequestBufferBytesLeft, dummy); handleRequestBytes(bytesRead); } void RTSPServer::RTSPClientConnection::handleAlternativeRequestByte(void* instance, u_int8_t requestByte) { RTSPClientConnection* session = (RTSPClientConnection*)instance; session->handleAlternativeRequestByte1(requestByte); } void RTSPServer::RTSPClientConnection::handleAlternativeRequestByte1(u_int8_t requestByte) { if (requestByte == 0xFF) { // Hack: The new handler of the input TCP socket encountered an error reading it.
// Indicate this:
handleRequestBytes(-1); } else if (requestByte == 0xFE) { // Another hack: The new handler of the input TCP socket no longer needs it, so take back control of it:
envir().taskScheduler().setBackgroundHandling(fClientInputSocket, SOCKET_READABLE|SOCKET_EXCEPTION, (TaskScheduler::BackgroundHandlerProc*)&incomingRequestHandler, this); } else { // Normal case: Add this character to our buffer; then try to handle the data that we have buffered so far:
if (fRequestBufferBytesLeft == 0 || fRequestBytesAlreadySeen >= RTSP_BUFFER_SIZE) return; fRequestBuffer[fRequestBytesAlreadySeen] = requestByte; handleRequestBytes(1); } }

// A special version of "parseTransportHeader()", used just for parsing the "Transport:" header in an incoming "REGISTER" command:
static void parseTransportHeaderForREGISTER(char const* buf, Boolean &reuseConnection, Boolean& deliverViaTCP, char*& proxyURLSuffix) { // Initialize the result parameters to default values:
reuseConnection = False; deliverViaTCP = False; proxyURLSuffix = NULL; // First, find "Transport:"
while (1) { if (*buf == '\0') return; // not found
if (*buf == '\r' && *(buf+1) == '\n' && *(buf+2) == '\r') return; // end of the headers => not found
if (_strncasecmp(buf, "Transport:", 10) == 0) break; ++buf; } int reuseConnectionNum; // Then, run through each of the fields, looking for ones we handle:
char const* fields = buf + 10; while (*fields == ' ') ++fields; char* field = strDupSize(fields); while (sscanf(fields, "%[^;\r\n]", field) == 1) { if (sscanf(field, "reuse_connection = %d", &reuseConnectionNum) == 1) { reuseConnection = reuseConnectionNum != 0; } else if (_strncasecmp(field, "preferred_delivery_protocol=udp", 31) == 0) { deliverViaTCP = False; } else if (_strncasecmp(field, "preferred_delivery_protocol=interleaved", 39) == 0) { deliverViaTCP = True; } else if (_strncasecmp(field, "proxy_url_suffix=", 17) == 0) { delete[] proxyURLSuffix; proxyURLSuffix = strDup(field+17); } fields += strlen(field); while (*fields == ';' || *fields == ' ' || *fields == '\t') ++fields; // skip over separating ';' chars or whitespace
if (*fields == '\0' || *fields == '\r' || *fields == '\n') break; } delete[] field; } void RTSPServer::RTSPClientConnection::handleRequestBytes(int newBytesRead) { int numBytesRemaining = 0; ++fRecursionCount; do { RTSPServer::RTSPClientSession* clientSession = NULL; if (newBytesRead < 0 || (unsigned)newBytesRead >= fRequestBufferBytesLeft) { // Either the client socket has died, or the request was too big for us.
// Terminate this connection:
#ifdef DEBUG
fprintf(stderr, "RTSPClientConnection[%p]::handleRequestBytes() read %d new bytes (of %d); terminating connection!\n", this, newBytesRead, fRequestBufferBytesLeft);
#endif
fIsActive = False; break; } Boolean endOfMsg = False; unsigned char* ptr = &fRequestBuffer[fRequestBytesAlreadySeen];
#ifdef DEBUG
ptr[newBytesRead] = '\0'; fprintf(stderr, "RTSPClientConnection[%p]::handleRequestBytes() %s %d new bytes:%s\n", this, numBytesRemaining > 0 ? "processing" : "read", newBytesRead, ptr);
#endif
if (fClientOutputSocket != fClientInputSocket && numBytesRemaining == 0) { // We're doing RTSP-over-HTTP tunneling, and input commands are assumed to have been Base64-encoded.
// We therefore Base64-decode as much of this new data as we can (i.e., up to a multiple of 4 bytes).
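// (To make the grouping concrete: Base64 turns each group of 4 encoded characters into
// 3 decoded bytes, which is why only a multiple of 4 characters can be decoded at once.
// For example, if 2 characters were carried over from the previous read
// (fBase64RemainderCount == 2) and 10 new characters arrive, then 12 characters are
// available, 12%4 == 0, and all 12 decode to 9 bytes; if instead 11 new characters
// arrive, 13 are available, so only 12 get decoded now and the 13th is carried over
// in fBase64RemainderCount for next time.)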
// But first, we remove any whitespace that may be in the input data: unsigned toIndex = 0; for (int fromIndex = 0; fromIndex < newBytesRead; ++fromIndex) { char c = ptr[fromIndex]; if (!(c == ' ' || c == '\t' || c == '\r' || c == '\n')) { // not 'whitespace': space,tab,CR,NL ptr[toIndex++] = c; } } newBytesRead = toIndex; unsigned numBytesToDecode = fBase64RemainderCount + newBytesRead; unsigned newBase64RemainderCount = numBytesToDecode%4; numBytesToDecode -= newBase64RemainderCount; if (numBytesToDecode > 0) { ptr[newBytesRead] = '\0'; unsigned decodedSize; unsigned char* decodedBytes = base64Decode((char const*)(ptr-fBase64RemainderCount), numBytesToDecode, decodedSize); #ifdef DEBUG fprintf(stderr, "Base64-decoded %d input bytes into %d new bytes:", numBytesToDecode, decodedSize); for (unsigned k = 0; k < decodedSize; ++k) fprintf(stderr, "%c", decodedBytes[k]); fprintf(stderr, "\n"); #endif // Copy the new decoded bytes in place of the old ones (we can do this because there are fewer decoded bytes than original): unsigned char* to = ptr-fBase64RemainderCount; for (unsigned i = 0; i < decodedSize; ++i) *to++ = decodedBytes[i]; // Then copy any remaining (undecoded) bytes to the end: for (unsigned j = 0; j < newBase64RemainderCount; ++j) *to++ = (ptr-fBase64RemainderCount+numBytesToDecode)[j]; newBytesRead = decodedSize + newBase64RemainderCount; // adjust to allow for the size of the new decoded data (+ remainder) delete[] decodedBytes; } fBase64RemainderCount = newBase64RemainderCount; if (fBase64RemainderCount > 0) break; // because we know that we have more input bytes still to receive } // Look for the end of the message: unsigned char *tmpPtr = fLastCRLF + 2; if (tmpPtr < fRequestBuffer) tmpPtr = fRequestBuffer; while (tmpPtr < &ptr[newBytesRead-1]) { if (*tmpPtr == '\r' && *(tmpPtr+1) == '\n') { if (tmpPtr - fLastCRLF == 2) { // This is it: endOfMsg = True; break; } fLastCRLF = tmpPtr; } ++tmpPtr; } fRequestBufferBytesLeft -= newBytesRead; fRequestBytesAlreadySeen += newBytesRead; if (!endOfMsg) break; // subsequent reads will be needed to complete the request // Parse the request string into command name and 'CSeq', then handle the command: fRequestBuffer[fRequestBytesAlreadySeen] = '\0'; char cmdName[RTSP_PARAM_STRING_MAX]; char urlPreSuffix[RTSP_PARAM_STRING_MAX]; char urlSuffix[RTSP_PARAM_STRING_MAX]; char cseq[RTSP_PARAM_STRING_MAX]; char sessionIdStr[RTSP_PARAM_STRING_MAX]; unsigned contentLength = 0; fLastCRLF[2] = '\0'; // temporarily, for parsing Boolean parseSucceeded = parseRTSPRequestString((char*)fRequestBuffer, fLastCRLF+2 - fRequestBuffer, cmdName, sizeof cmdName, urlPreSuffix, sizeof urlPreSuffix, urlSuffix, sizeof urlSuffix, cseq, sizeof cseq, sessionIdStr, sizeof sessionIdStr, contentLength); fLastCRLF[2] = '\r'; // restore its value if (parseSucceeded) { #ifdef DEBUG fprintf(stderr, "parseRTSPRequestString() succeeded, returning cmdName \"%s\", urlPreSuffix \"%s\", urlSuffix \"%s\", CSeq \"%s\", Content-Length %u, with %ld bytes following the message.\n", cmdName, urlPreSuffix, urlSuffix, cseq, contentLength, ptr + newBytesRead - (tmpPtr + 2)); #endif // If there was a "Content-Length:" header, then make sure we've received all of the data that it specified: if (ptr + newBytesRead < tmpPtr + 2 + contentLength) break; // we still need more data; subsequent reads will give it to us // If the request included a "Session:" id, and it refers to a client session that's // current ongoing, then use this command to indicate 'liveness' on that client session: Boolean 
const requestIncludedSessionId = sessionIdStr[0] != '\0'; if (requestIncludedSessionId) { clientSession = (RTSPServer::RTSPClientSession*)(fOurServer.fClientSessions->Lookup(sessionIdStr)); if (clientSession != NULL) clientSession->noteLiveness(); } // We now have a complete RTSP request. // Handle the specified command (beginning with commands that are session-independent): fCurrentCSeq = cseq; if (strcmp(cmdName, "OPTIONS") == 0) { handleCmd_OPTIONS(); } else if (urlPreSuffix[0] == '\0' && urlSuffix[0] == '*' && urlSuffix[1] == '\0') { // The special "*" URL means: an operation on the entire server. This works only for GET_PARAMETER and SET_PARAMETER: if (strcmp(cmdName, "GET_PARAMETER") == 0) { handleCmd_GET_PARAMETER((char const*)fRequestBuffer); } else if (strcmp(cmdName, "SET_PARAMETER") == 0) { handleCmd_SET_PARAMETER((char const*)fRequestBuffer); } else { handleCmd_notSupported(); } } else if (strcmp(cmdName, "DESCRIBE") == 0) { handleCmd_DESCRIBE(urlPreSuffix, urlSuffix, (char const*)fRequestBuffer); } else if (strcmp(cmdName, "SETUP") == 0) { if (!requestIncludedSessionId) { // No session id was present in the request. So create a new "RTSPClientSession" object // for this request. Choose a random (unused) 32-bit integer for the session id // (it will be encoded as a 8-digit hex number). (We avoid choosing session id 0, // because that has a special use (by "OnDemandServerMediaSubsession").) u_int32_t sessionId; do { sessionId = (u_int32_t)our_random32(); sprintf(sessionIdStr, "%08X", sessionId); } while (sessionId == 0 || fOurServer.fClientSessions->Lookup(sessionIdStr) != NULL); clientSession = fOurServer.createNewClientSession(sessionId); fOurServer.fClientSessions->Add(sessionIdStr, clientSession); } if (clientSession != NULL) { clientSession->handleCmd_SETUP(this, urlPreSuffix, urlSuffix, (char const*)fRequestBuffer); } else { handleCmd_sessionNotFound(); } } else if (strcmp(cmdName, "TEARDOWN") == 0 || strcmp(cmdName, "PLAY") == 0 || strcmp(cmdName, "PAUSE") == 0 || strcmp(cmdName, "GET_PARAMETER") == 0 || strcmp(cmdName, "SET_PARAMETER") == 0) { if (clientSession != NULL) { clientSession->handleCmd_withinSession(this, cmdName, urlPreSuffix, urlSuffix, (char const*)fRequestBuffer); } else { handleCmd_sessionNotFound(); } } else if (strcmp(cmdName, "REGISTER") == 0) { // Because - unlike other commands - an implementation of this command needs // the entire URL, we re-parse the command to get it: char* url = strDupSize((char*)fRequestBuffer); if (sscanf((char*)fRequestBuffer, "%*s %s", url) == 1) { // Check for special command-specific parameters in a "Transport:" header: Boolean reuseConnection, deliverViaTCP; char* proxyURLSuffix; parseTransportHeaderForREGISTER((const char*)fRequestBuffer, reuseConnection, deliverViaTCP, proxyURLSuffix); handleCmd_REGISTER(url, urlSuffix, (char const*)fRequestBuffer, reuseConnection, deliverViaTCP, proxyURLSuffix); delete[] proxyURLSuffix; } else { handleCmd_bad(); } delete[] url; } else { // The command is one that we don't handle: handleCmd_notSupported(); } } else { #ifdef DEBUG fprintf(stderr, "parseRTSPRequestString() failed; checking now for HTTP commands (for RTSP-over-HTTP tunneling)...\n"); #endif // The request was not (valid) RTSP, but check for a special case: HTTP commands (for setting up RTSP-over-HTTP tunneling): char sessionCookie[RTSP_PARAM_STRING_MAX]; char acceptStr[RTSP_PARAM_STRING_MAX]; *fLastCRLF = '\0'; // temporarily, for parsing parseSucceeded = parseHTTPRequestString(cmdName, sizeof cmdName, urlSuffix, sizeof 
urlPreSuffix, sessionCookie, sizeof sessionCookie, acceptStr, sizeof acceptStr); *fLastCRLF = '\r'; if (parseSucceeded) { #ifdef DEBUG fprintf(stderr, "parseHTTPRequestString() succeeded, returning cmdName \"%s\", urlSuffix \"%s\", sessionCookie \"%s\", acceptStr \"%s\"\n", cmdName, urlSuffix, sessionCookie, acceptStr); #endif // Check that the HTTP command is valid for RTSP-over-HTTP tunneling: There must be a 'session cookie'. Boolean isValidHTTPCmd = True; if (strcmp(cmdName, "OPTIONS") == 0) { handleHTTPCmd_OPTIONS(); } else if (sessionCookie[0] == '\0') { // There was no "x-sessioncookie:" header. If there was an "Accept: application/x-rtsp-tunnelled" header, // then this is a bad tunneling request. Otherwise, assume that it's an attempt to access the stream via HTTP. if (strcmp(acceptStr, "application/x-rtsp-tunnelled") == 0) { isValidHTTPCmd = False; } else { handleHTTPCmd_StreamingGET(urlSuffix, (char const*)fRequestBuffer); } } else if (strcmp(cmdName, "GET") == 0) { handleHTTPCmd_TunnelingGET(sessionCookie); } else if (strcmp(cmdName, "POST") == 0) { // We might have received additional data following the HTTP "POST" command - i.e., the first Base64-encoded RTSP command. // Check for this, and handle it if it exists: unsigned char const* extraData = fLastCRLF+4; unsigned extraDataSize = &fRequestBuffer[fRequestBytesAlreadySeen] - extraData; if (handleHTTPCmd_TunnelingPOST(sessionCookie, extraData, extraDataSize)) { // We don't respond to the "POST" command, and we go away: fIsActive = False; break; } } else { isValidHTTPCmd = False; } if (!isValidHTTPCmd) { handleHTTPCmd_notSupported(); } } else { #ifdef DEBUG fprintf(stderr, "parseHTTPRequestString() failed!\n"); #endif handleCmd_bad(); } } #ifdef DEBUG fprintf(stderr, "sending response: %s", fResponseBuffer); #endif send(fClientOutputSocket, (char const*)fResponseBuffer, strlen((char*)fResponseBuffer), 0); if (clientSession != NULL && clientSession->fStreamAfterSETUP && strcmp(cmdName, "SETUP") == 0) { // The client has asked for streaming to commence now, rather than after a // subsequent "PLAY" command. So, simulate the effect of a "PLAY" command: clientSession->handleCmd_withinSession(this, "PLAY", urlPreSuffix, urlSuffix, (char const*)fRequestBuffer); } // Check whether there are extra bytes remaining in the buffer, after the end of the request (a rare case). // If so, move them to the front of our buffer, and keep processing it, because it might be a following, pipelined request. unsigned requestSize = (fLastCRLF+4-fRequestBuffer) + contentLength; numBytesRemaining = fRequestBytesAlreadySeen - requestSize; resetRequestBuffer(); // to prepare for any subsequent request if (numBytesRemaining > 0) { memmove(fRequestBuffer, &fRequestBuffer[requestSize], numBytesRemaining); newBytesRead = numBytesRemaining; } } while (numBytesRemaining > 0); --fRecursionCount; if (!fIsActive) { if (fRecursionCount > 0) closeSockets(); else delete this; // Note: The "fRecursionCount" test is for a pathological situation where we reenter the event loop and get called recursively // while handling a command (e.g., while handling a "DESCRIBE", to get a SDP description). // In such a case we don't want to actually delete ourself until we leave the outermost call. 
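// (To illustrate the pipelining case that the "do ... while (numBytesRemaining > 0)" loop
// above handles: if a client sends "OPTIONS ...\r\n\r\nDESCRIBE ...\r\n\r\n" within a
// single TCP segment, the first iteration parses and answers the "OPTIONS" request, the
// trailing "DESCRIBE" bytes are memmove()d to the front of "fRequestBuffer", and the loop
// runs again with "newBytesRead" set to the number of leftover bytes - no additional
// read from the socket is needed for the second request.)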
} } static Boolean parseAuthorizationHeader(char const* buf, char const*& username, char const*& realm, char const*& nonce, char const*& uri, char const*& response) { // Initialize the result parameters to default values: username = realm = nonce = uri = response = NULL; // First, find "Authorization:" while (1) { if (*buf == '\0') return False; // not found if (_strncasecmp(buf, "Authorization: Digest ", 22) == 0) break; ++buf; } // Then, run through each of the fields, looking for ones we handle: char const* fields = buf + 22; while (*fields == ' ') ++fields; char* parameter = strDupSize(fields); char* value = strDupSize(fields); while (1) { value[0] = '\0'; if (sscanf(fields, "%[^=]=\"%[^\"]\"", parameter, value) != 2 && sscanf(fields, "%[^=]=\"\"", parameter) != 1) { break; } if (strcmp(parameter, "username") == 0) { username = strDup(value); } else if (strcmp(parameter, "realm") == 0) { realm = strDup(value); } else if (strcmp(parameter, "nonce") == 0) { nonce = strDup(value); } else if (strcmp(parameter, "uri") == 0) { uri = strDup(value); } else if (strcmp(parameter, "response") == 0) { response = strDup(value); } fields += strlen(parameter) + 2 /*="*/ + strlen(value) + 1 /*"*/; while (*fields == ',' || *fields == ' ') ++fields; // skip over any separating ',' and ' ' chars if (*fields == '\0' || *fields == '\r' || *fields == '\n') break; } delete[] parameter; delete[] value; return True; } Boolean RTSPServer::RTSPClientConnection ::authenticationOK(char const* cmdName, char const* urlSuffix, char const* fullRequestStr) { if (!fOurServer.specialClientAccessCheck(fClientInputSocket, fClientAddr, urlSuffix)) { setRTSPResponse("401 Unauthorized"); return False; } // If we weren't set up with an authentication database, we're OK: UserAuthenticationDatabase* authDB = fOurServer.getAuthenticationDatabaseForCommand(cmdName); if (authDB == NULL) return True; char const* username = NULL; char const* realm = NULL; char const* nonce = NULL; char const* uri = NULL; char const* response = NULL; Boolean success = False; do { // To authenticate, we first need to have a nonce set up // from a previous attempt: if (fCurrentAuthenticator.nonce() == NULL) break; // Next, the request needs to contain an "Authorization:" header, // containing a username, (our) realm, (our) nonce, uri, // and response string: if (!parseAuthorizationHeader(fullRequestStr, username, realm, nonce, uri, response) || username == NULL || realm == NULL || strcmp(realm, fCurrentAuthenticator.realm()) != 0 || nonce == NULL || strcmp(nonce, fCurrentAuthenticator.nonce()) != 0 || uri == NULL || response == NULL) { break; } // Next, the username has to be known to us: char const* password = authDB->lookupPassword(username); #ifdef DEBUG fprintf(stderr, "lookupPassword(%s) returned password %s\n", username, password); #endif if (password == NULL) break; fCurrentAuthenticator.setUsernameAndPassword(username, password, authDB->passwordsAreMD5()); // Finally, compute a digest response from the information that we have, // and compare it to the one that we were given: char const* ourResponse = fCurrentAuthenticator.computeDigestResponse(cmdName, uri); success = (strcmp(ourResponse, response) == 0); fCurrentAuthenticator.reclaimDigestResponse(ourResponse); } while (0); delete[] (char*)realm; delete[] (char*)nonce; delete[] (char*)uri; delete[] (char*)response; if (success) { // The user has been authenticated. // Now allow subclasses a chance to validate the user against the IP address and/or URL suffix. 
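// (As a hedged illustration - "MyRTSPServer" is a hypothetical subclass name - such an
// override could, for example, confine each authenticated user to streams whose names
// begin with "<username>/":
//
//   Boolean MyRTSPServer::specialClientUserAccessCheck(int /*clientSocket*/,
//       struct sockaddr_in& /*clientAddr*/, char const* urlSuffix, char const* username) {
//     unsigned const usernameLen = strlen(username);
//     return strncmp(urlSuffix, username, usernameLen) == 0 && urlSuffix[usernameLen] == '/';
//   }
// )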
if (!fOurServer.specialClientUserAccessCheck(fClientInputSocket, fClientAddr, urlSuffix, username)) { // Note: We don't return a "WWW-Authenticate" header here, because the user is valid, // even though the server has decided that they should not have access. setRTSPResponse("401 Unauthorized"); delete[] (char*)username; return False; } } delete[] (char*)username; if (success) return True; // If we get here, we failed to authenticate the user. // Send back a "401 Unauthorized" response, with a new random nonce: fCurrentAuthenticator.setRealmAndRandomNonce(authDB->realm()); snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "RTSP/1.0 401 Unauthorized\r\n" "CSeq: %s\r\n" "%s" "WWW-Authenticate: Digest realm=\"%s\", nonce=\"%s\"\r\n\r\n", fCurrentCSeq, dateHeader(), fCurrentAuthenticator.realm(), fCurrentAuthenticator.nonce()); return False; } void RTSPServer::RTSPClientConnection ::setRTSPResponse(char const* responseStr) { snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "RTSP/1.0 %s\r\n" "CSeq: %s\r\n" "%s\r\n", responseStr, fCurrentCSeq, dateHeader()); } void RTSPServer::RTSPClientConnection ::setRTSPResponse(char const* responseStr, u_int32_t sessionId) { snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "RTSP/1.0 %s\r\n" "CSeq: %s\r\n" "%s" "Session: %08X\r\n\r\n", responseStr, fCurrentCSeq, dateHeader(), sessionId); } void RTSPServer::RTSPClientConnection ::setRTSPResponse(char const* responseStr, char const* contentStr) { if (contentStr == NULL) contentStr = ""; unsigned const contentLen = strlen(contentStr); snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "RTSP/1.0 %s\r\n" "CSeq: %s\r\n" "%s" "Content-Length: %d\r\n\r\n" "%s", responseStr, fCurrentCSeq, dateHeader(), contentLen, contentStr); } void RTSPServer::RTSPClientConnection ::setRTSPResponse(char const* responseStr, u_int32_t sessionId, char const* contentStr) { if (contentStr == NULL) contentStr = ""; unsigned const contentLen = strlen(contentStr); snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "RTSP/1.0 %s\r\n" "CSeq: %s\r\n" "%s" "Session: %08X\r\n" "Content-Length: %d\r\n\r\n" "%s", responseStr, fCurrentCSeq, dateHeader(), sessionId, contentLen, contentStr); } void RTSPServer::RTSPClientConnection ::changeClientInputSocket(int newSocketNum, unsigned char const* extraData, unsigned extraDataSize) { envir().taskScheduler().disableBackgroundHandling(fClientInputSocket); fClientInputSocket = newSocketNum; envir().taskScheduler().setBackgroundHandling(fClientInputSocket, SOCKET_READABLE|SOCKET_EXCEPTION, (TaskScheduler::BackgroundHandlerProc*)&incomingRequestHandler, this); // Also write any extra data to our buffer, and handle it: if (extraDataSize > 0 && extraDataSize <= fRequestBufferBytesLeft/*sanity check; should always be true*/) { unsigned char* ptr = &fRequestBuffer[fRequestBytesAlreadySeen]; for (unsigned i = 0; i < extraDataSize; ++i) { ptr[i] = extraData[i]; } handleRequestBytes(extraDataSize); } } void RTSPServer::RTSPClientConnection::continueHandlingREGISTER(ParamsForREGISTER* params) { params->fOurConnection->continueHandlingREGISTER1(params); } void RTSPServer::RTSPClientConnection::continueHandlingREGISTER1(ParamsForREGISTER* params) { // Reuse our socket if requested: int socketNumToBackEndServer = params->fReuseConnection ? 
fClientOutputSocket : -1; RTSPServer* ourServer = &fOurServer; // copy the pointer now, in case we "delete this" below if (socketNumToBackEndServer >= 0) { // Because our socket will no longer be used by the server to handle incoming requests, we can now delete this // "RTSPClientConnection" object. We do this now, in case the "implementCmd_REGISTER()" call below would also end up // deleting this. fClientInputSocket = fClientOutputSocket = -1; // so the socket doesn't get closed when we get deleted delete this; } ourServer->implementCmd_REGISTER(params->fURL, params->fURLSuffix, socketNumToBackEndServer, params->fDeliverViaTCP, params->fProxyURLSuffix); delete params; } ////////// RTSPServer::RTSPClientSession implementation ////////// RTSPServer::RTSPClientSession ::RTSPClientSession(RTSPServer& ourServer, u_int32_t sessionId) : fOurServer(ourServer), fOurSessionId(sessionId), fOurServerMediaSession(NULL), fIsMulticast(False), fStreamAfterSETUP(False), fTCPStreamIdCount(0), fLivenessCheckTask(NULL), fNumStreamStates(0), fStreamStates(NULL) { noteLiveness(); } RTSPServer::RTSPClientSession::~RTSPClientSession() { // Turn off any liveness checking: envir().taskScheduler().unscheduleDelayedTask(fLivenessCheckTask); // Remove ourself from the server's 'client sessions' hash table before we go: char sessionIdStr[9]; sprintf(sessionIdStr, "%08X", fOurSessionId); fOurServer.fClientSessions->Remove(sessionIdStr); reclaimStreamStates(); if (fOurServerMediaSession != NULL) { fOurServerMediaSession->decrementReferenceCount(); if (fOurServerMediaSession->referenceCount() == 0 && fOurServerMediaSession->deleteWhenUnreferenced()) { fOurServer.removeServerMediaSession(fOurServerMediaSession); fOurServerMediaSession = NULL; } } } void RTSPServer::RTSPClientSession::reclaimStreamStates() { for (unsigned i = 0; i < fNumStreamStates; ++i) { if (fStreamStates[i].subsession != NULL) { fStreamStates[i].subsession->deleteStream(fOurSessionId, fStreamStates[i].streamToken); } } delete[] fStreamStates; fStreamStates = NULL; fNumStreamStates = 0; } typedef enum StreamingMode { RTP_UDP, RTP_TCP, RAW_UDP } StreamingMode; static void parseTransportHeader(char const* buf, StreamingMode& streamingMode, char*& streamingModeString, char*& destinationAddressStr, u_int8_t& destinationTTL, portNumBits& clientRTPPortNum, // if UDP portNumBits& clientRTCPPortNum, // if UDP unsigned char& rtpChannelId, // if TCP unsigned char& rtcpChannelId // if TCP ) { // Initialize the result parameters to default values: streamingMode = RTP_UDP; streamingModeString = NULL; destinationAddressStr = NULL; destinationTTL = 255; clientRTPPortNum = 0; clientRTCPPortNum = 1; rtpChannelId = rtcpChannelId = 0xFF; portNumBits p1, p2; unsigned ttl, rtpCid, rtcpCid; // First, find "Transport:" while (1) { if (*buf == '\0') return; // not found if (*buf == '\r' && *(buf+1) == '\n' && *(buf+2) == '\r') return; // end of the headers => not found if (_strncasecmp(buf, "Transport:", 10) == 0) break; ++buf; } // Then, run through each of the fields, looking for ones we handle: char const* fields = buf + 10; while (*fields == ' ') ++fields; char* field = strDupSize(fields); while (sscanf(fields, "%[^;\r\n]", field) == 1) { if (strcmp(field, "RTP/AVP/TCP") == 0) { streamingMode = RTP_TCP; } else if (strcmp(field, "RAW/RAW/UDP") == 0 || strcmp(field, "MP2T/H2221/UDP") == 0) { streamingMode = RAW_UDP; streamingModeString = strDup(field); } else if (_strncasecmp(field, "destination=", 12) == 0) { delete[] destinationAddressStr; destinationAddressStr = 
strDup(field+12); } else if (sscanf(field, "ttl%u", &ttl) == 1) { destinationTTL = (u_int8_t)ttl; } else if (sscanf(field, "client_port=%hu-%hu", &p1, &p2) == 2) { clientRTPPortNum = p1; clientRTCPPortNum = streamingMode == RAW_UDP ? 0 : p2; // ignore the second port number if the client asked for raw UDP } else if (sscanf(field, "client_port=%hu", &p1) == 1) { clientRTPPortNum = p1; clientRTCPPortNum = streamingMode == RAW_UDP ? 0 : p1 + 1; } else if (sscanf(field, "interleaved=%u-%u", &rtpCid, &rtcpCid) == 2) { rtpChannelId = (unsigned char)rtpCid; rtcpChannelId = (unsigned char)rtcpCid; } fields += strlen(field); while (*fields == ';' || *fields == ' ' || *fields == '\t') ++fields; // skip over separating ';' chars or whitespace if (*fields == '\0' || *fields == '\r' || *fields == '\n') break; } delete[] field; } static Boolean parsePlayNowHeader(char const* buf) { // Find "x-playNow:" header, if present while (1) { if (*buf == '\0') return False; // not found if (_strncasecmp(buf, "x-playNow:", 10) == 0) break; ++buf; } return True; } void RTSPServer::RTSPClientSession ::handleCmd_SETUP(RTSPServer::RTSPClientConnection* ourClientConnection, char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr) { // Normally, "urlPreSuffix" should be the session (stream) name, and "urlSuffix" should be the subsession (track) name. // However (being "liberal in what we accept"), we also handle 'aggregate' SETUP requests (i.e., without a track name), // in the special case where we have only a single track. I.e., in this case, we also handle: // "urlPreSuffix" is empty and "urlSuffix" is the session (stream) name, or // "urlPreSuffix" concatenated with "urlSuffix" (with "/" inbetween) is the session (stream) name. char const* streamName = urlPreSuffix; // in the normal case char const* trackId = urlSuffix; // in the normal case char* concatenatedStreamName = NULL; // in the normal case do { // First, make sure the specified stream name exists: ServerMediaSession* sms = fOurServer.lookupServerMediaSession(streamName); if (sms == NULL) { // Check for the special case (noted above), before we give up: if (urlPreSuffix[0] == '\0') { streamName = urlSuffix; } else { concatenatedStreamName = new char[strlen(urlPreSuffix) + strlen(urlSuffix) + 2]; // allow for the "/" and the trailing '\0' sprintf(concatenatedStreamName, "%s/%s", urlPreSuffix, urlSuffix); streamName = concatenatedStreamName; } trackId = NULL; // Check again: sms = fOurServer.lookupServerMediaSession(streamName); } if (sms == NULL) { if (fOurServerMediaSession == NULL) { // The client asked for a stream that doesn't exist (and this session descriptor has not been used before): ourClientConnection->handleCmd_notFound(); } else { // The client asked for a stream that doesn't exist, but using a stream id for a stream that does exist. Bad request: ourClientConnection->handleCmd_bad(); } break; } else { if (fOurServerMediaSession == NULL) { // We're accessing the "ServerMediaSession" for the first time. fOurServerMediaSession = sms; fOurServerMediaSession->incrementReferenceCount(); } else if (sms != fOurServerMediaSession) { // The client asked for a stream that's different from the one originally requested for this stream id. Bad request: ourClientConnection->handleCmd_bad(); break; } } if (fStreamStates == NULL) { // This is the first "SETUP" for this session. 
// Set up our array of states for all of this session's subsessions (tracks):
ServerMediaSubsessionIterator iter(*fOurServerMediaSession); for (fNumStreamStates = 0; iter.next() != NULL; ++fNumStreamStates) {} // begin by counting the number of subsessions (tracks)
fStreamStates = new struct streamState[fNumStreamStates]; iter.reset(); ServerMediaSubsession* subsession; for (unsigned i = 0; i < fNumStreamStates; ++i) { subsession = iter.next(); fStreamStates[i].subsession = subsession; fStreamStates[i].streamToken = NULL; // for now; it may be changed by the "getStreamParameters()" call that comes later
} } // Look up information for the specified subsession (track):
ServerMediaSubsession* subsession = NULL; unsigned streamNum; if (trackId != NULL && trackId[0] != '\0') { // normal case
for (streamNum = 0; streamNum < fNumStreamStates; ++streamNum) { subsession = fStreamStates[streamNum].subsession; if (subsession != NULL && strcmp(trackId, subsession->trackId()) == 0) break; } if (streamNum >= fNumStreamStates) { // The specified track id doesn't exist, so this request fails:
ourClientConnection->handleCmd_notFound(); break; } } else { // Weird case: there was no track id in the URL.
// This works only if we have only one subsession:
if (fNumStreamStates != 1 || fStreamStates[0].subsession == NULL) { ourClientConnection->handleCmd_bad(); break; } streamNum = 0; subsession = fStreamStates[streamNum].subsession; } // ASSERT: subsession != NULL
// Look for a "Transport:" header in the request string, to extract client parameters:
StreamingMode streamingMode; char* streamingModeString = NULL; // set when RAW_UDP streaming is specified
char* clientsDestinationAddressStr; u_int8_t clientsDestinationTTL; portNumBits clientRTPPortNum, clientRTCPPortNum; unsigned char rtpChannelId, rtcpChannelId; parseTransportHeader(fullRequestStr, streamingMode, streamingModeString, clientsDestinationAddressStr, clientsDestinationTTL, clientRTPPortNum, clientRTCPPortNum, rtpChannelId, rtcpChannelId); if ((streamingMode == RTP_TCP && rtpChannelId == 0xFF) || (streamingMode != RTP_TCP && ourClientConnection->fClientOutputSocket != ourClientConnection->fClientInputSocket)) { // An anomalous situation, caused by a buggy client. Either:
// 1/ TCP streaming was requested, but with no "interleaving=" fields. (QuickTime Player sometimes does this.), or
// 2/ TCP streaming was not requested, but we're doing RTSP-over-HTTP tunneling (which implies TCP streaming).
// In either case, we assume TCP streaming, and set the RTP and RTCP channel ids to proper values:
streamingMode = RTP_TCP; rtpChannelId = fTCPStreamIdCount; rtcpChannelId = fTCPStreamIdCount+1; } if (streamingMode == RTP_TCP) fTCPStreamIdCount += 2; Port clientRTPPort(clientRTPPortNum); Port clientRTCPPort(clientRTCPPortNum); // Next, check whether a "Range:" or "x-playNow:" header is present in the request.
// This isn't legal, but some clients do this to combine "SETUP" and "PLAY":
double rangeStart = 0.0, rangeEnd = 0.0; char* absStart = NULL; char* absEnd = NULL; if (parseRangeHeader(fullRequestStr, rangeStart, rangeEnd, absStart, absEnd)) { delete[] absStart; delete[] absEnd; fStreamAfterSETUP = True; } else if (parsePlayNowHeader(fullRequestStr)) { fStreamAfterSETUP = True; } else { fStreamAfterSETUP = False; } // Then, get server parameters from the 'subsession':
int tcpSocketNum = streamingMode == RTP_TCP ?
ourClientConnection->fClientOutputSocket : -1; netAddressBits destinationAddress = 0; u_int8_t destinationTTL = 255; #ifdef RTSP_ALLOW_CLIENT_DESTINATION_SETTING if (clientsDestinationAddressStr != NULL) { // Use the client-provided "destination" address. // Note: This potentially allows the server to be used in denial-of-service // attacks, so don't enable this code unless you're sure that clients are // trusted. destinationAddress = our_inet_addr(clientsDestinationAddressStr); } // Also use the client-provided TTL. destinationTTL = clientsDestinationTTL; #endif delete[] clientsDestinationAddressStr; Port serverRTPPort(0); Port serverRTCPPort(0); // Make sure that we transmit on the same interface that's used by the client (in case we're a multi-homed server): struct sockaddr_in sourceAddr; SOCKLEN_T namelen = sizeof sourceAddr; getsockname(ourClientConnection->fClientInputSocket, (struct sockaddr*)&sourceAddr, &namelen); netAddressBits origSendingInterfaceAddr = SendingInterfaceAddr; netAddressBits origReceivingInterfaceAddr = ReceivingInterfaceAddr; // NOTE: The following might not work properly, so we ifdef it out for now: #ifdef HACK_FOR_MULTIHOMED_SERVERS ReceivingInterfaceAddr = SendingInterfaceAddr = sourceAddr.sin_addr.s_addr; #endif subsession->getStreamParameters(fOurSessionId, ourClientConnection->fClientAddr.sin_addr.s_addr, clientRTPPort, clientRTCPPort, tcpSocketNum, rtpChannelId, rtcpChannelId, destinationAddress, destinationTTL, fIsMulticast, serverRTPPort, serverRTCPPort, fStreamStates[streamNum].streamToken); SendingInterfaceAddr = origSendingInterfaceAddr; ReceivingInterfaceAddr = origReceivingInterfaceAddr; AddressString destAddrStr(destinationAddress); AddressString sourceAddrStr(sourceAddr); char timeoutParameterString[100]; if (fOurServer.fReclamationTestSeconds > 0) { sprintf(timeoutParameterString, ";timeout=%u", fOurServer.fReclamationTestSeconds); } else { timeoutParameterString[0] = '\0'; } if (fIsMulticast) { switch (streamingMode) { case RTP_UDP: { snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer, "RTSP/1.0 200 OK\r\n" "CSeq: %s\r\n" "%s" "Transport: RTP/AVP;multicast;destination=%s;source=%s;port=%d-%d;ttl=%d\r\n" "Session: %08X%s\r\n\r\n", ourClientConnection->fCurrentCSeq, dateHeader(), destAddrStr.val(), sourceAddrStr.val(), ntohs(serverRTPPort.num()), ntohs(serverRTCPPort.num()), destinationTTL, fOurSessionId, timeoutParameterString); break; } case RTP_TCP: { // multicast streams can't be sent via TCP ourClientConnection->handleCmd_unsupportedTransport(); break; } case RAW_UDP: { snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer, "RTSP/1.0 200 OK\r\n" "CSeq: %s\r\n" "%s" "Transport: %s;multicast;destination=%s;source=%s;port=%d;ttl=%d\r\n" "Session: %08X%s\r\n\r\n", ourClientConnection->fCurrentCSeq, dateHeader(), streamingModeString, destAddrStr.val(), sourceAddrStr.val(), ntohs(serverRTPPort.num()), destinationTTL, fOurSessionId, timeoutParameterString); break; } } } else { switch (streamingMode) { case RTP_UDP: { snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer, "RTSP/1.0 200 OK\r\n" "CSeq: %s\r\n" "%s" "Transport: RTP/AVP;unicast;destination=%s;source=%s;client_port=%d-%d;server_port=%d-%d\r\n" "Session: %08X%s\r\n\r\n", ourClientConnection->fCurrentCSeq, dateHeader(), destAddrStr.val(), sourceAddrStr.val(), ntohs(clientRTPPort.num()), ntohs(clientRTCPPort.num()), ntohs(serverRTPPort.num()), 
ntohs(serverRTCPPort.num()), fOurSessionId, timeoutParameterString); break; } case RTP_TCP: { snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer, "RTSP/1.0 200 OK\r\n" "CSeq: %s\r\n" "%s" "Transport: RTP/AVP/TCP;unicast;destination=%s;source=%s;interleaved=%d-%d\r\n" "Session: %08X%s\r\n\r\n", ourClientConnection->fCurrentCSeq, dateHeader(), destAddrStr.val(), sourceAddrStr.val(), rtpChannelId, rtcpChannelId, fOurSessionId, timeoutParameterString); break; } case RAW_UDP: { snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer, "RTSP/1.0 200 OK\r\n" "CSeq: %s\r\n" "%s" "Transport: %s;unicast;destination=%s;source=%s;client_port=%d;server_port=%d\r\n" "Session: %08X%s\r\n\r\n", ourClientConnection->fCurrentCSeq, dateHeader(), streamingModeString, destAddrStr.val(), sourceAddrStr.val(), ntohs(clientRTPPort.num()), ntohs(serverRTPPort.num()), fOurSessionId, timeoutParameterString); break; } } } delete[] streamingModeString; } while (0); delete[] concatenatedStreamName; } void RTSPServer::RTSPClientSession ::handleCmd_withinSession(RTSPServer::RTSPClientConnection* ourClientConnection, char const* cmdName, char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr) { // This will either be: // - a non-aggregated operation, if "urlPreSuffix" is the session (stream) // name and "urlSuffix" is the subsession (track) name, or // - an aggregated operation, if "urlSuffix" is the session (stream) name, // or "urlPreSuffix" is the session (stream) name, and "urlSuffix" is empty, // or "urlPreSuffix" and "urlSuffix" are both nonempty, but when concatenated, (with "/") form the session (stream) name. // Begin by figuring out which of these it is: ServerMediaSubsession* subsession; if (fOurServerMediaSession == NULL) { // There wasn't a previous SETUP! ourClientConnection->handleCmd_notSupported(); return; } else if (urlSuffix[0] != '\0' && strcmp(fOurServerMediaSession->streamName(), urlPreSuffix) == 0) { // Non-aggregated operation. // Look up the media subsession whose track id is "urlSuffix": ServerMediaSubsessionIterator iter(*fOurServerMediaSession); while ((subsession = iter.next()) != NULL) { if (strcmp(subsession->trackId(), urlSuffix) == 0) break; // success } if (subsession == NULL) { // no such track! ourClientConnection->handleCmd_notFound(); return; } } else if (strcmp(fOurServerMediaSession->streamName(), urlSuffix) == 0 || (urlSuffix[0] == '\0' && strcmp(fOurServerMediaSession->streamName(), urlPreSuffix) == 0)) { // Aggregated operation subsession = NULL; } else if (urlPreSuffix[0] != '\0' && urlSuffix[0] != '\0') { // Aggregated operation, if "urlPreSuffix/urlSuffix" is the session (stream) name: unsigned const urlPreSuffixLen = strlen(urlPreSuffix); if (strncmp(fOurServerMediaSession->streamName(), urlPreSuffix, urlPreSuffixLen) == 0 && fOurServerMediaSession->streamName()[urlPreSuffixLen] == '/' && strcmp(&(fOurServerMediaSession->streamName())[urlPreSuffixLen+1], urlSuffix) == 0) { subsession = NULL; } else { ourClientConnection->handleCmd_notFound(); return; } } else { // the request doesn't match a known stream and/or track at all!
ourClientConnection->handleCmd_notFound(); return; } if (strcmp(cmdName, "TEARDOWN") == 0) { handleCmd_TEARDOWN(ourClientConnection, subsession); } else if (strcmp(cmdName, "PLAY") == 0) { handleCmd_PLAY(ourClientConnection, subsession, fullRequestStr); } else if (strcmp(cmdName, "PAUSE") == 0) { handleCmd_PAUSE(ourClientConnection, subsession); } else if (strcmp(cmdName, "GET_PARAMETER") == 0) { handleCmd_GET_PARAMETER(ourClientConnection, subsession, fullRequestStr); } else if (strcmp(cmdName, "SET_PARAMETER") == 0) { handleCmd_SET_PARAMETER(ourClientConnection, subsession, fullRequestStr); } } void RTSPServer::RTSPClientSession ::handleCmd_TEARDOWN(RTSPServer::RTSPClientConnection* ourClientConnection, ServerMediaSubsession* subsession) { unsigned i; for (i = 0; i < fNumStreamStates; ++i) { if (subsession == NULL /* means: aggregated operation */ || subsession == fStreamStates[i].subsession) { if (fStreamStates[i].subsession != NULL) { fStreamStates[i].subsession->deleteStream(fOurSessionId, fStreamStates[i].streamToken); fStreamStates[i].subsession = NULL; } } } setRTSPResponse(ourClientConnection, "200 OK"); // Optimization: If all subsessions have now been torn down, then we know that we can reclaim our object now. // (Without this optimization, however, this object would still get reclaimed later, as a result of a 'liveness' timeout.) Boolean noSubsessionsRemain = True; for (i = 0; i < fNumStreamStates; ++i) { if (fStreamStates[i].subsession != NULL) { noSubsessionsRemain = False; break; } } if (noSubsessionsRemain) delete this; } void RTSPServer::RTSPClientSession ::handleCmd_PLAY(RTSPServer::RTSPClientConnection* ourClientConnection, ServerMediaSubsession* subsession, char const* fullRequestStr) { char* rtspURL = fOurServer.rtspURL(fOurServerMediaSession, ourClientConnection->fClientInputSocket); unsigned rtspURLSize = strlen(rtspURL); // Parse the client's "Scale:" header, if any: float scale; Boolean sawScaleHeader = parseScaleHeader(fullRequestStr, scale); // Try to set the stream's scale factor to this value: if (subsession == NULL /*aggregate op*/) { fOurServerMediaSession->testScaleFactor(scale); } else { subsession->testScaleFactor(scale); } char buf[100]; char* scaleHeader; if (!sawScaleHeader) { buf[0] = '\0'; // Because we didn't see a Scale: header, don't send one back } else { sprintf(buf, "Scale: %f\r\n", scale); } scaleHeader = strDup(buf); // Parse the client's "Range:" header, if any: float duration = 0.0; double rangeStart = 0.0, rangeEnd = 0.0; char* absStart = NULL; char* absEnd = NULL; Boolean sawRangeHeader = parseRangeHeader(fullRequestStr, rangeStart, rangeEnd, absStart, absEnd); if (sawRangeHeader && absStart == NULL/*not seeking by 'absolute' time*/) { // Use this information, plus the stream's duration (if known), to create our own "Range:" header, for the response: duration = subsession == NULL /*aggregate op*/ ? fOurServerMediaSession->duration() : subsession->duration(); if (duration < 0.0) { // We're an aggregate PLAY, but the subsessions have different durations. 
// Use the largest of these durations in our header duration = -duration; } // Make sure that "rangeStart" and "rangeEnd" (from the client's "Range:" header) have sane values // before we send back our own "Range:" header in our response: if (rangeStart < 0.0) rangeStart = 0.0; else if (rangeStart > duration) rangeStart = duration; if (rangeEnd < 0.0) rangeEnd = 0.0; else if (rangeEnd > duration) rangeEnd = duration; if ((scale > 0.0 && rangeStart > rangeEnd && rangeEnd > 0.0) || (scale < 0.0 && rangeStart < rangeEnd)) { // "rangeStart" and "rangeEnd" were the wrong way around; swap them: double tmp = rangeStart; rangeStart = rangeEnd; rangeEnd = tmp; } } // Create a "RTP-Info:" line. It will get filled in from each subsession's state: char const* rtpInfoFmt = "%s" // "RTP-Info:", plus any preceding rtpInfo items "%s" // comma separator, if needed "url=%s/%s" ";seq=%d" ";rtptime=%u" ; unsigned rtpInfoFmtSize = strlen(rtpInfoFmt); char* rtpInfo = strDup("RTP-Info: "); unsigned i, numRTPInfoItems = 0; // Do any required seeking/scaling on each subsession, before starting streaming. // (However, we don't do this if the "PLAY" request was for just a single subsession of a multiple-subsession stream; // for such streams, seeking/scaling can be done only with an aggregate "PLAY".) for (i = 0; i < fNumStreamStates; ++i) { if (subsession == NULL /* means: aggregated operation */ || fNumStreamStates == 1) { if (sawScaleHeader) { if (fStreamStates[i].subsession != NULL) { fStreamStates[i].subsession->setStreamScale(fOurSessionId, fStreamStates[i].streamToken, scale); } } if (sawRangeHeader) { if (absStart != NULL) { // Special case handling for seeking by 'absolute' time: if (fStreamStates[i].subsession != NULL) { fStreamStates[i].subsession->seekStream(fOurSessionId, fStreamStates[i].streamToken, absStart, absEnd); } } else { // Seeking by relative (NPT) time: double streamDuration = 0.0; // by default; means: stream until the end of the media if (rangeEnd > 0.0 && (rangeEnd+0.001) < duration) { // the 0.001 is because we limited the values to 3 decimal places // We want the stream to end early. Set the duration we want: streamDuration = rangeEnd - rangeStart; if (streamDuration < 0.0) streamDuration = -streamDuration; // should happen only if scale < 0.0 } if (fStreamStates[i].subsession != NULL) { u_int64_t numBytes; fStreamStates[i].subsession->seekStream(fOurSessionId, fStreamStates[i].streamToken, rangeStart, streamDuration, numBytes); } } } else { // No "Range:" header was specified in the "PLAY", so we do a 'null' seek (i.e., we don't seek at all): if (fStreamStates[i].subsession != NULL) { fStreamStates[i].subsession->nullSeekStream(fOurSessionId, fStreamStates[i].streamToken); } } } } // Create the "Range:" header that we'll send back in our response. // (Note that we do this after seeking, in case the seeking operation changed the range start time.) 
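// Illustrative aside (ours, not from the original file): a worked example of the
// NPT arithmetic above.  Suppose a client sent "Range: npt=10-12" for a
// 15-second stream, with scale 1.0.  Clamping leaves rangeStart == 10.0 and
// rangeEnd == 12.0, and no swap is needed.  Because rangeEnd+0.001 < duration,
// each subsession is seeked to t=10s with streamDuration == 12.0 - 10.0 == 2.0
// seconds, and the response header formed next reads "Range: npt=10.000-12.000".
// (Had the client asked for npt=10-20, rangeEnd would be clamped to 15.0 and
// streamDuration left at 0.0, i.e., stream until the end of the media.)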
char* rangeHeader; if (!sawRangeHeader) { // There wasn't a "Range:" header in the request, so, in our response, begin the range with the current NPT (normal play time): float curNPT = 0.0; for (i = 0; i < fNumStreamStates; ++i) { if (subsession == NULL /* means: aggregated operation */ || subsession == fStreamStates[i].subsession) { if (fStreamStates[i].subsession == NULL) continue; float npt = fStreamStates[i].subsession->getCurrentNPT(fStreamStates[i].streamToken); if (npt > curNPT) curNPT = npt; // Note: If this is an aggregate "PLAY" on a multi-subsession stream, then it's conceivable that the NPTs of each subsession // may differ (if there has been a previous seek on just one subsession). In this (unusual) case, we just return the // largest NPT; I hope that turns out OK... } } sprintf(buf, "Range: npt=%.3f-\r\n", curNPT); } else if (absStart != NULL) { // We're seeking by 'absolute' time: if (absEnd == NULL) { sprintf(buf, "Range: clock=%s-\r\n", absStart); } else { sprintf(buf, "Range: clock=%s-%s\r\n", absStart, absEnd); } delete[] absStart; delete[] absEnd; } else { // We're seeking by relative (NPT) time: if (rangeEnd == 0.0 && scale >= 0.0) { sprintf(buf, "Range: npt=%.3f-\r\n", rangeStart); } else { sprintf(buf, "Range: npt=%.3f-%.3f\r\n", rangeStart, rangeEnd); } } rangeHeader = strDup(buf); // Now, start streaming: for (i = 0; i < fNumStreamStates; ++i) { if (subsession == NULL /* means: aggregated operation */ || subsession == fStreamStates[i].subsession) { unsigned short rtpSeqNum = 0; unsigned rtpTimestamp = 0; if (fStreamStates[i].subsession == NULL) continue; fStreamStates[i].subsession->startStream(fOurSessionId, fStreamStates[i].streamToken, (TaskFunc*)noteClientLiveness, this, rtpSeqNum, rtpTimestamp, RTSPServer::RTSPClientConnection::handleAlternativeRequestByte, ourClientConnection); const char *urlSuffix = fStreamStates[i].subsession->trackId(); char* prevRTPInfo = rtpInfo; unsigned rtpInfoSize = rtpInfoFmtSize + strlen(prevRTPInfo) + 1 + rtspURLSize + strlen(urlSuffix) + 5 /*max unsigned short len*/ + 10 /*max unsigned (32-bit) len*/ + 2 /*allows for trailing \r\n at final end of string*/; rtpInfo = new char[rtpInfoSize]; sprintf(rtpInfo, rtpInfoFmt, prevRTPInfo, numRTPInfoItems++ == 0 ? 
"" : ",", rtspURL, urlSuffix, rtpSeqNum, rtpTimestamp ); delete[] prevRTPInfo; } } if (numRTPInfoItems == 0) { rtpInfo[0] = '\0'; } else { unsigned rtpInfoLen = strlen(rtpInfo); rtpInfo[rtpInfoLen] = '\r'; rtpInfo[rtpInfoLen+1] = '\n'; rtpInfo[rtpInfoLen+2] = '\0'; } // Fill in the response: snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer, "RTSP/1.0 200 OK\r\n" "CSeq: %s\r\n" "%s" "%s" "%s" "Session: %08X\r\n" "%s\r\n", ourClientConnection->fCurrentCSeq, dateHeader(), scaleHeader, rangeHeader, fOurSessionId, rtpInfo); delete[] rtpInfo; delete[] rangeHeader; delete[] scaleHeader; delete[] rtspURL; } void RTSPServer::RTSPClientSession ::handleCmd_PAUSE(RTSPServer::RTSPClientConnection* ourClientConnection, ServerMediaSubsession* subsession) { for (unsigned i = 0; i < fNumStreamStates; ++i) { if (subsession == NULL /* means: aggregated operation */ || subsession == fStreamStates[i].subsession) { if (fStreamStates[i].subsession != NULL) { fStreamStates[i].subsession->pauseStream(fOurSessionId, fStreamStates[i].streamToken); } } } setRTSPResponse(ourClientConnection, "200 OK", fOurSessionId); } void RTSPServer::RTSPClientSession ::handleCmd_GET_PARAMETER(RTSPServer::RTSPClientConnection* ourClientConnection, ServerMediaSubsession* /*subsession*/, char const* /*fullRequestStr*/) { // By default, we implement "GET_PARAMETER" just as a 'keep alive', and send back a dummy response. // (If you want to handle "GET_PARAMETER" properly, you can do so by defining a subclass of "RTSPServer" // and "RTSPServer::RTSPClientSession", and then reimplement this virtual function in your subclass.) setRTSPResponse(ourClientConnection, "200 OK", fOurSessionId, LIVEMEDIA_LIBRARY_VERSION_STRING); } void RTSPServer::RTSPClientSession ::handleCmd_SET_PARAMETER(RTSPServer::RTSPClientConnection* ourClientConnection, ServerMediaSubsession* /*subsession*/, char const* /*fullRequestStr*/) { // By default, we implement "SET_PARAMETER" just as a 'keep alive', and send back an empty response. // (If you want to handle "SET_PARAMETER" properly, you can do so by defining a subclass of "RTSPServer" // and "RTSPServer::RTSPClientSession", and then reimplement this virtual function in your subclass.) setRTSPResponse(ourClientConnection, "200 OK", fOurSessionId); } RTSPServer::RTSPClientConnection* RTSPServer::createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr) { return new RTSPClientConnection(*this, clientSocket, clientAddr); } RTSPServer::RTSPClientSession* RTSPServer::createNewClientSession(u_int32_t sessionId) { return new RTSPClientSession(*this, sessionId); } void RTSPServer::RTSPClientSession::noteLiveness() { if (fOurServer.fReclamationTestSeconds > 0) { envir().taskScheduler() .rescheduleDelayedTask(fLivenessCheckTask, fOurServer.fReclamationTestSeconds*1000000, (TaskFunc*)livenessTimeoutTask, this); } } void RTSPServer::RTSPClientSession ::noteClientLiveness(RTSPClientSession* clientSession) { #ifdef DEBUG char const* streamName = (clientSession->fOurServerMediaSession == NULL) ? "???" 
: clientSession->fOurServerMediaSession->streamName(); fprintf(stderr, "RTSP client session (id \"%08X\", stream name \"%s\"): Liveness indication\n", clientSession->fOurSessionId, streamName); #endif clientSession->noteLiveness(); } void RTSPServer::RTSPClientSession ::livenessTimeoutTask(RTSPClientSession* clientSession) { // If this gets called, the client session is assumed to have timed out, // so delete it: #ifdef DEBUG char const* streamName = (clientSession->fOurServerMediaSession == NULL) ? "???" : clientSession->fOurServerMediaSession->streamName(); fprintf(stderr, "RTSP client session (id \"%08X\", stream name \"%s\") has timed out (due to inactivity)\n", clientSession->fOurSessionId, streamName); #endif delete clientSession; } ////////// ServerMediaSessionIterator implementation ////////// RTSPServer::ServerMediaSessionIterator ::ServerMediaSessionIterator(RTSPServer& server) : fOurIterator((server.fServerMediaSessions == NULL) ? NULL : HashTable::Iterator::create(*server.fServerMediaSessions)) { } RTSPServer::ServerMediaSessionIterator::~ServerMediaSessionIterator() { delete fOurIterator; } ServerMediaSession* RTSPServer::ServerMediaSessionIterator::next() { if (fOurIterator == NULL) return NULL; char const* key; // dummy return (ServerMediaSession*)(fOurIterator->next(key)); } ////////// UserAuthenticationDatabase implementation ////////// UserAuthenticationDatabase::UserAuthenticationDatabase(char const* realm, Boolean passwordsAreMD5) : fTable(HashTable::create(STRING_HASH_KEYS)), fRealm(strDup(realm == NULL ? "LIVE555 Streaming Media" : realm)), fPasswordsAreMD5(passwordsAreMD5) { } UserAuthenticationDatabase::~UserAuthenticationDatabase() { delete[] fRealm; // Delete the allocated 'password' strings that we stored in the table, and then the table itself: char* password; while ((password = (char*)fTable->RemoveNext()) != NULL) { delete[] password; } delete fTable; } void UserAuthenticationDatabase::addUserRecord(char const* username, char const* password) { fTable->Add(username, (void*)(strDup(password))); } void UserAuthenticationDatabase::removeUserRecord(char const* username) { char* password = (char*)(fTable->Lookup(username)); fTable->Remove(username); delete[] password; } char const* UserAuthenticationDatabase::lookupPassword(char const* username) { return (char const*)(fTable->Lookup(username)); } ///////// RTSPServerWithREGISTERProxying implementation ///////// RTSPServerWithREGISTERProxying* RTSPServerWithREGISTERProxying ::createNew(UsageEnvironment& env, Port ourPort, UserAuthenticationDatabase* authDatabase, UserAuthenticationDatabase* authDatabaseForREGISTER, unsigned reclamationTestSeconds, Boolean streamRTPOverTCP, int verbosityLevelForProxying) { int ourSocket = setUpOurSocket(env, ourPort); if (ourSocket == -1) return NULL; return new RTSPServerWithREGISTERProxying(env, ourSocket, ourPort, authDatabase, authDatabaseForREGISTER, reclamationTestSeconds, streamRTPOverTCP, verbosityLevelForProxying); } RTSPServerWithREGISTERProxying ::RTSPServerWithREGISTERProxying(UsageEnvironment& env, int ourSocket, Port ourPort, UserAuthenticationDatabase* authDatabase, UserAuthenticationDatabase* authDatabaseForREGISTER, unsigned reclamationTestSeconds, Boolean streamRTPOverTCP, int verbosityLevelForProxying) : RTSPServer(env, ourSocket, ourPort, authDatabase, reclamationTestSeconds), fStreamRTPOverTCP(streamRTPOverTCP), fVerbosityLevelForProxying(verbosityLevelForProxying), fRegisteredProxyCounter(0), fAllowedCommandNames(NULL), 
fAuthDBForREGISTER(authDatabaseForREGISTER) { } RTSPServerWithREGISTERProxying::~RTSPServerWithREGISTERProxying() { delete[] fAllowedCommandNames; } char const* RTSPServerWithREGISTERProxying::allowedCommandNames() { if (fAllowedCommandNames == NULL) { char const* baseAllowedCommandNames = RTSPServer::allowedCommandNames(); char const* newAllowedCommandName = ", REGISTER"; fAllowedCommandNames = new char[strlen(baseAllowedCommandNames) + strlen(newAllowedCommandName) + 1/* for '\0' */]; sprintf(fAllowedCommandNames, "%s%s", baseAllowedCommandNames, newAllowedCommandName); } return fAllowedCommandNames; } Boolean RTSPServerWithREGISTERProxying::weImplementREGISTER(char const* proxyURLSuffix, char*& responseStr) { // First, check whether we have already proxied a stream as "proxyURLSuffix": if (proxyURLSuffix != NULL && lookupServerMediaSession(proxyURLSuffix) != NULL) { responseStr = strDup("451 Invalid parameter"); return False; } // Otherwise, we will implement it: responseStr = NULL; return True; } void RTSPServerWithREGISTERProxying::implementCmd_REGISTER(char const* url, char const* /*urlSuffix*/, int socketToRemoteServer, Boolean deliverViaTCP, char const* proxyURLSuffix) { // Continue setting up proxying for the specified URL. // By default: // - We use "registeredProxyStream-N" as the (front-end) stream name (ignoring the back-end stream's 'urlSuffix'), // unless "proxyURLSuffix" is non-NULL (in which case we use that) // - There is no 'username' and 'password' for the back-end stream. (Thus, access-controlled back-end streams will fail.) // - If "fStreamRTPOverTCP" is True, then we request delivery over TCP, regardless of the value of "deliverViaTCP". // (Otherwise, if "fStreamRTPOverTCP" is False, we use the value of "deliverViaTCP" to decide this.) // To change this default behavior, you will need to subclass "RTSPServerWithREGISTERProxying", and reimplement this function. char const* proxyStreamName; char proxyStreamNameBuf[100]; if (proxyURLSuffix == NULL) { sprintf(proxyStreamNameBuf, "registeredProxyStream-%u", ++fRegisteredProxyCounter); proxyStreamName = proxyStreamNameBuf; } else { proxyStreamName = proxyURLSuffix; } if (fStreamRTPOverTCP) deliverViaTCP = True; portNumBits tunnelOverHTTPPortNum = deliverViaTCP ? 
(portNumBits)(~0) : 0; // We don't support streaming from the back-end via RTSP/RTP/RTCP-over-HTTP; only via RTP/RTCP-over-TCP or RTP/RTCP-over-UDP ServerMediaSession* sms = ProxyServerMediaSession::createNew(envir(), this, url, proxyStreamName, NULL, NULL, tunnelOverHTTPPortNum, fVerbosityLevelForProxying, socketToRemoteServer); addServerMediaSession(sms); // (Regardless of the verbosity level) announce the fact that we're proxying this new stream, and the URL to use to access it: char* proxyStreamURL = rtspURL(sms); envir() << "Proxying the registered back-end stream \"" << url << "\".\n"; envir() << "\tPlay this stream using the URL: " << proxyStreamURL << "\n"; delete[] proxyStreamURL; } UserAuthenticationDatabase* RTSPServerWithREGISTERProxying::getAuthenticationDatabaseForCommand(char const* cmdName) { if (strcmp(cmdName, "REGISTER") == 0) return fAuthDBForREGISTER; return RTSPServer::getAuthenticationDatabaseForCommand(cmdName); } live/liveMedia/VP8VideoRTPSource.cpp000444 001751 000000 00000005305 12265042432 017520 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // VP8 Video RTP Sources // Implementation #include "VP8VideoRTPSource.hh" VP8VideoRTPSource* VP8VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new VP8VideoRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } VP8VideoRTPSource ::VP8VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency) { } VP8VideoRTPSource::~VP8VideoRTPSource() { } Boolean VP8VideoRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { unsigned char* headerStart = packet->data(); unsigned packetSize = packet->dataSize(); // The special header is from 1 to 6 bytes long. 
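// Illustrative aside (ours, not from the original file) - the descriptor layout
// assumed by the parsing below (cf. the IETF VP8 RTP payload format):
//
//        0 1 2 3 4 5 6 7
//       +-+-+-+-+-+-+-+-+
//       |X|R|N|S| PartID| (required first byte)
//       +-+-+-+-+-+-+-+-+
//   X:  |I|L|T|K|  RSV  | (extension byte, present only if X is set)
//       +-+-+-+-+-+-+-+-+
//   I:  PictureID (1 byte, or 2 bytes if its top 'extension' bit is set)
//   L:  TL0PICIDX (1 byte);  T/K: one shared TID/KEYIDX byte
//
// E.g., byte1 0x90 (X and S set, PartID 0), byte2 0x80 (I set), and a 2-byte
// PictureID give a 4-byte special header, sized by the code that follows.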
if (packetSize == 0) return False; // error resultSpecialHeaderSize = 1; // unless we learn otherwise u_int8_t const byte1 = headerStart[0]; Boolean const X = (byte1&0x80) != 0; Boolean const S = (byte1&0x10) != 0; u_int8_t const PartID = byte1&0x0F; fCurrentPacketBeginsFrame = S && PartID == 0; fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); // RTP header's "M" bit if (X) { ++resultSpecialHeaderSize; u_int8_t const byte2 = headerStart[1]; Boolean const I = (byte2&0x80) != 0; Boolean const L = (byte2&0x40) != 0; Boolean const T = (byte2&0x20) != 0; Boolean const K = (byte2&0x10) != 0; if (I) { ++resultSpecialHeaderSize; if (headerStart[2]&0x80) { // extension flag in the PictureID is set ++resultSpecialHeaderSize; } } if (L) ++resultSpecialHeaderSize; if (T||K) ++resultSpecialHeaderSize; } return True; } char const* VP8VideoRTPSource::MIMEtype() const { return "video/VP8"; } live/liveMedia/QCELPAudioRTPSource.cpp000444 001751 000000 00000040003 12265042432 017734 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Qualcomm "PureVoice" (aka. "QCELP") Audio RTP Sources // Implementation #include "QCELPAudioRTPSource.hh" #include "MultiFramedRTPSource.hh" #include "FramedFilter.hh" #include #include // This source is implemented internally by two separate sources: // (i) a RTP source for the raw (interleaved) QCELP frames, and // (ii) a deinterleaving filter that reads from this. 
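// Illustrative usage sketch (ours, not from the original file; the "demo" name
// is hypothetical).  It mirrors the two-source structure just described: the
// call returns the deinterleaving filter to read frames from, and also hands
// back the underlying raw RTP source via the reference parameter:
static FramedSource* demoCreateQCELPSource(UsageEnvironment& env, Groupsock* rtpGroupsock) {
  RTPSource* rtpSource; // will point at the raw (interleaved) RTP source
  FramedSource* qcelpSource
    = QCELPAudioRTPSource::createNew(env, rtpGroupsock, rtpSource,
                                     12 /*QCELP's static RTP payload type (IANA)*/,
                                     8000 /*timestamp frequency, in Hz*/);
  // Read deinterleaved QCELP frames from "qcelpSource"; use "rtpSource" for
  // RTP-level state (e.g., when creating a "RTCPInstance").
  return qcelpSource;
}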
// Define these two new classes here: class RawQCELPRTPSource: public MultiFramedRTPSource { public: static RawQCELPRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); unsigned char interleaveL() const { return fInterleaveL; } unsigned char interleaveN() const { return fInterleaveN; } unsigned char& frameIndex() { return fFrameIndex; } // index within pkt private: RawQCELPRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() virtual ~RawQCELPRTPSource(); private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; virtual Boolean hasBeenSynchronizedUsingRTCP(); private: unsigned char fInterleaveL, fInterleaveN, fFrameIndex; unsigned fNumSuccessiveSyncedPackets; }; class QCELPDeinterleaver: public FramedFilter { public: static QCELPDeinterleaver* createNew(UsageEnvironment& env, RawQCELPRTPSource* inputSource); private: QCELPDeinterleaver(UsageEnvironment& env, RawQCELPRTPSource* inputSource); // called only by "createNew()" virtual ~QCELPDeinterleaver(); static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, struct timeval presentationTime); private: // Redefined virtual functions: void doGetNextFrame(); virtual void doStopGettingFrames(); private: class QCELPDeinterleavingBuffer* fDeinterleavingBuffer; Boolean fNeedAFrame; }; ////////// QCELPAudioRTPSource implementation ////////// FramedSource* QCELPAudioRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, RTPSource*& resultRTPSource, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { RawQCELPRTPSource* rawRTPSource; resultRTPSource = rawRTPSource = RawQCELPRTPSource::createNew(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); if (resultRTPSource == NULL) return NULL; QCELPDeinterleaver* deinterleaver = QCELPDeinterleaver::createNew(env, rawRTPSource); if (deinterleaver == NULL) { Medium::close(resultRTPSource); resultRTPSource = NULL; } return deinterleaver; } ////////// QCELPBufferedPacket and QCELPBufferedPacketFactory ////////// // A subclass of BufferedPacket, used to separate out QCELP frames. 
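// Illustrative aside (ours, not from the original file): each QCELP frame begins
// with a 1-byte rate indicator that fixes the total frame size.  An equivalent
// lookup-table form of the mapping implemented in "nextEnclosedFrameSize()"
// further below (the rate names are our annotation, cf. RFC 2658):
static unsigned const qcelpFrameSizeByFirstByte[5] = {
  1,   // 0: blank
  4,   // 1: eighth rate
  8,   // 2: quarter rate
  17,  // 3: half rate
  35   // 4: full rate
};
// (first bytes greater than 4 are treated as invalid: frame size 0)
// The subclass defined next applies this framing to each incoming packet: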
class QCELPBufferedPacket: public BufferedPacket { public: QCELPBufferedPacket(RawQCELPRTPSource& ourSource); virtual ~QCELPBufferedPacket(); private: // redefined virtual functions virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize); private: RawQCELPRTPSource& fOurSource; }; class QCELPBufferedPacketFactory: public BufferedPacketFactory { private: // redefined virtual functions virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource); }; ///////// RawQCELPRTPSource implementation //////// RawQCELPRTPSource* RawQCELPRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new RawQCELPRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } RawQCELPRTPSource::RawQCELPRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, new QCELPBufferedPacketFactory), fInterleaveL(0), fInterleaveN(0), fFrameIndex(0), fNumSuccessiveSyncedPackets(0) { } RawQCELPRTPSource::~RawQCELPRTPSource() { } Boolean RawQCELPRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { unsigned char* headerStart = packet->data(); unsigned packetSize = packet->dataSize(); // First, check whether this packet's RTP timestamp is synchronized: if (RTPSource::hasBeenSynchronizedUsingRTCP()) { ++fNumSuccessiveSyncedPackets; } else { fNumSuccessiveSyncedPackets = 0; } // There's a 1-byte header indicating the interleave parameters if (packetSize < 1) return False; // Get the interleaving parameters from the 1-byte header, // and check them for validity: unsigned char const firstByte = headerStart[0]; unsigned char const interleaveL = (firstByte&0x38)>>3; unsigned char const interleaveN = firstByte&0x07; #ifdef DEBUG fprintf(stderr, "packetSize: %d, interleaveL: %d, interleaveN: %d\n", packetSize, interleaveL, interleaveN); #endif if (interleaveL > 5 || interleaveN > interleaveL) return False; //invalid fInterleaveL = interleaveL; fInterleaveN = interleaveN; fFrameIndex = 0; // initially resultSpecialHeaderSize = 1; return True; } char const* RawQCELPRTPSource::MIMEtype() const { return "audio/QCELP"; } Boolean RawQCELPRTPSource::hasBeenSynchronizedUsingRTCP() { // Don't report ourselves as being synchronized until we've received // at least a complete interleave cycle of synchronized packets. // This ensures that the receiver is currently getting a frame from // a packet that was synchronized. 
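// Worked example (ours, not from the original file): with interleave parameter
// L == 5, one interleave cycle spans L+1 == 6 packets, so synchronization is
// reported only once more than 6 successive RTCP-synchronized packets have
// arrived; the counter is then clamped at L+2 == 7 to prevent overflow, as in
// the code that follows.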
if (fNumSuccessiveSyncedPackets > (unsigned)(fInterleaveL+1)) { fNumSuccessiveSyncedPackets = fInterleaveL+2; // prevents overflow return True; } return False; } ///// QCELPBufferedPacket and QCELPBufferedPacketFactory implementation QCELPBufferedPacket::QCELPBufferedPacket(RawQCELPRTPSource& ourSource) : fOurSource(ourSource) { } QCELPBufferedPacket::~QCELPBufferedPacket() { } unsigned QCELPBufferedPacket:: nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) { // The size of the QCELP frame is determined by the first byte: if (dataSize == 0) return 0; // sanity check unsigned char const firstByte = framePtr[0]; unsigned frameSize; switch (firstByte) { case 0: { frameSize = 1; break; } case 1: { frameSize = 4; break; } case 2: { frameSize = 8; break; } case 3: { frameSize = 17; break; } case 4: { frameSize = 35; break; } default: { frameSize = 0; break; } } #ifdef DEBUG fprintf(stderr, "QCELPBufferedPacket::nextEnclosedFrameSize(): frameSize: %d, dataSize: %d\n", frameSize, dataSize); #endif if (dataSize < frameSize) return 0; ++fOurSource.frameIndex(); return frameSize; } BufferedPacket* QCELPBufferedPacketFactory ::createNewPacket(MultiFramedRTPSource* ourSource) { return new QCELPBufferedPacket((RawQCELPRTPSource&)(*ourSource)); } ///////// QCELPDeinterleavingBuffer ///////// // (used to implement QCELPDeinterleaver) #define QCELP_MAX_FRAME_SIZE 35 #define QCELP_MAX_INTERLEAVE_L 5 #define QCELP_MAX_FRAMES_PER_PACKET 10 #define QCELP_MAX_INTERLEAVE_GROUP_SIZE \ ((QCELP_MAX_INTERLEAVE_L+1)*QCELP_MAX_FRAMES_PER_PACKET) class QCELPDeinterleavingBuffer { public: QCELPDeinterleavingBuffer(); virtual ~QCELPDeinterleavingBuffer(); void deliverIncomingFrame(unsigned frameSize, unsigned char interleaveL, unsigned char interleaveN, unsigned char frameIndex, unsigned short packetSeqNum, struct timeval presentationTime); Boolean retrieveFrame(unsigned char* to, unsigned maxSize, unsigned& resultFrameSize, unsigned& resultNumTruncatedBytes, struct timeval& resultPresentationTime); unsigned char* inputBuffer() { return fInputBuffer; } unsigned inputBufferSize() const { return QCELP_MAX_FRAME_SIZE; } private: class FrameDescriptor { public: FrameDescriptor(); virtual ~FrameDescriptor(); unsigned frameSize; unsigned char* frameData; struct timeval presentationTime; }; // Use two banks of descriptors - one for incoming, one for outgoing FrameDescriptor fFrames[QCELP_MAX_INTERLEAVE_GROUP_SIZE][2]; unsigned char fIncomingBankId; // toggles between 0 and 1 unsigned char fIncomingBinMax; // in the incoming bank unsigned char fOutgoingBinMax; // in the outgoing bank unsigned char fNextOutgoingBin; Boolean fHaveSeenPackets; u_int16_t fLastPacketSeqNumForGroup; unsigned char* fInputBuffer; struct timeval fLastRetrievedPresentationTime; }; ////////// QCELPDeinterleaver implementation ///////// QCELPDeinterleaver* QCELPDeinterleaver::createNew(UsageEnvironment& env, RawQCELPRTPSource* inputSource) { return new QCELPDeinterleaver(env, inputSource); } QCELPDeinterleaver::QCELPDeinterleaver(UsageEnvironment& env, RawQCELPRTPSource* inputSource) : FramedFilter(env, inputSource), fNeedAFrame(False) { fDeinterleavingBuffer = new QCELPDeinterleavingBuffer(); } QCELPDeinterleaver::~QCELPDeinterleaver() { delete fDeinterleavingBuffer; } static unsigned const uSecsPerFrame = 20000; // 20 ms void QCELPDeinterleaver::doGetNextFrame() { // First, try getting a frame from the deinterleaving buffer: if (fDeinterleavingBuffer->retrieveFrame(fTo, fMaxSize, fFrameSize, fNumTruncatedBytes, fPresentationTime)) { // 
Success! fNeedAFrame = False; fDurationInMicroseconds = uSecsPerFrame; // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking // infinite recursion afterGetting(this); return; } // No luck, so ask our source for help: fNeedAFrame = True; if (!fInputSource->isCurrentlyAwaitingData()) { fInputSource->getNextFrame(fDeinterleavingBuffer->inputBuffer(), fDeinterleavingBuffer->inputBufferSize(), afterGettingFrame, this, FramedSource::handleClosure, this); } } void QCELPDeinterleaver::doStopGettingFrames() { fNeedAFrame = False; fInputSource->stopGettingFrames(); } void QCELPDeinterleaver ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned /*numTruncatedBytes*/, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) { QCELPDeinterleaver* deinterleaver = (QCELPDeinterleaver*)clientData; deinterleaver->afterGettingFrame1(frameSize, presentationTime); } void QCELPDeinterleaver ::afterGettingFrame1(unsigned frameSize, struct timeval presentationTime) { RawQCELPRTPSource* source = (RawQCELPRTPSource*)fInputSource; // First, put the frame into our deinterleaving buffer: fDeinterleavingBuffer ->deliverIncomingFrame(frameSize, source->interleaveL(), source->interleaveN(), source->frameIndex(), source->curPacketRTPSeqNum(), presentationTime); // Then, try delivering a frame to the client (if he wants one): if (fNeedAFrame) doGetNextFrame(); } ////////// QCELPDeinterleavingBuffer implementation ///////// QCELPDeinterleavingBuffer::QCELPDeinterleavingBuffer() : fIncomingBankId(0), fIncomingBinMax(0), fOutgoingBinMax(0), fNextOutgoingBin(0), fHaveSeenPackets(False) { fInputBuffer = new unsigned char[QCELP_MAX_FRAME_SIZE]; } QCELPDeinterleavingBuffer::~QCELPDeinterleavingBuffer() { delete[] fInputBuffer; } void QCELPDeinterleavingBuffer ::deliverIncomingFrame(unsigned frameSize, unsigned char interleaveL, unsigned char interleaveN, unsigned char frameIndex, unsigned short packetSeqNum, struct timeval presentationTime) { // First perform a sanity check on the parameters: // (This is overkill, as the source should have already done this.) if (frameSize > QCELP_MAX_FRAME_SIZE || interleaveL > QCELP_MAX_INTERLEAVE_L || interleaveN > interleaveL || frameIndex == 0 || frameIndex > QCELP_MAX_FRAMES_PER_PACKET) { #ifdef DEBUG fprintf(stderr, "QCELPDeinterleavingBuffer::deliverIncomingFrame() param sanity check failed (%d,%d,%d,%d)\n", frameSize, interleaveL, interleaveN, frameIndex); #endif return; } // The input "presentationTime" was that of the first frame in this // packet. 
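// Illustrative aside (ours, not from the original file): successive frames
// within one packet are (interleaveL+1) frame-times apart in playout order.
// E.g., with interleaveL == 1 and frameIndex == 3, the increment computed
// below is (3-1)*(1+1)*20000 us == 80 ms.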
Update it for the current frame: unsigned uSecIncrement = (frameIndex-1)*(interleaveL+1)*uSecsPerFrame; presentationTime.tv_usec += uSecIncrement; presentationTime.tv_sec += presentationTime.tv_usec/1000000; presentationTime.tv_usec = presentationTime.tv_usec%1000000; // Next, check whether this packet is part of a new interleave group if (!fHaveSeenPackets || seqNumLT(fLastPacketSeqNumForGroup, packetSeqNum)) { // We've moved to a new interleave group fHaveSeenPackets = True; fLastPacketSeqNumForGroup = packetSeqNum + interleaveL - interleaveN; // Switch the incoming and outgoing banks: fIncomingBankId ^= 1; unsigned char tmp = fIncomingBinMax; fIncomingBinMax = fOutgoingBinMax; fOutgoingBinMax = tmp; fNextOutgoingBin = 0; } // Now move the incoming frame into the appropriate bin: unsigned const binNumber = interleaveN + (frameIndex-1)*(interleaveL+1); FrameDescriptor& inBin = fFrames[binNumber][fIncomingBankId]; unsigned char* curBuffer = inBin.frameData; inBin.frameData = fInputBuffer; inBin.frameSize = frameSize; inBin.presentationTime = presentationTime; if (curBuffer == NULL) curBuffer = new unsigned char[QCELP_MAX_FRAME_SIZE]; fInputBuffer = curBuffer; if (binNumber >= fIncomingBinMax) { fIncomingBinMax = binNumber + 1; } } Boolean QCELPDeinterleavingBuffer ::retrieveFrame(unsigned char* to, unsigned maxSize, unsigned& resultFrameSize, unsigned& resultNumTruncatedBytes, struct timeval& resultPresentationTime) { if (fNextOutgoingBin >= fOutgoingBinMax) return False; // none left FrameDescriptor& outBin = fFrames[fNextOutgoingBin][fIncomingBankId^1]; unsigned char* fromPtr; unsigned char fromSize = outBin.frameSize; outBin.frameSize = 0; // for the next time this bin is used // Check whether this frame is missing; if so, return an 'erasure' frame: unsigned char erasure = 14; if (fromSize == 0) { fromPtr = &erasure; fromSize = 1; // Compute this erasure frame's presentation time via extrapolation: resultPresentationTime = fLastRetrievedPresentationTime; resultPresentationTime.tv_usec += uSecsPerFrame; if (resultPresentationTime.tv_usec >= 1000000) { ++resultPresentationTime.tv_sec; resultPresentationTime.tv_usec -= 1000000; } } else { // Normal case - a frame exists: fromPtr = outBin.frameData; resultPresentationTime = outBin.presentationTime; } fLastRetrievedPresentationTime = resultPresentationTime; if (fromSize > maxSize) { resultNumTruncatedBytes = fromSize - maxSize; resultFrameSize = maxSize; } else { resultNumTruncatedBytes = 0; resultFrameSize = fromSize; } memmove(to, fromPtr, resultFrameSize); ++fNextOutgoingBin; return True; } QCELPDeinterleavingBuffer::FrameDescriptor::FrameDescriptor() : frameSize(0), frameData(NULL) { } QCELPDeinterleavingBuffer::FrameDescriptor::~FrameDescriptor() { delete[] frameData; } live/liveMedia/MP3ADURTPSource.cpp000444 001751 000000 00000005526 12265042432 017052 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP source for 'ADUized' MP3 frames ("mpa-robust") // Implementation #include "MP3ADURTPSource.hh" #include "MP3ADUdescriptor.hh" ////////// ADUBufferedPacket and ADUBufferedPacketFactory ////////// class ADUBufferedPacket: public BufferedPacket { private: // redefined virtual functions virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize); }; class ADUBufferedPacketFactory: public BufferedPacketFactory { private: // redefined virtual functions virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource); }; ///////// MP3ADURTPSource implementation //////// MP3ADURTPSource* MP3ADURTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new MP3ADURTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } MP3ADURTPSource::MP3ADURTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, new ADUBufferedPacketFactory) { } MP3ADURTPSource::~MP3ADURTPSource() { } char const* MP3ADURTPSource::MIMEtype() const { return "audio/MPA-ROBUST"; } ////////// ADUBufferedPacket and ADUBufferredPacketFactory implementation unsigned ADUBufferedPacket ::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) { // Return the size of the next MP3 'ADU', on the assumption that // the input data is ADU-encoded MP3 frames. unsigned char* frameDataPtr = framePtr; unsigned remainingFrameSize = ADUdescriptor::getRemainingFrameSize(frameDataPtr); unsigned descriptorSize = (unsigned)(frameDataPtr - framePtr); unsigned fullADUSize = descriptorSize + remainingFrameSize; return (fullADUSize <= dataSize) ? fullADUSize : dataSize; } BufferedPacket* ADUBufferedPacketFactory ::createNewPacket(MultiFramedRTPSource* /*ourSource*/) { return new ADUBufferedPacket; } live/liveMedia/ByteStreamMultiFileSource.cpp000444 001751 000000 00000011066 12265042432 021421 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
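// Before the implementation below, an illustrative usage sketch (ours, not from
// the original file).  Note that the file-name array must be NULL-terminated,
// since the constructor counts entries up to the NULL; the two trailing 0
// arguments assume this library's usual convention of 0 meaning 'no preference':
//
//   char const* fileNames[] = { "part1.mpg", "part2.mpg", NULL }; // NULL-terminated!
//   ByteStreamMultiFileSource* src
//     = ByteStreamMultiFileSource::createNew(env, fileNames,
//                                            0 /*preferredFrameSize*/,
//                                            0 /*playTimePerFrame*/);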
// A source that consists of multiple byte-stream files, read sequentially // Implementation #include "ByteStreamMultiFileSource.hh" ByteStreamMultiFileSource ::ByteStreamMultiFileSource(UsageEnvironment& env, char const** fileNameArray, unsigned preferredFrameSize, unsigned playTimePerFrame) : FramedSource(env), fPreferredFrameSize(preferredFrameSize), fPlayTimePerFrame(playTimePerFrame), fCurrentlyReadSourceNumber(0), fHaveStartedNewFile(False) { // Begin by counting the number of sources: for (fNumSources = 0; ; ++fNumSources) { if (fileNameArray[fNumSources] == NULL) break; } // Next, copy the source file names into our own array: fFileNameArray = new char const*[fNumSources]; if (fFileNameArray == NULL) return; unsigned i; for (i = 0; i < fNumSources; ++i) { fFileNameArray[i] = strDup(fileNameArray[i]); } // Next, set up our array of component ByteStreamFileSources // Don't actually create these yet; instead, do this on demand fSourceArray = new ByteStreamFileSource*[fNumSources]; if (fSourceArray == NULL) return; for (i = 0; i < fNumSources; ++i) { fSourceArray[i] = NULL; } } ByteStreamMultiFileSource::~ByteStreamMultiFileSource() { unsigned i; for (i = 0; i < fNumSources; ++i) { Medium::close(fSourceArray[i]); } delete[] fSourceArray; for (i = 0; i < fNumSources; ++i) { delete[] (char*)(fFileNameArray[i]); } delete[] fFileNameArray; } ByteStreamMultiFileSource* ByteStreamMultiFileSource ::createNew(UsageEnvironment& env, char const** fileNameArray, unsigned preferredFrameSize, unsigned playTimePerFrame) { ByteStreamMultiFileSource* newSource = new ByteStreamMultiFileSource(env, fileNameArray, preferredFrameSize, playTimePerFrame); return newSource; } void ByteStreamMultiFileSource::doGetNextFrame() { do { // First, check whether we've run out of sources: if (fCurrentlyReadSourceNumber >= fNumSources) break; fHaveStartedNewFile = False; ByteStreamFileSource*& source = fSourceArray[fCurrentlyReadSourceNumber]; if (source == NULL) { // The current source hasn't been created yet. Do this now: source = ByteStreamFileSource::createNew(envir(), fFileNameArray[fCurrentlyReadSourceNumber], fPreferredFrameSize, fPlayTimePerFrame); if (source == NULL) break; fHaveStartedNewFile = True; } // (Attempt to) read from the current source. source->getNextFrame(fTo, fMaxSize, afterGettingFrame, this, onSourceClosure, this); return; } while (0); // An error occurred; consider ourselves closed: handleClosure(this); } void ByteStreamMultiFileSource ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { ByteStreamMultiFileSource* source = (ByteStreamMultiFileSource*)clientData; source->fFrameSize = frameSize; source->fNumTruncatedBytes = numTruncatedBytes; source->fPresentationTime = presentationTime; source->fDurationInMicroseconds = durationInMicroseconds; FramedSource::afterGetting(source); } void ByteStreamMultiFileSource::onSourceClosure(void* clientData) { ByteStreamMultiFileSource* source = (ByteStreamMultiFileSource*)clientData; source->onSourceClosure1(); } void ByteStreamMultiFileSource::onSourceClosure1() { // This routine was called because the currently-read source was closed // (probably due to EOF). 
Close this source down, and move to the // next one: ByteStreamFileSource*& source = fSourceArray[fCurrentlyReadSourceNumber++]; Medium::close(source); source = NULL; // Try reading again: doGetNextFrame(); } live/liveMedia/MPEG1or2FileServerDemux.cpp000444 001751 000000 00000022577 12265042432 020605 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A server demultiplexer for a MPEG 1 or 2 Program Stream // Implementation #include "MPEG1or2FileServerDemux.hh" #include "MPEG1or2DemuxedServerMediaSubsession.hh" #include "ByteStreamFileSource.hh" MPEG1or2FileServerDemux* MPEG1or2FileServerDemux::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) { return new MPEG1or2FileServerDemux(env, fileName, reuseFirstSource); } static float MPEG1or2ProgramStreamFileDuration(UsageEnvironment& env, char const* fileName, unsigned& fileSize); // forward MPEG1or2FileServerDemux ::MPEG1or2FileServerDemux(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) : Medium(env), fReuseFirstSource(reuseFirstSource), fSession0Demux(NULL), fLastCreatedDemux(NULL), fLastClientSessionId(~0) { fFileName = strDup(fileName); fFileDuration = MPEG1or2ProgramStreamFileDuration(env, fileName, fFileSize); } MPEG1or2FileServerDemux::~MPEG1or2FileServerDemux() { Medium::close(fSession0Demux); delete[] (char*)fFileName; } ServerMediaSubsession* MPEG1or2FileServerDemux::newAudioServerMediaSubsession() { return MPEG1or2DemuxedServerMediaSubsession::createNew(*this, 0xC0, fReuseFirstSource); } ServerMediaSubsession* MPEG1or2FileServerDemux::newVideoServerMediaSubsession(Boolean iFramesOnly, double vshPeriod) { return MPEG1or2DemuxedServerMediaSubsession::createNew(*this, 0xE0, fReuseFirstSource, iFramesOnly, vshPeriod); } ServerMediaSubsession* MPEG1or2FileServerDemux::newAC3AudioServerMediaSubsession() { return MPEG1or2DemuxedServerMediaSubsession::createNew(*this, 0xBD, fReuseFirstSource); // because, in a VOB file, the AC3 audio has stream id 0xBD } MPEG1or2DemuxedElementaryStream* MPEG1or2FileServerDemux::newElementaryStream(unsigned clientSessionId, u_int8_t streamIdTag) { MPEG1or2Demux* demuxToUse; if (clientSessionId == 0) { // 'Session 0' is treated especially, because its audio & video streams // are created and destroyed one-at-a-time, rather than both streams being // created, and then (later) both streams being destroyed (as is the case // for other ('real') session ids). Because of this, a separate demux is // used for session 0, and its deletion is managed by us, rather than // happening automatically. 
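// Illustrative usage sketch (ours, not from the original file), showing how an
// RTSP server typically wires this demux up - one demux per file, with audio and
// video subsessions added to an existing "ServerMediaSession" ("sms" and the
// argument values here are assumptions):
//
//   MPEG1or2FileServerDemux* demux
//     = MPEG1or2FileServerDemux::createNew(env, "test.mpg", False /*reuseFirstSource*/);
//   sms->addSubsession(demux->newVideoServerMediaSubsession(False /*iFramesOnly*/,
//                                                           5.0 /*vshPeriod, assumed*/));
//   sms->addSubsession(demux->newAudioServerMediaSubsession());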
if (fSession0Demux == NULL) { // Open our input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; fSession0Demux = MPEG1or2Demux::createNew(envir(), fileSource, False/*note!*/); } demuxToUse = fSession0Demux; } else { // First, check whether this is a new client session. If so, create a new // demux for it: if (clientSessionId != fLastClientSessionId) { // Open our input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; fLastCreatedDemux = MPEG1or2Demux::createNew(envir(), fileSource, True); // Note: We tell the demux to delete itself when its last // elementary stream is deleted. fLastClientSessionId = clientSessionId; // Note: This code relies upon the fact that the creation of streams for // different client sessions do not overlap - so one "MPEG1or2Demux" is used // at a time. } demuxToUse = fLastCreatedDemux; } if (demuxToUse == NULL) return NULL; // shouldn't happen return demuxToUse->newElementaryStream(streamIdTag); } static Boolean getMPEG1or2TimeCode(FramedSource* dataSource, MPEG1or2Demux& parentDemux, Boolean returnFirstSeenCode, float& timeCode); // forward static float MPEG1or2ProgramStreamFileDuration(UsageEnvironment& env, char const* fileName, unsigned& fileSize) { FramedSource* dataSource = NULL; float duration = 0.0; // until we learn otherwise fileSize = 0; // ditto do { // Open the input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(env, fileName); if (fileSource == NULL) break; dataSource = fileSource; fileSize = (unsigned)(fileSource->fileSize()); if (fileSize == 0) break; // Create a MPEG demultiplexor that reads from that source. MPEG1or2Demux* baseDemux = MPEG1or2Demux::createNew(env, dataSource, True); if (baseDemux == NULL) break; // Create, from this, a source that returns raw PES packets: dataSource = baseDemux->newRawPESStream(); // Read the first time code from the file: float firstTimeCode; if (!getMPEG1or2TimeCode(dataSource, *baseDemux, True, firstTimeCode)) break; // Then, read the last time code from the file. // (Before doing this, flush the demux's input buffers, // and seek towards the end of the file, for efficiency.) baseDemux->flushInput(); unsigned const startByteFromEnd = 100000; unsigned newFilePosition = fileSize < startByteFromEnd ? 
0 : fileSize - startByteFromEnd; if (newFilePosition > 0) fileSource->seekToByteAbsolute(newFilePosition); float lastTimeCode; if (!getMPEG1or2TimeCode(dataSource, *baseDemux, False, lastTimeCode)) break; // Take the difference between these time codes as being the file duration: float timeCodeDiff = lastTimeCode - firstTimeCode; if (timeCodeDiff < 0) break; duration = timeCodeDiff; } while (0); Medium::close(dataSource); return duration; } #define MFSD_DUMMY_SINK_BUFFER_SIZE (6+65535) /* large enough for a PES packet */ class MFSD_DummySink: public MediaSink { public: MFSD_DummySink(MPEG1or2Demux& demux, Boolean returnFirstSeenCode); virtual ~MFSD_DummySink(); char watchVariable; private: // redefined virtual function: virtual Boolean continuePlaying(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(); private: MPEG1or2Demux& fOurDemux; Boolean fReturnFirstSeenCode; unsigned char fBuf[MFSD_DUMMY_SINK_BUFFER_SIZE]; }; static void afterPlayingMFSD_DummySink(MFSD_DummySink* sink); // forward static float computeSCRTimeCode(MPEG1or2Demux::SCR const& scr); // forward static Boolean getMPEG1or2TimeCode(FramedSource* dataSource, MPEG1or2Demux& parentDemux, Boolean returnFirstSeenCode, float& timeCode) { // Start reading through "dataSource", until we see a SCR time code: parentDemux.lastSeenSCR().isValid = False; UsageEnvironment& env = dataSource->envir(); // alias MFSD_DummySink sink(parentDemux, returnFirstSeenCode); sink.startPlaying(*dataSource, (MediaSink::afterPlayingFunc*)afterPlayingMFSD_DummySink, &sink); env.taskScheduler().doEventLoop(&sink.watchVariable); timeCode = computeSCRTimeCode(parentDemux.lastSeenSCR()); return parentDemux.lastSeenSCR().isValid; } ////////// MFSD_DummySink implementation ////////// MFSD_DummySink::MFSD_DummySink(MPEG1or2Demux& demux, Boolean returnFirstSeenCode) : MediaSink(demux.envir()), watchVariable(0), fOurDemux(demux), fReturnFirstSeenCode(returnFirstSeenCode) { } MFSD_DummySink::~MFSD_DummySink() { } Boolean MFSD_DummySink::continuePlaying() { if (fSource == NULL) return False; // sanity check fSource->getNextFrame(fBuf, sizeof fBuf, afterGettingFrame, this, onSourceClosure, this); return True; } void MFSD_DummySink::afterGettingFrame(void* clientData, unsigned /*frameSize*/, unsigned /*numTruncatedBytes*/, struct timeval /*presentationTime*/, unsigned /*durationInMicroseconds*/) { MFSD_DummySink* sink = (MFSD_DummySink*)clientData; sink->afterGettingFrame1(); } void MFSD_DummySink::afterGettingFrame1() { if (fReturnFirstSeenCode && fOurDemux.lastSeenSCR().isValid) { // We were asked to return the first SCR that we saw, and we've seen one, // so we're done. (Handle this as if the input source had closed.) 
    onSourceClosure();
    return;
  }
  continuePlaying();
}

static void afterPlayingMFSD_DummySink(MFSD_DummySink* sink) {
  // Return from the "doEventLoop()" call:
  sink->watchVariable = ~0;
}

static float computeSCRTimeCode(MPEG1or2Demux::SCR const& scr) {
  double result = scr.remainingBits/90000.0 + scr.extension/300.0;
  if (scr.highBit) {
    // Add (2^32)/90000 == (2^28)/5625
    double const highBitValue = (256*1024*1024)/5625.0;
    result += highBitValue;
  }
  return (float)result;
}
live/liveMedia/ourMD5.cpp000444 001751 000000 00000025116 12265042432 015462 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Because MD5 may not be implemented (at least, with the same interface) on all systems,
// we have our own implementation.
// Implementation

#include "ourMD5.hh"
#include <NetCommon.h> // for u_int32_t, u_int64_t
#include <string.h>

#define DIGEST_SIZE_IN_BYTES 16
#define DIGEST_SIZE_IN_HEX_DIGITS (2*DIGEST_SIZE_IN_BYTES)
#define DIGEST_SIZE_AS_STRING (DIGEST_SIZE_IN_HEX_DIGITS+1)

// The state of a MD5 computation in progress:
class MD5Context {
public:
  MD5Context();
  ~MD5Context();

  void addData(unsigned char const* inputData, unsigned inputDataSize);
  void end(char* outputDigest /*must point to an array of size DIGEST_SIZE_AS_STRING*/);

private:
  void finalize(unsigned char* outputDigestInBytes);
    // Like "end()", except that the argument is a byte array, of size DIGEST_SIZE_IN_BYTES.
    // This function is used to implement "end()".
  void zeroize(); // to remove potentially sensitive information
  void transform64Bytes(unsigned char const block[64]); // does the actual MD5 transform

private:
  u_int32_t fState[4]; // ABCD
  u_int64_t fBitCount; // number of bits, modulo 2^64
  unsigned char fWorkingBuffer[64];
};

char* our_MD5Data(unsigned char const* data, unsigned dataSize, char* outputDigest) {
  MD5Context ctx;

  ctx.addData(data, dataSize);

  if (outputDigest == NULL) outputDigest = new char[DIGEST_SIZE_AS_STRING];
  ctx.end(outputDigest);

  return outputDigest;
}

////////// MD5Context implementation //////////

MD5Context::MD5Context()
  : fBitCount(0) {
  // Initialize with magic constants:
  fState[0] = 0x67452301;
  fState[1] = 0xefcdab89;
  fState[2] = 0x98badcfe;
  fState[3] = 0x10325476;
}

MD5Context::~MD5Context() {
  zeroize();
}

void MD5Context::addData(unsigned char const* inputData, unsigned inputDataSize) {
  // Begin by noting how much of our 64-byte working buffer remains unfilled:
  u_int64_t const byteCount = fBitCount>>3;
  unsigned bufferBytesInUse = (unsigned)(byteCount&0x3F);
  unsigned bufferBytesRemaining = 64 - bufferBytesInUse;

  // Then update our bit count:
  fBitCount += inputDataSize<<3;

  unsigned i = 0;
  if (inputDataSize >= bufferBytesRemaining) {
    // We have enough input data to do (64-byte) MD5 transforms.
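/*
 (Illustrative usage sketch for "our_MD5Data()" above; the digest value is the
 standard RFC 1321 test vector for "abc".)

   char digest[DIGEST_SIZE_AS_STRING];
   our_MD5Data((unsigned char const*)"abc", 3, digest);
   // digest now holds "900150983cd24fb0d6963f7d28e17f72"

 Passing NULL as the last argument instead makes the function allocate the
 33-byte result itself with new[]; the caller is then responsible for
 delete[]'ing it.
*/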
// Do this now, starting with a transform on our working buffer, then with // (as many as possible) transforms on rest of the input data. memcpy((unsigned char*)&fWorkingBuffer[bufferBytesInUse], (unsigned char*)inputData, bufferBytesRemaining); transform64Bytes(fWorkingBuffer); bufferBytesInUse = 0; for (i = bufferBytesRemaining; i + 63 < inputDataSize; i += 64) { transform64Bytes(&inputData[i]); } } // Copy any remaining (and currently un-transformed) input data into our working buffer: if (i < inputDataSize) { memcpy((unsigned char*)&fWorkingBuffer[bufferBytesInUse], (unsigned char*)&inputData[i], inputDataSize - i); } } void MD5Context::end(char* outputDigest) { unsigned char digestInBytes[DIGEST_SIZE_IN_BYTES]; finalize(digestInBytes); // Convert the digest from bytes (binary) to hex digits: static char const hex[]="0123456789abcdef"; unsigned i; for (i = 0; i < DIGEST_SIZE_IN_BYTES; ++i) { outputDigest[2*i] = hex[digestInBytes[i] >> 4]; outputDigest[2*i+1] = hex[digestInBytes[i] & 0x0F]; } outputDigest[2*i] = '\0'; } // Routines that unpack 32 and 64-bit values into arrays of bytes (in little-endian order). // (These are used to implement "finalize()".) static void unpack32(unsigned char out[4], u_int32_t in) { for (unsigned i = 0; i < 4; ++i) { out[i] = (unsigned char)((in>>(8*i))&0xFF); } } static void unpack64(unsigned char out[8], u_int64_t in) { for (unsigned i = 0; i < 8; ++i) { out[i] = (unsigned char)((in>>(8*i))&0xFF); } } static unsigned char const PADDING[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; void MD5Context::finalize(unsigned char* outputDigestInBytes) { // Unpack our bit count: unsigned char bitCountInBytes[8]; unpack64(bitCountInBytes, fBitCount); // Before 'finalizing', make sure that we transform any remaining bytes in our working buffer: u_int64_t const byteCount = fBitCount>>3; unsigned bufferBytesInUse = (unsigned)(byteCount&0x3F); unsigned numPaddingBytes = (bufferBytesInUse < 56) ? 
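/*
 (Illustrative aside.) MD5 pads so that the message length becomes congruent
 to 56 (mod 64); the final 8 bytes of the last 64-byte block then carry the
 bit count that is appended just below. Hence the two cases being computed
 here:

   bufferBytesInUse = 10  =>  numPaddingBytes = 56 - 10      = 46
   bufferBytesInUse = 60  =>  numPaddingBytes = 64 + 56 - 60 = 60

 (With 56 or more bytes already in use, the padding has to spill into an
 extra 64-byte block, hence the added 64 in the second case.)
*/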
(56 - bufferBytesInUse) : (64 + 56 - bufferBytesInUse); addData(PADDING, numPaddingBytes); addData(bitCountInBytes, 8); // Unpack our 'state' into the output digest: unpack32(&outputDigestInBytes[0], fState[0]); unpack32(&outputDigestInBytes[4], fState[1]); unpack32(&outputDigestInBytes[8], fState[2]); unpack32(&outputDigestInBytes[12], fState[3]); zeroize(); } void MD5Context::zeroize() { fState[0] = fState[1] = fState[2] = fState[3] = 0; fBitCount = 0; for (unsigned i = 0; i < 64; ++i) fWorkingBuffer[i] = 0; } ////////// Implementation of the MD5 transform ("MD5Context::transform64Bytes()") ////////// // Constants for the transform: #define S11 7 #define S12 12 #define S13 17 #define S14 22 #define S21 5 #define S22 9 #define S23 14 #define S24 20 #define S31 4 #define S32 11 #define S33 16 #define S34 23 #define S41 6 #define S42 10 #define S43 15 #define S44 21 // Basic MD5 functions: #define F(x, y, z) (((x) & (y)) | ((~x) & (z))) #define G(x, y, z) (((x) & (z)) | ((y) & (~z))) #define H(x, y, z) ((x) ^ (y) ^ (z)) #define I(x, y, z) ((y) ^ ((x) | (~z))) // Rotate "x" left "n" bits: #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n)))) // Other transforms: #define FF(a, b, c, d, x, s, ac) { \ (a) += F((b), (c), (d)) + (x) + (u_int32_t)(ac); \ (a) = ROTATE_LEFT((a), (s)); \ (a) += (b); \ } #define GG(a, b, c, d, x, s, ac) { \ (a) += G((b), (c), (d)) + (x) + (u_int32_t)(ac); \ (a) = ROTATE_LEFT((a), (s)); \ (a) += (b); \ } #define HH(a, b, c, d, x, s, ac) { \ (a) += H((b), (c), (d)) + (x) + (u_int32_t)(ac); \ (a) = ROTATE_LEFT((a), (s)); \ (a) += (b); \ } #define II(a, b, c, d, x, s, ac) { \ (a) += I((b), (c), (d)) + (x) + (u_int32_t)(ac); \ (a) = ROTATE_LEFT((a), (s)); \ (a) += (b); \ } void MD5Context::transform64Bytes(unsigned char const block[64]) { u_int32_t a = fState[0], b = fState[1], c = fState[2], d = fState[3]; // Begin by packing "block" into an array ("x") of 16 32-bit values (in little-endian order): u_int32_t x[16]; for (unsigned i = 0, j = 0; i < 16; ++i, j += 4) { x[i] = ((u_int32_t)block[j]) | (((u_int32_t)block[j+1]) << 8) | (((u_int32_t)block[j+2]) << 16) | (((u_int32_t)block[j+3]) << 24); } // Now, perform the transform on the array "x": // Round 1 FF(a, b, c, d, x[0], S11, 0xd76aa478); // 1 FF(d, a, b, c, x[1], S12, 0xe8c7b756); // 2 FF(c, d, a, b, x[2], S13, 0x242070db); // 3 FF(b, c, d, a, x[3], S14, 0xc1bdceee); // 4 FF(a, b, c, d, x[4], S11, 0xf57c0faf); // 5 FF(d, a, b, c, x[5], S12, 0x4787c62a); // 6 FF(c, d, a, b, x[6], S13, 0xa8304613); // 7 FF(b, c, d, a, x[7], S14, 0xfd469501); // 8 FF(a, b, c, d, x[8], S11, 0x698098d8); // 9 FF(d, a, b, c, x[9], S12, 0x8b44f7af); // 10 FF(c, d, a, b, x[10], S13, 0xffff5bb1); // 11 FF(b, c, d, a, x[11], S14, 0x895cd7be); // 12 FF(a, b, c, d, x[12], S11, 0x6b901122); // 13 FF(d, a, b, c, x[13], S12, 0xfd987193); // 14 FF(c, d, a, b, x[14], S13, 0xa679438e); // 15 FF(b, c, d, a, x[15], S14, 0x49b40821); // 16 // Round 2 GG(a, b, c, d, x[1], S21, 0xf61e2562); // 17 GG(d, a, b, c, x[6], S22, 0xc040b340); // 18 GG(c, d, a, b, x[11], S23, 0x265e5a51); // 19 GG(b, c, d, a, x[0], S24, 0xe9b6c7aa); // 20 GG(a, b, c, d, x[5], S21, 0xd62f105d); // 21 GG(d, a, b, c, x[10], S22, 0x2441453); // 22 GG(c, d, a, b, x[15], S23, 0xd8a1e681); // 23 GG(b, c, d, a, x[4], S24, 0xe7d3fbc8); // 24 GG(a, b, c, d, x[9], S21, 0x21e1cde6); // 25 GG(d, a, b, c, x[14], S22, 0xc33707d6); // 26 GG(c, d, a, b, x[3], S23, 0xf4d50d87); // 27 GG(b, c, d, a, x[8], S24, 0x455a14ed); // 28 GG(a, b, c, d, x[13], S21, 0xa9e3e905); // 29 GG(d, a, b, c, x[2], 
S22, 0xfcefa3f8); // 30 GG(c, d, a, b, x[7], S23, 0x676f02d9); // 31 GG(b, c, d, a, x[12], S24, 0x8d2a4c8a); // 32 // Round 3 HH(a, b, c, d, x[5], S31, 0xfffa3942); // 33 HH(d, a, b, c, x[8], S32, 0x8771f681); // 34 HH(c, d, a, b, x[11], S33, 0x6d9d6122); // 35 HH(b, c, d, a, x[14], S34, 0xfde5380c); // 36 HH(a, b, c, d, x[1], S31, 0xa4beea44); // 37 HH(d, a, b, c, x[4], S32, 0x4bdecfa9); // 38 HH(c, d, a, b, x[7], S33, 0xf6bb4b60); // 39 HH(b, c, d, a, x[10], S34, 0xbebfbc70); // 40 HH(a, b, c, d, x[13], S31, 0x289b7ec6); // 41 HH(d, a, b, c, x[0], S32, 0xeaa127fa); // 42 HH(c, d, a, b, x[3], S33, 0xd4ef3085); // 43 HH(b, c, d, a, x[6], S34, 0x4881d05); // 44 HH(a, b, c, d, x[9], S31, 0xd9d4d039); // 45 HH(d, a, b, c, x[12], S32, 0xe6db99e5); // 46 HH(c, d, a, b, x[15], S33, 0x1fa27cf8); // 47 HH(b, c, d, a, x[2], S34, 0xc4ac5665); // 48 // Round 4 II(a, b, c, d, x[0], S41, 0xf4292244); // 49 II(d, a, b, c, x[7], S42, 0x432aff97); // 50 II(c, d, a, b, x[14], S43, 0xab9423a7); // 51 II(b, c, d, a, x[5], S44, 0xfc93a039); // 52 II(a, b, c, d, x[12], S41, 0x655b59c3); // 53 II(d, a, b, c, x[3], S42, 0x8f0ccc92); // 54 II(c, d, a, b, x[10], S43, 0xffeff47d); // 55 II(b, c, d, a, x[1], S44, 0x85845dd1); // 56 II(a, b, c, d, x[8], S41, 0x6fa87e4f); // 57 II(d, a, b, c, x[15], S42, 0xfe2ce6e0); // 58 II(c, d, a, b, x[6], S43, 0xa3014314); // 59 II(b, c, d, a, x[13], S44, 0x4e0811a1); // 60 II(a, b, c, d, x[4], S41, 0xf7537e82); // 61 II(d, a, b, c, x[11], S42, 0xbd3af235); // 62 II(c, d, a, b, x[2], S43, 0x2ad7d2bb); // 63 II(b, c, d, a, x[9], S44, 0xeb86d391); // 64 fState[0] += a; fState[1] += b; fState[2] += c; fState[3] += d; // Zeroize sensitive information. for (unsigned k = 0; k < 16; ++k) x[k] = 0; } live/liveMedia/Base64.cpp000444 001751 000000 00000010717 12265042432 015374 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
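/*
 (Illustrative usage sketch for the "base64Encode()"/"base64Decode()" routines
 implemented below.)

   char* encoded = base64Encode("Man", 3); // yields "TWFu"
   unsigned decodedSize;
   unsigned char* decoded = base64Decode(encoded, decodedSize, True);
   // decodedSize == 3; decoded[0..2] == 'M','a','n'
   delete[] encoded; delete[] decoded;

 Each group of 3 input bytes maps to 4 output characters: "Man" is
 0x4D,0x61,0x6E = 010011 010110 000101 101110, i.e. alphabet indexes
 19,22,5,46, i.e. 'T','W','F','u'.
*/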
// Base64 encoding and decoding
// implementation

#include "Base64.hh"
#include <strDup.hh>
#include <string.h>

static char base64DecodeTable[256];

static void initBase64DecodeTable() {
  int i;
  for (i = 0; i < 256; ++i) base64DecodeTable[i] = (char)0x80;
      // default value: invalid

  for (i = 'A'; i <= 'Z'; ++i) base64DecodeTable[i] = 0 + (i - 'A');
  for (i = 'a'; i <= 'z'; ++i) base64DecodeTable[i] = 26 + (i - 'a');
  for (i = '0'; i <= '9'; ++i) base64DecodeTable[i] = 52 + (i - '0');
  base64DecodeTable[(unsigned char)'+'] = 62;
  base64DecodeTable[(unsigned char)'/'] = 63;
  base64DecodeTable[(unsigned char)'='] = 0;
}

unsigned char* base64Decode(char const* in, unsigned& resultSize,
                            Boolean trimTrailingZeros) {
  if (in == NULL) return NULL; // sanity check
  return base64Decode(in, strlen(in), resultSize, trimTrailingZeros);
}

unsigned char* base64Decode(char const* in, unsigned inSize,
                            unsigned& resultSize,
                            Boolean trimTrailingZeros) {
  static Boolean haveInitializedBase64DecodeTable = False;
  if (!haveInitializedBase64DecodeTable) {
    initBase64DecodeTable();
    haveInitializedBase64DecodeTable = True;
  }

  unsigned char* out = (unsigned char*)strDupSize(in); // ensures we have enough space
  int k = 0;
  int paddingCount = 0;
  int const jMax = inSize - 3;
      // in case "inSize" is not a multiple of 4 (although it should be)
  for (int j = 0; j < jMax; j += 4) {
    char inTmp[4], outTmp[4];
    for (int i = 0; i < 4; ++i) {
      inTmp[i] = in[i+j];
      if (inTmp[i] == '=') ++paddingCount;
      outTmp[i] = base64DecodeTable[(unsigned char)inTmp[i]];
      if ((outTmp[i]&0x80) != 0) outTmp[i] = 0; // this happens only if there was an invalid character; pretend that it was 'A'
    }

    out[k++] = (outTmp[0]<<2) | (outTmp[1]>>4);
    out[k++] = (outTmp[1]<<4) | (outTmp[2]>>2);
    out[k++] = (outTmp[2]<<6) | outTmp[3];
  }

  if (trimTrailingZeros) {
    while (paddingCount > 0 && k > 0 && out[k-1] == '\0') { --k; --paddingCount; }
  }
  resultSize = k;
  unsigned char* result = new unsigned char[resultSize];
  memmove(result, out, resultSize);
  delete[] out;

  return result;
}

static const char base64Char[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

char* base64Encode(char const* origSigned, unsigned origLength) {
  unsigned char const* orig = (unsigned char const*)origSigned; // in case any input bytes have the MSB set
  if (orig == NULL) return NULL;

  unsigned const numOrig24BitValues = origLength/3;
  Boolean havePadding = origLength > numOrig24BitValues*3;
  Boolean havePadding2 = origLength == numOrig24BitValues*3 + 2;
  unsigned const numResultBytes = 4*(numOrig24BitValues + havePadding);
  char* result = new char[numResultBytes+1]; // allow for trailing '\0'

  // Map each full group of 3 input bytes into 4 output base-64 characters:
  unsigned i;
  for (i = 0; i < numOrig24BitValues; ++i) {
    result[4*i+0] = base64Char[(orig[3*i]>>2)&0x3F];
    result[4*i+1] = base64Char[(((orig[3*i]&0x3)<<4) | (orig[3*i+1]>>4))&0x3F];
    result[4*i+2] = base64Char[((orig[3*i+1]<<2) | (orig[3*i+2]>>6))&0x3F];
    result[4*i+3] = base64Char[orig[3*i+2]&0x3F];
  }

  // Now, take padding into account.
(Note: i == numOrig24BitValues) if (havePadding) { result[4*i+0] = base64Char[(orig[3*i]>>2)&0x3F]; if (havePadding2) { result[4*i+1] = base64Char[(((orig[3*i]&0x3)<<4) | (orig[3*i+1]>>4))&0x3F]; result[4*i+2] = base64Char[(orig[3*i+1]<<2)&0x3F]; } else { result[4*i+1] = base64Char[((orig[3*i]&0x3)<<4)&0x3F]; result[4*i+2] = '='; } result[4*i+3] = '='; } result[numResultBytes] = '\0'; return result; } live/liveMedia/MPEG2TransportStreamMultiplexor.cpp000444 001751 000000 00000043036 12265042432 022520 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class for generating MPEG-2 Transport Stream from one or more input // Elementary Stream data sources // Implementation #include "MPEG2TransportStreamMultiplexor.hh" #define TRANSPORT_PACKET_SIZE 188 #define PAT_PERIOD 100 // # of packets between Program Association Tables #define PMT_PERIOD 500 // # of packets between Program Map Tables #define PID_TABLE_SIZE 256 MPEG2TransportStreamMultiplexor ::MPEG2TransportStreamMultiplexor(UsageEnvironment& env) : FramedSource(env), fHaveVideoStreams(True/*by default*/), fOutgoingPacketCounter(0), fProgramMapVersion(0), fPreviousInputProgramMapVersion(0xFF), fCurrentInputProgramMapVersion(0xFF), fPCR_PID(0), fCurrentPID(0), fInputBuffer(NULL), fInputBufferSize(0), fInputBufferBytesUsed(0), fIsFirstAdaptationField(True) { for (unsigned i = 0; i < PID_TABLE_SIZE; ++i) { fPIDState[i].counter = 0; fPIDState[i].streamType = 0; } } MPEG2TransportStreamMultiplexor::~MPEG2TransportStreamMultiplexor() { } void MPEG2TransportStreamMultiplexor::doGetNextFrame() { if (fInputBufferBytesUsed >= fInputBufferSize) { // No more bytes are available from the current buffer. // Arrange to read a new one. 
awaitNewBuffer(fInputBuffer); return; } do { // Periodically return a Program Association Table packet instead: if (fOutgoingPacketCounter++ % PAT_PERIOD == 0) { deliverPATPacket(); break; } // Periodically (or when we see a new PID) return a Program Map Table instead: Boolean programMapHasChanged = fPIDState[fCurrentPID].counter == 0 || fCurrentInputProgramMapVersion != fPreviousInputProgramMapVersion; if (fOutgoingPacketCounter % PMT_PERIOD == 0 || programMapHasChanged) { if (programMapHasChanged) { // reset values for next time: fPIDState[fCurrentPID].counter = 1; fPreviousInputProgramMapVersion = fCurrentInputProgramMapVersion; } deliverPMTPacket(programMapHasChanged); break; } // Normal case: Deliver (or continue delivering) the recently-read data: deliverDataToClient(fCurrentPID, fInputBuffer, fInputBufferSize, fInputBufferBytesUsed); } while (0); // NEED TO SET fPresentationTime, durationInMicroseconds ##### // Complete the delivery to the client: if ((fOutgoingPacketCounter%10) == 0) { // To avoid excessive recursion (and stack overflow) caused by excessively large input frames, // occasionally return to the event loop to do this: envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this); } else { afterGetting(this); } } void MPEG2TransportStreamMultiplexor ::handleNewBuffer(unsigned char* buffer, unsigned bufferSize, int mpegVersion, MPEG1or2Demux::SCR scr) { if (bufferSize < 4) return; fInputBuffer = buffer; fInputBufferSize = bufferSize; fInputBufferBytesUsed = 0; u_int8_t stream_id = fInputBuffer[3]; // Use "stream_id" directly as our PID. // Also, figure out the Program Map 'stream type' from this. if (stream_id == 0xBE) { // padding_stream; ignore fInputBufferSize = 0; } else if (stream_id == 0xBC) { // program_stream_map setProgramStreamMap(fInputBufferSize); fInputBufferSize = 0; // then, ignore the buffer } else { fCurrentPID = stream_id; // Set the stream's type: u_int8_t& streamType = fPIDState[fCurrentPID].streamType; // alias if (streamType == 0) { // Instead, set the stream's type to default values, based on whether // the stream is audio or video, and whether it's MPEG-1 or MPEG-2: if ((stream_id&0xF0) == 0xE0) { // video streamType = mpegVersion == 1 ? 1 : mpegVersion == 2 ? 2 : mpegVersion == 4 ? 0x10 : mpegVersion == 5/*H.264*/ ? 0x1B : 0x24/*assume H.265*/; } else if ((stream_id&0xE0) == 0xC0) { // audio streamType = mpegVersion == 1 ? 3 : mpegVersion == 2 ? 
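/*
 (Illustrative aside.) The default 'stream_type' codes being assigned here are
 the standard ISO/IEC 13818-1 (plus ATSC/DVB) registrations:

   0x01 MPEG-1 video         0x02 MPEG-2 video
   0x03 MPEG-1 audio         0x04 MPEG-2 audio
   0x0F AAC audio (ADTS)     0x10 MPEG-4 visual
   0x1B H.264 video          0x24 H.265 video
   0x06 PES private data (AC-3 in DVB)
   0x81 AC-3 audio (ATSC)
*/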
4 : 0xF; } else if (stream_id == 0xBD) { // private_stream1 (usually AC-3) streamType = 0x06; // for DVB; for ATSC, use 0x81 } else { // something else, e.g., AC-3 uses private_stream1 (0xBD) streamType = 0x81; // private } } if (fPCR_PID == 0) { // set it to this stream, if it's appropriate: if ((!fHaveVideoStreams && (streamType == 3 || streamType == 4 || streamType == 0xF))/* audio stream */ || (streamType == 1 || streamType == 2 || streamType == 0x10 || streamType == 0x1B || streamType == 0x24)/* video stream */) { fPCR_PID = fCurrentPID; // use this stream's SCR for PCR } } if (fCurrentPID == fPCR_PID) { // Record the input's current SCR timestamp, for use as our PCR: fPCR = scr; } } // Now that we have new input data, retry the last delivery to the client: doGetNextFrame(); } void MPEG2TransportStreamMultiplexor ::deliverDataToClient(u_int8_t pid, unsigned char* buffer, unsigned bufferSize, unsigned& startPositionInBuffer) { // Construct a new Transport packet, and deliver it to the client: if (fMaxSize < TRANSPORT_PACKET_SIZE) { fFrameSize = 0; // the client hasn't given us enough space; deliver nothing fNumTruncatedBytes = TRANSPORT_PACKET_SIZE; } else { fFrameSize = TRANSPORT_PACKET_SIZE; Boolean willAddPCR = pid == fPCR_PID && startPositionInBuffer == 0 && !(fPCR.highBit == 0 && fPCR.remainingBits == 0 && fPCR.extension == 0); unsigned const numBytesAvailable = bufferSize - startPositionInBuffer; unsigned numHeaderBytes = 4; // by default unsigned numPCRBytes = 0; // by default unsigned numPaddingBytes = 0; // by default unsigned numDataBytes; u_int8_t adaptation_field_control; if (willAddPCR) { adaptation_field_control = 0x30; numHeaderBytes += 2; // for the "adaptation_field_length" and flags numPCRBytes = 6; if (numBytesAvailable >= TRANSPORT_PACKET_SIZE - numHeaderBytes - numPCRBytes) { numDataBytes = TRANSPORT_PACKET_SIZE - numHeaderBytes - numPCRBytes; } else { numDataBytes = numBytesAvailable; numPaddingBytes = TRANSPORT_PACKET_SIZE - numHeaderBytes - numPCRBytes - numDataBytes; } } else if (numBytesAvailable >= TRANSPORT_PACKET_SIZE - numHeaderBytes) { // This is the common case adaptation_field_control = 0x10; numDataBytes = TRANSPORT_PACKET_SIZE - numHeaderBytes; } else { adaptation_field_control = 0x30; ++numHeaderBytes; // for the "adaptation_field_length" // ASSERT: numBytesAvailable <= TRANSPORT_PACKET_SIZE - numHeaderBytes numDataBytes = numBytesAvailable; if (numDataBytes < TRANSPORT_PACKET_SIZE - numHeaderBytes) { ++numHeaderBytes; // for the adaptation field flags numPaddingBytes = TRANSPORT_PACKET_SIZE - numHeaderBytes - numDataBytes; } } // ASSERT: numHeaderBytes+numPCRBytes+numPaddingBytes+numDataBytes // == TRANSPORT_PACKET_SIZE // Fill in the header of the Transport Stream packet: unsigned char* header = fTo; *header++ = 0x47; // sync_byte *header++ = (startPositionInBuffer == 0) ? 0x40 : 0x00; // transport_error_indicator, payload_unit_start_indicator, transport_priority, // first 5 bits of PID *header++ = pid; // last 8 bits of PID unsigned& continuity_counter = fPIDState[pid].counter; // alias *header++ = adaptation_field_control|(continuity_counter&0x0F); // transport_scrambling_control, adaptation_field_control, continuity_counter ++continuity_counter; if (adaptation_field_control == 0x30) { // Add an adaptation field: u_int8_t adaptation_field_length = (numHeaderBytes == 5) ? 0 : 1 + numPCRBytes + numPaddingBytes; *header++ = adaptation_field_length; if (numHeaderBytes > 5) { u_int8_t flags = willAddPCR ? 
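/*
 (Illustrative aside.) A PCR is a 33-bit 'base' in 90 kHz units plus a 9-bit
 'extension' in 27 MHz units (PCR = base*300 + extension). The 6 bytes written
 just below pack it as:

   bytes 0..3 : top 32 bits of the base ((highBit<<31) | (remainingBits>>1))
   byte  4    : low bit of the base, six reserved '1' bits (0x7E), extension bit 8
   byte  5    : extension bits 7..0

 For example, base == 90000 with extension == 0 encodes a clock value of
 exactly 1.0 second.
*/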
0x10 : 0x00; if (fIsFirstAdaptationField) { flags |= 0x80; // discontinuity_indicator fIsFirstAdaptationField = False; } *header++ = flags; if (willAddPCR) { u_int32_t pcrHigh32Bits = (fPCR.highBit<<31) | (fPCR.remainingBits>>1); u_int8_t pcrLowBit = fPCR.remainingBits&1; u_int8_t extHighBit = (fPCR.extension&0x100)>>8; *header++ = pcrHigh32Bits>>24; *header++ = pcrHigh32Bits>>16; *header++ = pcrHigh32Bits>>8; *header++ = pcrHigh32Bits; *header++ = (pcrLowBit<<7)|0x7E|extHighBit; *header++ = (u_int8_t)fPCR.extension; // low 8 bits of extension } } } // Add any padding bytes: for (unsigned i = 0; i < numPaddingBytes; ++i) *header++ = 0xFF; // Finally, add the data bytes: memmove(header, &buffer[startPositionInBuffer], numDataBytes); startPositionInBuffer += numDataBytes; } } static u_int32_t calculateCRC(u_int8_t* data, unsigned dataLength); // forward #define PAT_PID 0 #define OUR_PROGRAM_NUMBER 1 #define OUR_PROGRAM_MAP_PID 0x30 void MPEG2TransportStreamMultiplexor::deliverPATPacket() { // First, create a new buffer for the PAT packet: unsigned const patSize = TRANSPORT_PACKET_SIZE - 4; // allow for the 4-byte header unsigned char* patBuffer = new unsigned char[patSize]; // and fill it in: unsigned char* pat = patBuffer; *pat++ = 0; // pointer_field *pat++ = 0; // table_id *pat++ = 0xB0; // section_syntax_indicator; 0; reserved, section_length (high) *pat++ = 13; // section_length (low) *pat++ = 0; *pat++ = 1; // transport_stream_id *pat++ = 0xC3; // reserved; version_number; current_next_indicator *pat++ = 0; // section_number *pat++ = 0; // last_section_number *pat++ = OUR_PROGRAM_NUMBER>>8; *pat++ = OUR_PROGRAM_NUMBER; // program_number *pat++ = 0xE0|(OUR_PROGRAM_MAP_PID>>8); // reserved; program_map_PID (high) *pat++ = OUR_PROGRAM_MAP_PID; // program_map_PID (low) // Compute the CRC from the bytes we currently have (not including "pointer_field"): u_int32_t crc = calculateCRC(patBuffer+1, pat - (patBuffer+1)); *pat++ = crc>>24; *pat++ = crc>>16; *pat++ = crc>>8; *pat++ = crc; // Fill in the rest of the packet with padding bytes: while (pat < &patBuffer[patSize]) *pat++ = 0xFF; // Deliver the packet: unsigned startPosition = 0; deliverDataToClient(PAT_PID, patBuffer, patSize, startPosition); // Finally, remove the new buffer: delete[] patBuffer; } void MPEG2TransportStreamMultiplexor::deliverPMTPacket(Boolean hasChanged) { if (hasChanged) ++fProgramMapVersion; // First, create a new buffer for the PMT packet: unsigned const pmtSize = TRANSPORT_PACKET_SIZE - 4; // allow for the 4-byte header unsigned char* pmtBuffer = new unsigned char[pmtSize]; // and fill it in: unsigned char* pmt = pmtBuffer; *pmt++ = 0; // pointer_field *pmt++ = 2; // table_id *pmt++ = 0xB0; // section_syntax_indicator; 0; reserved, section_length (high) unsigned char* section_lengthPtr = pmt; // save for later *pmt++ = 0; // section_length (low) (fill in later) *pmt++ = OUR_PROGRAM_NUMBER>>8; *pmt++ = OUR_PROGRAM_NUMBER; // program_number *pmt++ = 0xC1|((fProgramMapVersion&0x1F)<<1); // reserved; version_number; current_next_indicator *pmt++ = 0; // section_number *pmt++ = 0; // last_section_number *pmt++ = 0xE0; // reserved; PCR_PID (high) *pmt++ = fPCR_PID; // PCR_PID (low) *pmt++ = 0xF0; // reserved; program_info_length (high) *pmt++ = 0; // program_info_length (low) for (int pid = 0; pid < PID_TABLE_SIZE; ++pid) { if (fPIDState[pid].streamType != 0) { // This PID gets recorded in the table *pmt++ = fPIDState[pid].streamType; *pmt++ = 0xE0; // reserved; elementary_pid (high) *pmt++ = pid; // elementary_pid (low) 
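/*
 (Illustrative aside.) 'section_length', filled in after this loop, counts
 every byte following the section_length field itself, CRC included. With the
 9 remaining fixed header bytes, 5 bytes per elementary stream, and a 4-byte
 CRC, a PMT describing N streams gets:

   section_length = 9 + 5*N + 4

 e.g. 18 for a single-stream program (compare the fixed value 13 used for the
 PAT above, which lists exactly one program).
*/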
*pmt++ = 0xF0; // reserved; ES_info_length (high) *pmt++ = 0; // ES_info_length (low) } } unsigned section_length = pmt - (section_lengthPtr+1) + 4 /*for CRC*/; *section_lengthPtr = section_length; // Compute the CRC from the bytes we currently have (not including "pointer_field"): u_int32_t crc = calculateCRC(pmtBuffer+1, pmt - (pmtBuffer+1)); *pmt++ = crc>>24; *pmt++ = crc>>16; *pmt++ = crc>>8; *pmt++ = crc; // Fill in the rest of the packet with padding bytes: while (pmt < &pmtBuffer[pmtSize]) *pmt++ = 0xFF; // Deliver the packet: unsigned startPosition = 0; deliverDataToClient(OUR_PROGRAM_MAP_PID, pmtBuffer, pmtSize, startPosition); // Finally, remove the new buffer: delete[] pmtBuffer; } void MPEG2TransportStreamMultiplexor::setProgramStreamMap(unsigned frameSize) { if (frameSize <= 16) return; // program_stream_map is too small to be useful if (frameSize > 0xFF) return; // program_stream_map is too large u_int16_t program_stream_map_length = (fInputBuffer[4]<<8) | fInputBuffer[5]; if ((u_int16_t)frameSize > 6+program_stream_map_length) { frameSize = 6+program_stream_map_length; } u_int8_t versionByte = fInputBuffer[6]; if ((versionByte&0x80) == 0) return; // "current_next_indicator" is not set fCurrentInputProgramMapVersion = versionByte&0x1F; u_int16_t program_stream_info_length = (fInputBuffer[8]<<8) | fInputBuffer[9]; unsigned offset = 10 + program_stream_info_length; // skip over 'descriptors' u_int16_t elementary_stream_map_length = (fInputBuffer[offset]<<8) | fInputBuffer[offset+1]; offset += 2; frameSize -= 4; // sizeof CRC_32 if (frameSize > offset + elementary_stream_map_length) { frameSize = offset + elementary_stream_map_length; } while (offset + 4 <= frameSize) { u_int8_t stream_type = fInputBuffer[offset]; u_int8_t elementary_stream_id = fInputBuffer[offset+1]; fPIDState[elementary_stream_id].streamType = stream_type; u_int16_t elementary_stream_info_length = (fInputBuffer[offset+2]<<8) | fInputBuffer[offset+3]; offset += 4 + elementary_stream_info_length; } } static u_int32_t const CRC32[256] = { 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75, 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039, 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d, 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1, 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde, 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba, 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6, 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2, 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 
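/*
 (Illustrative aside.) This table implements the MPEG-2 PSI CRC: polynomial
 0x04C11DB7, initial value 0xFFFFFFFF, no bit reflection, no final XOR.
 "calculateCRC()" below is the table-driven equivalent of this bit-at-a-time
 loop:

   u_int32_t crc = 0xFFFFFFFF;
   while (dataLength-- > 0) {
     crc ^= ((u_int32_t)(*data++)) << 24;
     for (int k = 0; k < 8; ++k) {
       crc = (crc & 0x80000000) != 0 ? (crc << 1) ^ 0x04C11DB7 : (crc << 1);
     }
   }

 A receiver that runs the same CRC over an entire section, trailing CRC
 included, gets 0 for an undamaged section.
*/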
0xd1799b34, 0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53, 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b, 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b, 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7, 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8, 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec, 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0, 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4, 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4 }; static u_int32_t calculateCRC(u_int8_t* data, unsigned dataLength) { u_int32_t crc = 0xFFFFFFFF; while (dataLength-- > 0) { crc = (crc<<8) ^ CRC32[(crc>>24) ^ (u_int32_t)(*data++)]; } return crc; } live/liveMedia/MPEG2TransportFileServerMediaSubsession.cpp000444 001751 000000 00000033037 12265042432 024104 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a MPEG-2 Transport Stream file. // Implementation #include "MPEG2TransportFileServerMediaSubsession.hh" #include "SimpleRTPSink.hh" MPEG2TransportFileServerMediaSubsession* MPEG2TransportFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, char const* indexFileName, Boolean reuseFirstSource) { MPEG2TransportStreamIndexFile* indexFile; if (indexFileName != NULL && reuseFirstSource) { // It makes no sense to support trick play if all clients use the same source. 
Fix this: env << "MPEG2TransportFileServerMediaSubsession::createNew(): ignoring the index file name, because \"reuseFirstSource\" is set\n"; indexFile = NULL; } else { indexFile = MPEG2TransportStreamIndexFile::createNew(env, indexFileName); } return new MPEG2TransportFileServerMediaSubsession(env, fileName, indexFile, reuseFirstSource); } MPEG2TransportFileServerMediaSubsession ::MPEG2TransportFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, MPEG2TransportStreamIndexFile* indexFile, Boolean reuseFirstSource) : FileServerMediaSubsession(env, fileName, reuseFirstSource), fIndexFile(indexFile), fDuration(0.0), fClientSessionHashTable(NULL) { if (fIndexFile != NULL) { // we support 'trick play' fDuration = fIndexFile->getPlayingDuration(); fClientSessionHashTable = HashTable::create(ONE_WORD_HASH_KEYS); } } MPEG2TransportFileServerMediaSubsession ::~MPEG2TransportFileServerMediaSubsession() { if (fIndexFile != NULL) { // we support 'trick play' Medium::close(fIndexFile); // Clean out the client session hash table: while (1) { ClientTrickPlayState* client = (ClientTrickPlayState*)(fClientSessionHashTable->RemoveNext()); if (client == NULL) break; delete client; } delete fClientSessionHashTable; } } #define TRANSPORT_PACKET_SIZE 188 #define TRANSPORT_PACKETS_PER_NETWORK_PACKET 7 // The product of these two numbers must be enough to fit within a network packet void MPEG2TransportFileServerMediaSubsession ::startStream(unsigned clientSessionId, void* streamToken, TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData, unsigned short& rtpSeqNum, unsigned& rtpTimestamp, ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler, void* serverRequestAlternativeByteHandlerClientData) { if (fIndexFile != NULL) { // we support 'trick play' ClientTrickPlayState* client = lookupClient(clientSessionId); if (client != NULL && client->areChangingScale()) { // First, handle this like a "PAUSE", except that we back up to the previous VSH client->updateStateOnPlayChange(True); OnDemandServerMediaSubsession::pauseStream(clientSessionId, streamToken); // Then, adjust for the change of scale: client->updateStateOnScaleChange(); } } // Call the original, default version of this routine: OnDemandServerMediaSubsession::startStream(clientSessionId, streamToken, rtcpRRHandler, rtcpRRHandlerClientData, rtpSeqNum, rtpTimestamp, serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData); } void MPEG2TransportFileServerMediaSubsession ::pauseStream(unsigned clientSessionId, void* streamToken) { if (fIndexFile != NULL) { // we support 'trick play' ClientTrickPlayState* client = lookupClient(clientSessionId); if (client != NULL) { client->updateStateOnPlayChange(False); } } // Call the original, default version of this routine: OnDemandServerMediaSubsession::pauseStream(clientSessionId, streamToken); } void MPEG2TransportFileServerMediaSubsession ::seekStream(unsigned clientSessionId, void* streamToken, double& seekNPT, double streamDuration, u_int64_t& numBytes) { // Begin by calling the original, default version of this routine: OnDemandServerMediaSubsession::seekStream(clientSessionId, streamToken, seekNPT, streamDuration, numBytes); // Then, special handling specific to indexed Transport Stream files: if (fIndexFile != NULL) { // we support 'trick play' ClientTrickPlayState* client = lookupClient(clientSessionId); if (client != NULL) { unsigned long numTSPacketsToStream = client->updateStateFromNPT(seekNPT, streamDuration); numBytes = 
numTSPacketsToStream*TRANSPORT_PACKET_SIZE; } } } void MPEG2TransportFileServerMediaSubsession ::setStreamScale(unsigned clientSessionId, void* streamToken, float scale) { if (fIndexFile != NULL) { // we support 'trick play' ClientTrickPlayState* client = lookupClient(clientSessionId); if (client != NULL) { client->setNextScale(scale); // scale won't take effect until the next "PLAY" } } // Call the original, default version of this routine: OnDemandServerMediaSubsession::setStreamScale(clientSessionId, streamToken, scale); } void MPEG2TransportFileServerMediaSubsession ::deleteStream(unsigned clientSessionId, void*& streamToken) { if (fIndexFile != NULL) { // we support 'trick play' ClientTrickPlayState* client = lookupClient(clientSessionId); if (client != NULL) { client->updateStateOnPlayChange(False); } } // Call the original, default version of this routine: OnDemandServerMediaSubsession::deleteStream(clientSessionId, streamToken); } ClientTrickPlayState* MPEG2TransportFileServerMediaSubsession::newClientTrickPlayState() { return new ClientTrickPlayState(fIndexFile); } FramedSource* MPEG2TransportFileServerMediaSubsession ::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { // Create the video source: unsigned const inputDataChunkSize = TRANSPORT_PACKETS_PER_NETWORK_PACKET*TRANSPORT_PACKET_SIZE; ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName, inputDataChunkSize); if (fileSource == NULL) return NULL; fFileSize = fileSource->fileSize(); // Use the file size and the duration to estimate the stream's bitrate: if (fFileSize > 0 && fDuration > 0.0) { estBitrate = (unsigned)((int64_t)fFileSize/(125*fDuration) + 0.5); // kbps, rounded } else { estBitrate = 5000; // kbps, estimate } // Create a framer for the Transport Stream: MPEG2TransportStreamFramer* framer = MPEG2TransportStreamFramer::createNew(envir(), fileSource); if (fIndexFile != NULL) { // we support 'trick play' // Keep state for this client (if we don't already have it): ClientTrickPlayState* client = lookupClient(clientSessionId); if (client == NULL) { client = newClientTrickPlayState(); fClientSessionHashTable->Add((char const*)clientSessionId, client); } client->setSource(framer); } return framer; } RTPSink* MPEG2TransportFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char /*rtpPayloadTypeIfDynamic*/, FramedSource* /*inputSource*/) { return SimpleRTPSink::createNew(envir(), rtpGroupsock, 33, 90000, "video", "MP2T", 1, True, False /*no 'M' bit*/); } void MPEG2TransportFileServerMediaSubsession::testScaleFactor(float& scale) { if (fIndexFile != NULL && fDuration > 0.0) { // We support any integral scale, other than 0 int iScale = scale < 0.0 ? 
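/*
 (Illustrative aside.) Two pieces of arithmetic here deserve spelling out.
 "createNewStreamSource()" above converts bytes/second to kilobits/second by
 dividing by 125 (== 1000/8):

   fFileSize = 10000000 bytes, fDuration = 100 s
   estBitrate = 10000000/(125*100) + 0.5, i.e. 800 kbps

 And "testScaleFactor()" here rounds the requested playback speed to the
 nearest non-zero integer: scale 2.3 becomes 2.0, scale -1.6 becomes -2.0,
 and scale 0.2 becomes 1.0 (a rounded result of 0 is replaced by 1).
*/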
(int)(scale - 0.5f) : (int)(scale + 0.5f); // round if (iScale == 0) iScale = 1; scale = (float)iScale; } else { scale = 1.0f; } } float MPEG2TransportFileServerMediaSubsession::duration() const { return fDuration; } ClientTrickPlayState* MPEG2TransportFileServerMediaSubsession ::lookupClient(unsigned clientSessionId) { return (ClientTrickPlayState*)(fClientSessionHashTable->Lookup((char const*)clientSessionId)); } ////////// ClientTrickPlayState implementation ////////// ClientTrickPlayState::ClientTrickPlayState(MPEG2TransportStreamIndexFile* indexFile) : fIndexFile(indexFile), fOriginalTransportStreamSource(NULL), fTrickModeFilter(NULL), fTrickPlaySource(NULL), fFramer(NULL), fScale(1.0f), fNextScale(1.0f), fNPT(0.0f), fTSRecordNum(0), fIxRecordNum(0) { } unsigned long ClientTrickPlayState::updateStateFromNPT(double npt, double streamDuration) { fNPT = (float)npt; // Map "fNPT" to the corresponding Transport Stream and Index record numbers: unsigned long tsRecordNum, ixRecordNum; fIndexFile->lookupTSPacketNumFromNPT(fNPT, tsRecordNum, ixRecordNum); updateTSRecordNum(); if (tsRecordNum != fTSRecordNum) { fTSRecordNum = tsRecordNum; fIxRecordNum = ixRecordNum; // Seek the source to the new record number: reseekOriginalTransportStreamSource(); // Note: We assume that we're asked to seek only in normal // (i.e., non trick play) mode, so we don't seek within the trick // play source (if any). fFramer->clearPIDStatusTable(); } unsigned long numTSRecordsToStream = 0; float pcrLimit = 0.0; if (streamDuration > 0.0) { // fNPT might have changed when we looked it up in the index file. Adjust "streamDuration" accordingly: streamDuration += npt - (double)fNPT; if (streamDuration > 0.0) { // Specify that we want to stream no more data than this. if (fNextScale == 1.0f) { // We'll be streaming from the original file. // Use the index file to figure out how many Transport Packets we get to stream: unsigned long toTSRecordNum, toIxRecordNum; float toNPT = (float)(fNPT + streamDuration); fIndexFile->lookupTSPacketNumFromNPT(toNPT, toTSRecordNum, toIxRecordNum); if (toTSRecordNum > tsRecordNum) { // sanity check numTSRecordsToStream = toTSRecordNum - tsRecordNum; } } else { // We'll be streaming from the trick play stream. // It'd be difficult to figure out how many Transport Packets we need to stream, so instead set a PCR // limit in the trick play stream. (We rely upon the fact that PCRs in the trick play stream start at 0.0) int direction = fNextScale < 0.0 ? 
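/*
 (Illustrative aside.) Because PCRs in the generated trick-play stream start
 at 0, limiting by PCR is equivalent to limiting by duration normalized for
 speed:

   pcrLimit = streamDuration / |fNextScale|

 e.g. a 30-second range played at scale 2 (or -2) stops once the trick-play
 stream's PCR passes 15.0.
*/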
-1 : 1; pcrLimit = (float)(streamDuration/(fNextScale*direction)); } } } fFramer->setNumTSPacketsToStream(numTSRecordsToStream); fFramer->setPCRLimit(pcrLimit); return numTSRecordsToStream; } void ClientTrickPlayState::updateStateOnScaleChange() { fScale = fNextScale; // Change our source objects to reflect the change in scale: // First, close the existing trick play source (if any): if (fTrickPlaySource != NULL) { fTrickModeFilter->forgetInputSource(); // so that the underlying Transport Stream source doesn't get deleted by: Medium::close(fTrickPlaySource); fTrickPlaySource = NULL; fTrickModeFilter = NULL; } if (fNextScale != 1.0f) { // Create a new trick play filter from the original Transport Stream source: UsageEnvironment& env = fIndexFile->envir(); // alias fTrickModeFilter = MPEG2TransportStreamTrickModeFilter ::createNew(env, fOriginalTransportStreamSource, fIndexFile, int(fNextScale)); fTrickModeFilter->seekTo(fTSRecordNum, fIxRecordNum); // And generate a Transport Stream from this: fTrickPlaySource = MPEG2TransportStreamFromESSource::createNew(env); fTrickPlaySource->addNewVideoSource(fTrickModeFilter, fIndexFile->mpegVersion()); fFramer->changeInputSource(fTrickPlaySource); } else { // Switch back to the original Transport Stream source: reseekOriginalTransportStreamSource(); fFramer->changeInputSource(fOriginalTransportStreamSource); } } void ClientTrickPlayState::updateStateOnPlayChange(Boolean reverseToPreviousVSH) { updateTSRecordNum(); if (fTrickPlaySource == NULL) { // We were in regular (1x) play. Use the index file to look up the // index record number and npt from the current transport number: fIndexFile->lookupPCRFromTSPacketNum(fTSRecordNum, reverseToPreviousVSH, fNPT, fIxRecordNum); } else { // We were in trick mode, and so already have the index record number. // Get the transport record number and npt from this: fIxRecordNum = fTrickModeFilter->nextIndexRecordNum(); if ((long)fIxRecordNum < 0) fIxRecordNum = 0; // we were at the start of the file unsigned long transportRecordNum; float pcr; u_int8_t offset, size, recordType; // all dummy if (fIndexFile->readIndexRecordValues(fIxRecordNum, transportRecordNum, offset, size, pcr, recordType)) { fTSRecordNum = transportRecordNum; fNPT = pcr; } } } void ClientTrickPlayState::setSource(MPEG2TransportStreamFramer* framer) { fFramer = framer; fOriginalTransportStreamSource = (ByteStreamFileSource*)(framer->inputSource()); } void ClientTrickPlayState::updateTSRecordNum(){ if (fFramer != NULL) fTSRecordNum += (unsigned long)(fFramer->tsPacketCount()); } void ClientTrickPlayState::reseekOriginalTransportStreamSource() { u_int64_t tsRecordNum64 = (u_int64_t)fTSRecordNum; fOriginalTransportStreamSource->seekToByteAbsolute(tsRecordNum64*TRANSPORT_PACKET_SIZE); } live/liveMedia/MPEG2TransportStreamFramer.cpp000444 001751 000000 00000025515 12265042432 021412 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A filter that passes through (unchanged) chunks that contain an integral number
// of MPEG-2 Transport Stream packets, but returning (in "fDurationInMicroseconds")
// an updated estimate of the time gap between chunks.
// Implementation

#include "MPEG2TransportStreamFramer.hh"
#include <GroupsockHelper.hh> // for "gettimeofday()"

#define TRANSPORT_PACKET_SIZE 188

////////// Definitions of constants that control the behavior of this code /////////

#if !defined(NEW_DURATION_WEIGHT)
#define NEW_DURATION_WEIGHT 0.5
  // How much weight to give to the latest duration measurement (must be <= 1)
#endif

#if !defined(TIME_ADJUSTMENT_FACTOR)
#define TIME_ADJUSTMENT_FACTOR 0.8
  // A factor by which to adjust the duration estimate to ensure that the overall
  // packet transmission times remains matched with the PCR times (which will be the
  // times that we expect receivers to play the incoming packets).
  // (must be <= 1)
#endif

#if !defined(MAX_PLAYOUT_BUFFER_DURATION)
#define MAX_PLAYOUT_BUFFER_DURATION 0.1 // (seconds)
#endif

#if !defined(PCR_PERIOD_VARIATION_RATIO)
#define PCR_PERIOD_VARIATION_RATIO 0.5
#endif

////////// PIDStatus //////////

class PIDStatus {
public:
  PIDStatus(double _firstClock, double _firstRealTime)
    : firstClock(_firstClock), lastClock(_firstClock),
      firstRealTime(_firstRealTime), lastRealTime(_firstRealTime),
      lastPacketNum(0) {
  }

  double firstClock, lastClock, firstRealTime, lastRealTime;
  u_int64_t lastPacketNum;
};

////////// MPEG2TransportStreamFramer //////////

MPEG2TransportStreamFramer* MPEG2TransportStreamFramer
::createNew(UsageEnvironment& env, FramedSource* inputSource) {
  return new MPEG2TransportStreamFramer(env, inputSource);
}

MPEG2TransportStreamFramer
::MPEG2TransportStreamFramer(UsageEnvironment& env, FramedSource* inputSource)
  : FramedFilter(env, inputSource),
    fTSPacketCount(0), fTSPacketDurationEstimate(0.0), fTSPCRCount(0),
    fLimitNumTSPacketsToStream(False), fNumTSPacketsToStream(0),
    fLimitTSPacketsToStreamByPCR(False), fPCRLimit(0.0) {
  fPIDStatusTable = HashTable::create(ONE_WORD_HASH_KEYS);
}

MPEG2TransportStreamFramer::~MPEG2TransportStreamFramer() {
  clearPIDStatusTable();
  delete fPIDStatusTable;
}

void MPEG2TransportStreamFramer::clearPIDStatusTable() {
  PIDStatus* pidStatus;
  while ((pidStatus = (PIDStatus*)fPIDStatusTable->RemoveNext()) != NULL) {
    delete pidStatus;
  }
}

void MPEG2TransportStreamFramer::setNumTSPacketsToStream(unsigned long numTSRecordsToStream) {
  fNumTSPacketsToStream = numTSRecordsToStream;
  fLimitNumTSPacketsToStream = numTSRecordsToStream > 0;
}

void MPEG2TransportStreamFramer::setPCRLimit(float pcrLimit) {
  fPCRLimit = pcrLimit;
  fLimitTSPacketsToStreamByPCR = pcrLimit != 0.0;
}

void MPEG2TransportStreamFramer::doGetNextFrame() {
  if (fLimitNumTSPacketsToStream) {
    if (fNumTSPacketsToStream == 0) {
      handleClosure(this);
      return;
    }
    if (fNumTSPacketsToStream*TRANSPORT_PACKET_SIZE < fMaxSize) {
      fMaxSize = fNumTSPacketsToStream*TRANSPORT_PACKET_SIZE;
    }
  }

  // Read directly from our input source into our client's buffer:
  fFrameSize = 0;
  fInputSource->getNextFrame(fTo, fMaxSize,
                             afterGettingFrame, this,
                             FramedSource::handleClosure, this);
}

void MPEG2TransportStreamFramer::doStopGettingFrames() {
  FramedFilter::doStopGettingFrames();
  fTSPacketCount = 0;
  fTSPCRCount = 0;
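/*
 (Illustrative aside.) The duration estimate maintained by this filter is an
 exponentially-weighted moving average over the PCR gaps seen on each PID:

   durationPerPacket = (thisPCR - lastPCR) / packetsSinceLastPCR
   fTSPacketDurationEstimate = NEW_DURATION_WEIGHT * durationPerPacket
                             + (1 - NEW_DURATION_WEIGHT) * fTSPacketDurationEstimate

 with an extra multiplicative nudge by TIME_ADJUSTMENT_FACTOR whenever the
 wall-clock transmission time runs ahead of (or more than
 MAX_PLAYOUT_BUFFER_DURATION behind) the PCR playout time.
*/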
clearPIDStatusTable(); } void MPEG2TransportStreamFramer ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned /*numTruncatedBytes*/, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) { MPEG2TransportStreamFramer* framer = (MPEG2TransportStreamFramer*)clientData; framer->afterGettingFrame1(frameSize, presentationTime); } #define TRANSPORT_SYNC_BYTE 0x47 void MPEG2TransportStreamFramer::afterGettingFrame1(unsigned frameSize, struct timeval presentationTime) { fFrameSize += frameSize; unsigned const numTSPackets = fFrameSize/TRANSPORT_PACKET_SIZE; fNumTSPacketsToStream -= numTSPackets; fFrameSize = numTSPackets*TRANSPORT_PACKET_SIZE; // an integral # of TS packets if (fFrameSize == 0) { // We didn't read a complete TS packet; assume that the input source has closed. handleClosure(this); return; } // Make sure the data begins with a sync byte: unsigned syncBytePosition; for (syncBytePosition = 0; syncBytePosition < fFrameSize; ++syncBytePosition) { if (fTo[syncBytePosition] == TRANSPORT_SYNC_BYTE) break; } if (syncBytePosition == fFrameSize) { envir() << "No Transport Stream sync byte in data."; handleClosure(this); return; } else if (syncBytePosition > 0) { // There's a sync byte, but not at the start of the data. Move the good data // to the start of the buffer, then read more to fill it up again: memmove(fTo, &fTo[syncBytePosition], fFrameSize - syncBytePosition); fFrameSize -= syncBytePosition; fInputSource->getNextFrame(&fTo[fFrameSize], syncBytePosition, afterGettingFrame, this, FramedSource::handleClosure, this); return; } // else normal case: the data begins with a sync byte fPresentationTime = presentationTime; // Scan through the TS packets that we read, and update our estimate of // the duration of each packet: struct timeval tvNow; gettimeofday(&tvNow, NULL); double timeNow = tvNow.tv_sec + tvNow.tv_usec/1000000.0; for (unsigned i = 0; i < numTSPackets; ++i) { if (!updateTSPacketDurationEstimate(&fTo[i*TRANSPORT_PACKET_SIZE], timeNow)) { // We hit a preset limit (based on PCR) within the stream. Handle this as if the input source has closed: handleClosure(this); return; } } fDurationInMicroseconds = numTSPackets * (unsigned)(fTSPacketDurationEstimate*1000000); // Complete the delivery to our client: afterGetting(this); } Boolean MPEG2TransportStreamFramer::updateTSPacketDurationEstimate(unsigned char* pkt, double timeNow) { // Sanity check: Make sure we start with the sync byte: if (pkt[0] != TRANSPORT_SYNC_BYTE) { envir() << "Missing sync byte!\n"; return True; } ++fTSPacketCount; // If this packet doesn't contain a PCR, then we're not interested in it: u_int8_t const adaptation_field_control = (pkt[3]&0x30)>>4; if (adaptation_field_control != 2 && adaptation_field_control != 3) return True; // there's no adaptation_field u_int8_t const adaptation_field_length = pkt[4]; if (adaptation_field_length == 0) return True; u_int8_t const discontinuity_indicator = pkt[5]&0x80; u_int8_t const pcrFlag = pkt[5]&0x10; if (pcrFlag == 0) return True; // no PCR // There's a PCR. 
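/*
 (Illustrative aside.) The decoding just below: pkt[6..9] hold the top 32 bits
 of the 33-bit, 90 kHz PCR base, so dividing by 45000 (== 90000/2) converts
 them to seconds; bit 7 of pkt[10] is the base's low bit (worth 1/90000 s);
 and the 9-bit extension counts 1/27000000 s units:

   clock = pcrBaseHigh/45000.0 + lowBit/90000.0 + pcrExt/27000000.0
*/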
Get it, and the PID: ++fTSPCRCount; u_int32_t pcrBaseHigh = (pkt[6]<<24)|(pkt[7]<<16)|(pkt[8]<<8)|pkt[9]; double clock = pcrBaseHigh/45000.0; if ((pkt[10]&0x80) != 0) clock += 1/90000.0; // add in low-bit (if set) unsigned short pcrExt = ((pkt[10]&0x01)<<8) | pkt[11]; clock += pcrExt/27000000.0; if (fLimitTSPacketsToStreamByPCR) { if (clock > fPCRLimit) { // We've hit a preset limit within the stream: return False; } } unsigned pid = ((pkt[1]&0x1F)<<8) | pkt[2]; // Check whether we already have a record of a PCR for this PID: PIDStatus* pidStatus = (PIDStatus*)(fPIDStatusTable->Lookup((char*)pid)); if (pidStatus == NULL) { // We're seeing this PID's PCR for the first time: pidStatus = new PIDStatus(clock, timeNow); fPIDStatusTable->Add((char*)pid, pidStatus); #ifdef DEBUG_PCR fprintf(stderr, "PID 0x%x, FIRST PCR 0x%08x+%d:%03x == %f @ %f, pkt #%lu\n", pid, pcrBaseHigh, pkt[10]>>7, pcrExt, clock, timeNow, fTSPacketCount); #endif } else { // We've seen this PID's PCR before; update our per-packet duration estimate: int64_t packetsSinceLast = (int64_t)(fTSPacketCount - pidStatus->lastPacketNum); // it's "int64_t" because some compilers can't convert "u_int64_t" -> "double" double durationPerPacket = (clock - pidStatus->lastClock)/packetsSinceLast; // Hack (suggested by "Romain"): Don't update our estimate if this PCR appeared unusually quickly. // (This can produce more accurate estimates for wildly VBR streams.) double meanPCRPeriod = 0.0; if (fTSPCRCount > 0) { double tsPacketCount = (double)(int64_t)fTSPacketCount; double tsPCRCount = (double)(int64_t)fTSPCRCount; meanPCRPeriod = tsPacketCount/tsPCRCount; if (packetsSinceLast < meanPCRPeriod*PCR_PERIOD_VARIATION_RATIO) return True; } if (fTSPacketDurationEstimate == 0.0) { // we've just started fTSPacketDurationEstimate = durationPerPacket; } else if (discontinuity_indicator == 0 && durationPerPacket >= 0.0) { fTSPacketDurationEstimate = durationPerPacket*NEW_DURATION_WEIGHT + fTSPacketDurationEstimate*(1-NEW_DURATION_WEIGHT); // Also adjust the duration estimate to try to ensure that the transmission // rate matches the playout rate: double transmitDuration = timeNow - pidStatus->firstRealTime; double playoutDuration = clock - pidStatus->firstClock; if (transmitDuration > playoutDuration) { fTSPacketDurationEstimate *= TIME_ADJUSTMENT_FACTOR; // reduce estimate } else if (transmitDuration + MAX_PLAYOUT_BUFFER_DURATION < playoutDuration) { fTSPacketDurationEstimate /= TIME_ADJUSTMENT_FACTOR; // increase estimate } } else { // the PCR has a discontinuity from its previous value; don't use it now, // but reset our PCR and real-time values to compensate: pidStatus->firstClock = clock; pidStatus->firstRealTime = timeNow; } #ifdef DEBUG_PCR fprintf(stderr, "PID 0x%x, PCR 0x%08x+%d:%03x == %f @ %f (diffs %f @ %f), pkt #%lu, discon %d => this duration %f, new estimate %f, mean PCR period=%f\n", pid, pcrBaseHigh, pkt[10]>>7, pcrExt, clock, timeNow, clock - pidStatus->firstClock, timeNow - pidStatus->firstRealTime, fTSPacketCount, discontinuity_indicator != 0, durationPerPacket, fTSPacketDurationEstimate, meanPCRPeriod ); #endif } pidStatus->lastClock = clock; pidStatus->lastRealTime = timeNow; pidStatus->lastPacketNum = fTSPacketCount; return True; } live/liveMedia/MPEG4ESVideoRTPSource.cpp000444 001751 000000 00000004341 12265042432 020146 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software 
Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // MP4V-ES video RTP stream sources // Implementation #include "MPEG4ESVideoRTPSource.hh" ///////// MPEG4ESVideoRTPSource implementation //////// //##### NOTE: INCOMPLETE!!! ##### MPEG4ESVideoRTPSource* MPEG4ESVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new MPEG4ESVideoRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } MPEG4ESVideoRTPSource ::MPEG4ESVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency) { } MPEG4ESVideoRTPSource::~MPEG4ESVideoRTPSource() { } Boolean MPEG4ESVideoRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { // The packet begins a frame iff its data begins with a system code // (i.e., 0x000001??) fCurrentPacketBeginsFrame = packet->dataSize() >= 4 && (packet->data())[0] == 0 && (packet->data())[1] == 0 && (packet->data())[2] == 1; // The RTP "M" (marker) bit indicates the last fragment of a frame: fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); // There is no special header resultSpecialHeaderSize = 0; return True; } char const* MPEG4ESVideoRTPSource::MIMEtype() const { return "video/MP4V-ES"; } live/liveMedia/H261VideoRTPSource.cpp000444 001751 000000 00000004367 12265042432 017532 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
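/*
 (Illustrative sketch; "MyVideoRTPSource" and kHeaderSize are hypothetical
 names, not part of the library.) The MP4V-ES source above and the H.261
 source below both specialize "MultiFramedRTPSource" the same way:
 "processSpecialHeader()" examines each incoming packet, sets the
 frame-boundary flags, and reports how many payload-header bytes to strip:

   Boolean MyVideoRTPSource
   ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) {
     if (packet->dataSize() < kHeaderSize) return False; // too short; discard it
     fCurrentPacketBeginsFrame // e.g. a start-code test, as in MP4V-ES above:
       = packet->dataSize() >= 4 && (packet->data())[0] == 0
         && (packet->data())[1] == 0 && (packet->data())[2] == 1;
     fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); // the RTP 'M' bit
     resultSpecialHeaderSize = kHeaderSize; // bytes preceding the frame data
     return True;
   }
*/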
// H.261 Video RTP Sources // Implementation #include "H261VideoRTPSource.hh" H261VideoRTPSource* H261VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new H261VideoRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } H261VideoRTPSource ::H261VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency), fLastSpecialHeader(0) { } H261VideoRTPSource::~H261VideoRTPSource() { } Boolean H261VideoRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { // There's a 4-byte video-specific header if (packet->dataSize() < 4) return False; unsigned char* headerStart = packet->data(); fLastSpecialHeader = (headerStart[0]<<24)|(headerStart[1]<<16)|(headerStart[2]<<8)|headerStart[3]; #ifdef DELIVER_COMPLETE_FRAMES fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame; // whether the *previous* packet ended a frame // The RTP "M" (marker) bit indicates the last fragment of a frame: fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); #endif resultSpecialHeaderSize = 4; return True; } char const* H261VideoRTPSource::MIMEtype() const { return "video/H261"; } live/liveMedia/MPEG4GenericRTPSource.cpp000444 001751 000000 00000017426 12265042432 020234 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // MPEG4-GENERIC ("audio", "video", or "application") RTP stream sources // Implementation #include "MPEG4GenericRTPSource.hh" #include "BitVector.hh" #include "MPEG4LATMAudioRTPSource.hh" // for parseGeneralConfigStr() ////////// MPEG4GenericBufferedPacket and MPEG4GenericBufferedPacketFactory class MPEG4GenericBufferedPacket: public BufferedPacket { public: MPEG4GenericBufferedPacket(MPEG4GenericRTPSource* ourSource); virtual ~MPEG4GenericBufferedPacket(); private: // redefined virtual functions virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize); private: MPEG4GenericRTPSource* fOurSource; }; class MPEG4GenericBufferedPacketFactory: public BufferedPacketFactory { private: // redefined virtual functions virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource); }; ////////// AUHeader ////////// struct AUHeader { unsigned size; unsigned index; // indexDelta for the 2nd & subsequent headers }; ///////// MPEG4GenericRTPSource implementation //////// //##### NOTE: INCOMPLETE!!! 
Support more modes, and interleaving ##### MPEG4GenericRTPSource* MPEG4GenericRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mediumName, char const* mode, unsigned sizeLength, unsigned indexLength, unsigned indexDeltaLength ) { return new MPEG4GenericRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, mediumName, mode, sizeLength, indexLength, indexDeltaLength ); } MPEG4GenericRTPSource ::MPEG4GenericRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mediumName, char const* mode, unsigned sizeLength, unsigned indexLength, unsigned indexDeltaLength ) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, new MPEG4GenericBufferedPacketFactory), fSizeLength(sizeLength), fIndexLength(indexLength), fIndexDeltaLength(indexDeltaLength), fNumAUHeaders(0), fNextAUHeader(0), fAUHeaders(NULL) { unsigned mimeTypeLength = strlen(mediumName) + 14 /* strlen("/MPEG4-GENERIC") */ + 1; fMIMEType = new char[mimeTypeLength]; if (fMIMEType != NULL) { sprintf(fMIMEType, "%s/MPEG4-GENERIC", mediumName); } fMode = strDup(mode); // Check for a "mode" that we don't yet support: //##### if (mode == NULL || (strcmp(mode, "aac-hbr") != 0 && strcmp(mode, "generic") != 0)) { envir() << "MPEG4GenericRTPSource Warning: Unknown or unsupported \"mode\": " << mode << "\n"; } } MPEG4GenericRTPSource::~MPEG4GenericRTPSource() { delete[] fAUHeaders; delete[] fMode; delete[] fMIMEType; } Boolean MPEG4GenericRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { unsigned char* headerStart = packet->data(); unsigned packetSize = packet->dataSize(); fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame; // whether the *previous* packet ended a frame // The RTP "M" (marker) bit indicates the last fragment of a frame: fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); // default values: resultSpecialHeaderSize = 0; fNumAUHeaders = 0; fNextAUHeader = 0; delete[] fAUHeaders; fAUHeaders = NULL; if (fSizeLength > 0) { // The packet begins with a "AU Header Section". 
Parse it, to // determine the "AU-header"s for each frame present in this packet: resultSpecialHeaderSize += 2; if (packetSize < resultSpecialHeaderSize) return False; unsigned AU_headers_length = (headerStart[0]<<8)|headerStart[1]; unsigned AU_headers_length_bytes = (AU_headers_length+7)/8; if (packetSize < resultSpecialHeaderSize + AU_headers_length_bytes) return False; resultSpecialHeaderSize += AU_headers_length_bytes; // Figure out how many AU-headers are present in the packet: int bitsAvail = AU_headers_length - (fSizeLength + fIndexLength); if (bitsAvail >= 0 && (fSizeLength + fIndexDeltaLength) > 0) { fNumAUHeaders = 1 + bitsAvail/(fSizeLength + fIndexDeltaLength); } if (fNumAUHeaders > 0) { fAUHeaders = new AUHeader[fNumAUHeaders]; // Fill in each header: BitVector bv(&headerStart[2], 0, AU_headers_length); fAUHeaders[0].size = bv.getBits(fSizeLength); fAUHeaders[0].index = bv.getBits(fIndexLength); for (unsigned i = 1; i < fNumAUHeaders; ++i) { fAUHeaders[i].size = bv.getBits(fSizeLength); fAUHeaders[i].index = bv.getBits(fIndexDeltaLength); } } } return True; } char const* MPEG4GenericRTPSource::MIMEtype() const { return fMIMEType; } ////////// MPEG4GenericBufferedPacket and MPEG4GenericBufferedPacketFactory implementation ////////// MPEG4GenericBufferedPacket ::MPEG4GenericBufferedPacket(MPEG4GenericRTPSource* ourSource) : fOurSource(ourSource) { } MPEG4GenericBufferedPacket::~MPEG4GenericBufferedPacket() { } unsigned MPEG4GenericBufferedPacket ::nextEnclosedFrameSize(unsigned char*& /*framePtr*/, unsigned dataSize) { // WE CURRENTLY DON'T IMPLEMENT INTERLEAVING. FIX THIS! ##### AUHeader* auHeader = fOurSource->fAUHeaders; if (auHeader == NULL) return dataSize; unsigned numAUHeaders = fOurSource->fNumAUHeaders; if (fOurSource->fNextAUHeader >= numAUHeaders) { fOurSource->envir() << "MPEG4GenericBufferedPacket::nextEnclosedFrameSize(" << dataSize << "): data error (" << auHeader << "," << fOurSource->fNextAUHeader << "," << numAUHeaders << ")!\n"; return dataSize; } auHeader = &auHeader[fOurSource->fNextAUHeader++]; return auHeader->size <= dataSize ?
auHeader->size : dataSize; } BufferedPacket* MPEG4GenericBufferedPacketFactory ::createNewPacket(MultiFramedRTPSource* ourSource) { return new MPEG4GenericBufferedPacket((MPEG4GenericRTPSource*)ourSource); } ////////// samplingFrequencyFromAudioSpecificConfig() implementation ////////// static unsigned const samplingFrequencyFromIndex[16] = { 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350, 0, 0, 0 }; unsigned samplingFrequencyFromAudioSpecificConfig(char const* configStr) { unsigned char* config = NULL; unsigned result = 0; // if returned, indicates an error do { // Begin by parsing the config string: unsigned configSize; config = parseGeneralConfigStr(configStr, configSize); if (config == NULL) break; if (configSize < 2) break; unsigned char samplingFrequencyIndex = ((config[0]&0x07)<<1) | (config[1]>>7); if (samplingFrequencyIndex < 15) { result = samplingFrequencyFromIndex[samplingFrequencyIndex]; break; } // Index == 15 means that the actual frequency is next (24 bits): if (configSize < 5) break; result = ((config[1]&0x7F)<<17) | (config[2]<<9) | (config[3]<<1) | (config[4]>>7); } while (0); delete[] config; return result; } live/liveMedia/MPEGVideoStreamFramer.cpp000444 001751 000000 00000014502 12265042432 020374 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
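// Worked example (a sketch, not part of the original sources) for
// "samplingFrequencyFromAudioSpecificConfig()", defined in
// "MPEG4GenericRTPSource.cpp" above: for the common AAC-LC config string
// "1210", config[0] == 0x12 and config[1] == 0x10, so the index is
// ((0x12&0x07)<<1)|(0x10>>7) == (2<<1)|0 == 4, and the result is
// samplingFrequencyFromIndex[4] == 44100 (Hz):
#if 0
  unsigned freq = samplingFrequencyFromAudioSpecificConfig("1210"); // 44100
#endif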
// A filter that breaks up an MPEG video elementary stream into // headers and frames // Implementation #include "MPEGVideoStreamParser.hh" #include <GroupsockHelper.hh> ////////// TimeCode implementation ////////// TimeCode::TimeCode() : days(0), hours(0), minutes(0), seconds(0), pictures(0) { } TimeCode::~TimeCode() { } int TimeCode::operator==(TimeCode const& arg2) { return pictures == arg2.pictures && seconds == arg2.seconds && minutes == arg2.minutes && hours == arg2.hours && days == arg2.days; } ////////// MPEGVideoStreamFramer implementation ////////// MPEGVideoStreamFramer::MPEGVideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource) : FramedFilter(env, inputSource), fFrameRate(0.0) /* until we learn otherwise */, fParser(NULL) { reset(); } MPEGVideoStreamFramer::~MPEGVideoStreamFramer() { delete fParser; } void MPEGVideoStreamFramer::flushInput() { reset(); if (fParser != NULL) fParser->flushInput(); } void MPEGVideoStreamFramer::reset() { fPictureCount = 0; fPictureEndMarker = False; fPicturesAdjustment = 0; fPictureTimeBase = 0.0; fTcSecsBase = 0; fHaveSeenFirstTimeCode = False; // Use the current wallclock time as the base 'presentation time': gettimeofday(&fPresentationTimeBase, NULL); } #ifdef DEBUG static struct timeval firstPT; #endif void MPEGVideoStreamFramer ::computePresentationTime(unsigned numAdditionalPictures) { // Computes "fPresentationTime" from the most recent GOP's // time_code, along with the "numAdditionalPictures" parameter: TimeCode& tc = fCurGOPTimeCode; unsigned tcSecs = (((tc.days*24)+tc.hours)*60+tc.minutes)*60+tc.seconds - fTcSecsBase; double pictureTime = fFrameRate == 0.0 ? 0.0 : (tc.pictures + fPicturesAdjustment + numAdditionalPictures)/fFrameRate; while (pictureTime < fPictureTimeBase) { // "if" should be enough, but just in case if (tcSecs > 0) tcSecs -= 1; pictureTime += 1.0; } pictureTime -= fPictureTimeBase; if (pictureTime < 0.0) pictureTime = 0.0; // sanity check unsigned pictureSeconds = (unsigned)pictureTime; double pictureFractionOfSecond = pictureTime - (double)pictureSeconds; fPresentationTime = fPresentationTimeBase; fPresentationTime.tv_sec += tcSecs + pictureSeconds; fPresentationTime.tv_usec += (long)(pictureFractionOfSecond*1000000.0); if (fPresentationTime.tv_usec >= 1000000) { fPresentationTime.tv_usec -= 1000000; ++fPresentationTime.tv_sec; } #ifdef DEBUG if (firstPT.tv_sec == 0 && firstPT.tv_usec == 0) firstPT = fPresentationTime; struct timeval diffPT; diffPT.tv_sec = fPresentationTime.tv_sec - firstPT.tv_sec; diffPT.tv_usec = fPresentationTime.tv_usec - firstPT.tv_usec; if (fPresentationTime.tv_usec < firstPT.tv_usec) { --diffPT.tv_sec; diffPT.tv_usec += 1000000; } fprintf(stderr, "MPEGVideoStreamFramer::computePresentationTime(%d) -> %lu.%06ld [%lu.%06ld]\n", numAdditionalPictures, fPresentationTime.tv_sec, fPresentationTime.tv_usec, diffPT.tv_sec, diffPT.tv_usec); #endif } void MPEGVideoStreamFramer ::setTimeCode(unsigned hours, unsigned minutes, unsigned seconds, unsigned pictures, unsigned picturesSinceLastGOP) { TimeCode& tc = fCurGOPTimeCode; // abbrev unsigned days = tc.days; if (hours < tc.hours) { // Assume that the 'day' has wrapped around: ++days; } tc.days = days; tc.hours = hours; tc.minutes = minutes; tc.seconds = seconds; tc.pictures = pictures; if (!fHaveSeenFirstTimeCode) { fPictureTimeBase = fFrameRate == 0.0 ?
0.0 : tc.pictures/fFrameRate; fTcSecsBase = (((tc.days*24)+tc.hours)*60+tc.minutes)*60+tc.seconds; fHaveSeenFirstTimeCode = True; } else if (fCurGOPTimeCode == fPrevGOPTimeCode) { // The time code has not changed since last time. Adjust for this: fPicturesAdjustment += picturesSinceLastGOP; } else { // Normal case: The time code changed since last time. fPrevGOPTimeCode = tc; fPicturesAdjustment = 0; } } void MPEGVideoStreamFramer::doGetNextFrame() { fParser->registerReadInterest(fTo, fMaxSize); continueReadProcessing(); } void MPEGVideoStreamFramer ::continueReadProcessing(void* clientData, unsigned char* /*ptr*/, unsigned /*size*/, struct timeval /*presentationTime*/) { MPEGVideoStreamFramer* framer = (MPEGVideoStreamFramer*)clientData; framer->continueReadProcessing(); } void MPEGVideoStreamFramer::continueReadProcessing() { unsigned acquiredFrameSize = fParser->parse(); if (acquiredFrameSize > 0) { // We were able to acquire a frame from the input. // It has already been copied to the reader's space. fFrameSize = acquiredFrameSize; fNumTruncatedBytes = fParser->numTruncatedBytes(); // "fPresentationTime" should have already been computed. // Compute "fDurationInMicroseconds" now: fDurationInMicroseconds = (fFrameRate == 0.0 || ((int)fPictureCount) < 0) ? 0 : (unsigned)((fPictureCount*1000000)/fFrameRate); #ifdef DEBUG fprintf(stderr, "%d bytes @%u.%06d, fDurationInMicroseconds: %d ((%d*1000000)/%f)\n", acquiredFrameSize, fPresentationTime.tv_sec, fPresentationTime.tv_usec, fDurationInMicroseconds, fPictureCount, fFrameRate); #endif fPictureCount = 0; // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. afterGetting(this); } else { // We were unable to parse a complete frame from the input, because: // - we had to read more data from the source stream, or // - the source stream has ended. } } live/liveMedia/H264VideoStreamFramer.cpp000444 001751 000000 00000003107 12265042432 020266 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up a H.264 Video Elementary Stream into NAL units. 
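// Usage sketch (illustrative; "env" and the file name are assumptions): a
// framer like the one implemented below normally sits between a byte-stream
// source and an RTP sink:
#if 0
  ByteStreamFileSource* fileSource
    = ByteStreamFileSource::createNew(*env, "test.264");
  H264VideoStreamFramer* framer
    = H264VideoStreamFramer::createNew(*env, fileSource,
                                       False/*includeStartCodeInOutput*/);
#endif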
// Implementation #include "H264VideoStreamFramer.hh" H264VideoStreamFramer* H264VideoStreamFramer ::createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean includeStartCodeInOutput) { return new H264VideoStreamFramer(env, inputSource, True, includeStartCodeInOutput); } H264VideoStreamFramer ::H264VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean createParser, Boolean includeStartCodeInOutput) : H264or5VideoStreamFramer(264, env, inputSource, createParser, includeStartCodeInOutput) { } H264VideoStreamFramer::~H264VideoStreamFramer() { } Boolean H264VideoStreamFramer::isH264VideoStreamFramer() const { return True; } live/liveMedia/MPEG4LATMAudioRTPSink.cpp000444 001751 000000 00000006465 12265042432 020044 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for MPEG-4 audio, using LATM multiplexing (RFC 3016) // Implementation #include "MPEG4LATMAudioRTPSink.hh" MPEG4LATMAudioRTPSink ::MPEG4LATMAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, char const* streamMuxConfigString, unsigned numChannels, Boolean allowMultipleFramesPerPacket) : AudioRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "MP4A-LATM", numChannels), fStreamMuxConfigString(strDup(streamMuxConfigString)), fAllowMultipleFramesPerPacket(allowMultipleFramesPerPacket) { // Set up the "a=fmtp:" SDP line for this stream: char const* fmtpFmt = "a=fmtp:%d " "cpresent=0;config=%s\r\n"; unsigned fmtpFmtSize = strlen(fmtpFmt) + 3 /* max char len */ + strlen(fStreamMuxConfigString); char* fmtp = new char[fmtpFmtSize]; sprintf(fmtp, fmtpFmt, rtpPayloadType(), fStreamMuxConfigString); fFmtpSDPLine = strDup(fmtp); delete[] fmtp; } MPEG4LATMAudioRTPSink::~MPEG4LATMAudioRTPSink() { delete[] fFmtpSDPLine; delete[] (char*)fStreamMuxConfigString; } MPEG4LATMAudioRTPSink* MPEG4LATMAudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, char const* streamMuxConfigString, unsigned numChannels, Boolean allowMultipleFramesPerPacket) { return new MPEG4LATMAudioRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, streamMuxConfigString, numChannels, allowMultipleFramesPerPacket); } Boolean MPEG4LATMAudioRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { return fAllowMultipleFramesPerPacket; } void MPEG4LATMAudioRTPSink ::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { if (numRemainingBytes == 0) { // This packet contains the last (or only) fragment of the frame. 
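// (Per RFC 3016, a set marker bit tells receivers that the packet carries a
// complete audioMuxElement, or the final fragment of one.)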
// Set the RTP 'M' ('marker') bit: setMarkerBit(); } // Important: Also call our base class's doSpecialFrameHandling(), // to set the packet's timestamp: MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset, frameStart, numBytesInFrame, framePresentationTime, numRemainingBytes); } char const* MPEG4LATMAudioRTPSink::auxSDPLine() { return fFmtpSDPLine; } live/liveMedia/uLawAudioFilter.cpp000444 001751 000000 00000034500 12265042432 017404 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Filters for converting between raw PCM audio and uLaw // Implementation #include "uLawAudioFilter.hh" ////////// 16-bit PCM (in various byte orders) -> 8-bit u-Law ////////// uLawFromPCMAudioSource* uLawFromPCMAudioSource ::createNew(UsageEnvironment& env, FramedSource* inputSource, int byteOrdering) { // "byteOrdering" must be 0, 1, or 2: if (byteOrdering < 0 || byteOrdering > 2) { env.setResultMsg("uLawFromPCMAudioSource::createNew(): bad \"byteOrdering\" parameter"); return NULL; } return new uLawFromPCMAudioSource(env, inputSource, byteOrdering); } uLawFromPCMAudioSource ::uLawFromPCMAudioSource(UsageEnvironment& env, FramedSource* inputSource, int byteOrdering) : FramedFilter(env, inputSource), fByteOrdering(byteOrdering), fInputBuffer(NULL), fInputBufferSize(0) { } uLawFromPCMAudioSource::~uLawFromPCMAudioSource() { delete[] fInputBuffer; } void uLawFromPCMAudioSource::doGetNextFrame() { // Figure out how many bytes of input data to ask for, and increase // our input buffer if necessary: unsigned bytesToRead = fMaxSize*2; // because we're converting 16 bits->8 if (bytesToRead > fInputBufferSize) { delete[] fInputBuffer; fInputBuffer = new unsigned char[bytesToRead]; fInputBufferSize = bytesToRead; } // Arrange to read samples into the input buffer: fInputSource->getNextFrame(fInputBuffer, bytesToRead, afterGettingFrame, this, FramedSource::handleClosure, this); } void uLawFromPCMAudioSource ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { uLawFromPCMAudioSource* source = (uLawFromPCMAudioSource*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } #define BIAS 0x84 // the add-in bias for 16 bit samples #define CLIP 32635 static unsigned char uLawFrom16BitLinear(u_int16_t sample) { static int const exp_lut[256] = {0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3, 4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7}; unsigned char sign = (sample >> 8) & 0x80; if (sign != 0) sample = -sample; // get the magnitude if (sample > CLIP) sample = CLIP; // clip the magnitude sample += BIAS; unsigned char exponent = exp_lut[(sample>>7) & 0xFF]; unsigned char mantissa = (sample >> (exponent+3)) & 0x0F; unsigned char result = ~(sign | (exponent << 4) | mantissa); if (result == 0 ) result = 0x02; // CCITT trap return result; } void uLawFromPCMAudioSource ::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // Translate raw 16-bit PCM samples (in the input buffer) // into uLaw samples (in the output buffer). unsigned numSamples = frameSize/2; switch (fByteOrdering) { case 0: { // host order u_int16_t* inputSample = (u_int16_t*)fInputBuffer; for (unsigned i = 0; i < numSamples; ++i) { fTo[i] = uLawFrom16BitLinear(inputSample[i]); } break; } case 1: { // little-endian order for (unsigned i = 0; i < numSamples; ++i) { u_int16_t const newValue = (fInputBuffer[2*i+1]<<8)|fInputBuffer[2*i]; fTo[i] = uLawFrom16BitLinear(newValue); } break; } case 2: { // network (i.e., big-endian) order: high byte first for (unsigned i = 0; i < numSamples; ++i) { u_int16_t const newValue = (fInputBuffer[2*i]<<8)|fInputBuffer[2*i+1]; fTo[i] = uLawFrom16BitLinear(newValue); } break; } } // Complete delivery to the client: fFrameSize = numSamples; fNumTruncatedBytes = numTruncatedBytes; fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; afterGetting(this); } ////////// u-Law -> 16-bit PCM (in host order) ////////// PCMFromuLawAudioSource* PCMFromuLawAudioSource ::createNew(UsageEnvironment& env, FramedSource* inputSource) { return new PCMFromuLawAudioSource(env, inputSource); } PCMFromuLawAudioSource ::PCMFromuLawAudioSource(UsageEnvironment& env, FramedSource* inputSource) : FramedFilter(env, inputSource), fInputBuffer(NULL), fInputBufferSize(0) { } PCMFromuLawAudioSource::~PCMFromuLawAudioSource() { delete[] fInputBuffer; } void PCMFromuLawAudioSource::doGetNextFrame() { // Figure out how many bytes of input data to ask for, and increase // our input buffer if necessary: unsigned bytesToRead = fMaxSize/2; // because we're converting 8 bits->16 if (bytesToRead > fInputBufferSize) { delete[] fInputBuffer; fInputBuffer = new unsigned char[bytesToRead]; fInputBufferSize = bytesToRead; } // Arrange to read samples into the input buffer: fInputSource->getNextFrame(fInputBuffer, bytesToRead, afterGettingFrame, this, FramedSource::handleClosure, this); } void PCMFromuLawAudioSource ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { PCMFromuLawAudioSource* source = (PCMFromuLawAudioSource*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } static u_int16_t linear16FromuLaw(unsigned char uLawByte) { static int const exp_lut[8] = {0,132,396,924,1980,4092,8316,16764}; uLawByte = ~uLawByte; Boolean sign = (uLawByte & 0x80) != 0; unsigned char exponent = (uLawByte>>4) & 0x07; unsigned char mantissa = uLawByte & 0x0F; u_int16_t result = exp_lut[exponent] + (mantissa << (exponent+3)); if (sign) result = -result; return result; } void PCMFromuLawAudioSource ::afterGettingFrame1(unsigned
frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // Translate uLaw samples (in the input buffer) // into 16-bit PCM samples (in the output buffer), in host order. unsigned numSamples = frameSize; u_int16_t* outputSample = (u_int16_t*)fTo; for (unsigned i = 0; i < numSamples; ++i) { outputSample[i] = linear16FromuLaw(fInputBuffer[i]); } // Complete delivery to the client: fFrameSize = numSamples*2; fNumTruncatedBytes = numTruncatedBytes; fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; afterGetting(this); } ////////// 16-bit values (in host order) -> 16-bit network order ////////// NetworkFromHostOrder16* NetworkFromHostOrder16 ::createNew(UsageEnvironment& env, FramedSource* inputSource) { return new NetworkFromHostOrder16(env, inputSource); } NetworkFromHostOrder16 ::NetworkFromHostOrder16(UsageEnvironment& env, FramedSource* inputSource) : FramedFilter(env, inputSource) { } NetworkFromHostOrder16::~NetworkFromHostOrder16() { } void NetworkFromHostOrder16::doGetNextFrame() { // Arrange to read data directly into the client's buffer: fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this, FramedSource::handleClosure, this); } void NetworkFromHostOrder16 ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { NetworkFromHostOrder16* source = (NetworkFromHostOrder16*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void NetworkFromHostOrder16 ::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // Translate the 16-bit values that we have just read from host // to network order (in-place) unsigned numValues = frameSize/2; u_int16_t* value = (u_int16_t*)fTo; for (unsigned i = 0; i < numValues; ++i) { value[i] = htons(value[i]); } // Complete delivery to the client: fFrameSize = numValues*2; fNumTruncatedBytes = numTruncatedBytes; fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; afterGetting(this); } ////////// 16-bit values (in network order) -> 16-bit host order ////////// HostFromNetworkOrder16* HostFromNetworkOrder16 ::createNew(UsageEnvironment& env, FramedSource* inputSource) { return new HostFromNetworkOrder16(env, inputSource); } HostFromNetworkOrder16 ::HostFromNetworkOrder16(UsageEnvironment& env, FramedSource* inputSource) : FramedFilter(env, inputSource) { } HostFromNetworkOrder16::~HostFromNetworkOrder16() { } void HostFromNetworkOrder16::doGetNextFrame() { // Arrange to read data directly into the client's buffer: fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this, FramedSource::handleClosure, this); } void HostFromNetworkOrder16 ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { HostFromNetworkOrder16* source = (HostFromNetworkOrder16*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void HostFromNetworkOrder16 ::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // Translate the 16-bit values that we have just read from network // to host order (in-place): unsigned numValues = frameSize/2; u_int16_t* value = (u_int16_t*)fTo; for 
(unsigned i = 0; i < numValues; ++i) { value[i] = ntohs(value[i]); } // Complete delivery to the client: fFrameSize = numValues*2; fNumTruncatedBytes = numTruncatedBytes; fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; afterGetting(this); } ////////// 16-bit values: little-endian <-> big-endian ////////// EndianSwap16* EndianSwap16::createNew(UsageEnvironment& env, FramedSource* inputSource) { return new EndianSwap16(env, inputSource); } EndianSwap16::EndianSwap16(UsageEnvironment& env, FramedSource* inputSource) : FramedFilter(env, inputSource) { } EndianSwap16::~EndianSwap16() { } void EndianSwap16::doGetNextFrame() { // Arrange to read data directly into the client's buffer: fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this, FramedSource::handleClosure, this); } void EndianSwap16::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { EndianSwap16* source = (EndianSwap16*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void EndianSwap16::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // Swap the byte order of the 16-bit values that we have just read (in place): unsigned numValues = frameSize/2; u_int16_t* value = (u_int16_t*)fTo; for (unsigned i = 0; i < numValues; ++i) { u_int16_t const orig = value[i]; value[i] = ((orig&0xFF)<<8) | ((orig&0xFF00)>>8); } // Complete delivery to the client: fFrameSize = numValues*2; fNumTruncatedBytes = numTruncatedBytes + (frameSize - fFrameSize); fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; afterGetting(this); } ////////// 24-bit values: little-endian <-> big-endian ////////// EndianSwap24* EndianSwap24::createNew(UsageEnvironment& env, FramedSource* inputSource) { return new EndianSwap24(env, inputSource); } EndianSwap24::EndianSwap24(UsageEnvironment& env, FramedSource* inputSource) : FramedFilter(env, inputSource) { } EndianSwap24::~EndianSwap24() { } void EndianSwap24::doGetNextFrame() { // Arrange to read data directly into the client's buffer: fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this, FramedSource::handleClosure, this); } void EndianSwap24::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { EndianSwap24* source = (EndianSwap24*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void EndianSwap24::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // Swap the byte order of the 24-bit values that we have just read (in place): unsigned const numValues = frameSize/3; u_int8_t* p = fTo; for (unsigned i = 0; i < numValues; ++i) { u_int8_t tmp = p[0]; p[0] = p[2]; p[2] = tmp; p += 3; } // Complete delivery to the client: fFrameSize = numValues*3; fNumTruncatedBytes = numTruncatedBytes + (frameSize - fFrameSize); fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; afterGetting(this); } live/liveMedia/JPEGVideoSource.cpp000444 001751 000000 00000002565 12265042432 017247 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the 
GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // JPEG video sources // Implementation #include "JPEGVideoSource.hh" JPEGVideoSource::JPEGVideoSource(UsageEnvironment& env) : FramedSource(env) { } JPEGVideoSource::~JPEGVideoSource() { } u_int8_t const* JPEGVideoSource::quantizationTables(u_int8_t& precision, u_int16_t& length) { // Default implementation precision = 0; length = 0; return NULL; } u_int16_t JPEGVideoSource::restartInterval() { // Default implementation return 0; } Boolean JPEGVideoSource::isJPEGVideoSource() const { return True; } live/liveMedia/VideoRTPSink.cpp000444 001751 000000 00000002524 12265042432 016626 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A generic RTP sink for video codecs (abstract base class) // Implementation #include "VideoRTPSink.hh" VideoRTPSink::VideoRTPSink(UsageEnvironment& env, Groupsock* rtpgs, unsigned char rtpPayloadType, unsigned rtpTimestampFrequency, char const* rtpPayloadFormatName) : MultiFramedRTPSink(env, rtpgs, rtpPayloadType, rtpTimestampFrequency, rtpPayloadFormatName) { } VideoRTPSink::~VideoRTPSink() { } char const* VideoRTPSink::sdpMediaType() const { return "video"; } live/liveMedia/AudioRTPSink.cpp000444 001751 000000 00000002575 12265042432 016627 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
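// Sketch of how the media type reported by these sinks gets used
// (illustrative only; "sdpLine", "portNum", and "sink" are assumed to exist
// in the caller):
#if 0
  // yields e.g. "m=audio 6970 RTP/AVP 96" for an audio sink with payload 96
  sprintf(sdpLine, "m=%s %u RTP/AVP %u\r\n",
          sink->sdpMediaType(), portNum, (unsigned)sink->rtpPayloadType());
#endif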
// A generic RTP sink for audio codecs (abstract base class) // Implementation #include "AudioRTPSink.hh" AudioRTPSink::AudioRTPSink(UsageEnvironment& env, Groupsock* rtpgs, unsigned char rtpPayloadType, unsigned rtpTimestampFrequency, char const* rtpPayloadFormatName, unsigned numChannels) : MultiFramedRTPSink(env, rtpgs, rtpPayloadType, rtpTimestampFrequency, rtpPayloadFormatName, numChannels) { } AudioRTPSink::~AudioRTPSink() { } char const* AudioRTPSink::sdpMediaType() const { return "audio"; } live/liveMedia/DVVideoRTPSink.cpp000444 001751 000000 00000006755 12265042432 017072 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for DV video (RFC 3189) // (Thanks to Ben Hutchings for prototyping this.) // Implementation #include "DVVideoRTPSink.hh" ////////// DVVideoRTPSink implementation ////////// DVVideoRTPSink ::DVVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, "DV"), fFmtpSDPLine(NULL) { } DVVideoRTPSink::~DVVideoRTPSink() { delete[] fFmtpSDPLine; } DVVideoRTPSink* DVVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) { return new DVVideoRTPSink(env, RTPgs, rtpPayloadFormat); } Boolean DVVideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) { // Our source must be an appropriate framer: return source.isDVVideoStreamFramer(); } void DVVideoRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* /*frameStart*/, unsigned /*numBytesInFrame*/, struct timeval framePresentationTime, unsigned numRemainingBytes) { if (numRemainingBytes == 0) { // This packet contains the last (or only) fragment of the frame. 
// Set the RTP 'M' ('marker') bit: setMarkerBit(); } // Also set the RTP timestamp: setTimestamp(framePresentationTime); } unsigned DVVideoRTPSink::computeOverflowForNewFrame(unsigned newFrameSize) const { unsigned initialOverflow = MultiFramedRTPSink::computeOverflowForNewFrame(newFrameSize); // Adjust (increase) this overflow, if necessary, so that the amount of frame data that we use is an integral number // of DIF blocks: unsigned numFrameBytesUsed = newFrameSize - initialOverflow; initialOverflow += numFrameBytesUsed%DV_DIF_BLOCK_SIZE; return initialOverflow; } char const* DVVideoRTPSink::auxSDPLine() { // Generate a new "a=fmtp:" line each time, using parameters from // our framer source (in case they've changed since the last time that // we were called): DVVideoStreamFramer* framerSource = (DVVideoStreamFramer*)fSource; if (framerSource == NULL) return NULL; // we don't yet have a source return auxSDPLineFromFramer(framerSource); } char const* DVVideoRTPSink::auxSDPLineFromFramer(DVVideoStreamFramer* framerSource) { char const* const profileName = framerSource->profileName(); if (profileName == NULL) return NULL; char const* const fmtpSDPFmt = "a=fmtp:%d encode=%s;audio=bundled\r\n"; unsigned fmtpSDPFmtSize = strlen(fmtpSDPFmt) + 3 // max payload format code length + strlen(profileName); delete[] fFmtpSDPLine; // if it already exists fFmtpSDPLine = new char[fmtpSDPFmtSize]; sprintf(fFmtpSDPLine, fmtpSDPFmt, rtpPayloadType(), profileName); return fFmtpSDPLine; } live/liveMedia/InputFile.cpp000444 001751 000000 00000005766 12265042432 016257 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Common routines for opening/closing named input files // Implementation #include "InputFile.hh" #include FILE* OpenInputFile(UsageEnvironment& env, char const* fileName) { FILE* fid; // Check for a special case file name: "stdin" if (strcmp(fileName, "stdin") == 0) { fid = stdin; #if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE) _setmode(_fileno(stdin), _O_BINARY); // convert to binary mode #endif } else { fid = fopen(fileName, "rb"); if (fid == NULL) { env.setResultMsg("unable to open file \"",fileName, "\""); } } return fid; } void CloseInputFile(FILE* fid) { // Don't close 'stdin', in case we want to use it again later. 
if (fid != NULL && fid != stdin) fclose(fid); } u_int64_t GetFileSize(char const* fileName, FILE* fid) { u_int64_t fileSize = 0; // by default if (fid != stdin) { #if !defined(_WIN32_WCE) if (fileName == NULL) { #endif if (fid != NULL && SeekFile64(fid, 0, SEEK_END) >= 0) { fileSize = (u_int64_t)TellFile64(fid); if (fileSize == (u_int64_t)-1) fileSize = 0; // TellFile64() failed SeekFile64(fid, 0, SEEK_SET); } #if !defined(_WIN32_WCE) } else { struct stat sb; if (stat(fileName, &sb) == 0) { fileSize = sb.st_size; } } #endif } return fileSize; } int64_t SeekFile64(FILE *fid, int64_t offset, int whence) { if (fid == NULL) return -1; clearerr(fid); fflush(fid); #if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE) return _lseeki64(_fileno(fid), offset, whence) == (int64_t)-1 ? -1 : 0; #else #if defined(_WIN32_WCE) return fseek(fid, (long)(offset), whence); #else return fseeko(fid, (off_t)(offset), whence); #endif #endif } int64_t TellFile64(FILE *fid) { if (fid == NULL) return -1; clearerr(fid); fflush(fid); #if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE) return _telli64(_fileno(fid)); #else #if defined(_WIN32_WCE) return ftell(fid); #else return ftello(fid); #endif #endif } Boolean FileIsSeekable(FILE *fid) { if (SeekFile64(fid, 1, SEEK_CUR) < 0) { return False; } SeekFile64(fid, -1, SEEK_CUR); // seek back to where we were return True; } live/liveMedia/MPEG4VideoFileServerMediaSubsession.cpp000444 001751 000000 00000011107 12265042432 023152 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a MPEG-4 video file. 
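// Usage sketch (illustrative; "env" and "rtspServer" are assumed to have
// been created already, as in the "testOnDemandRTSPServer" demo program):
#if 0
  ServerMediaSession* sms = ServerMediaSession::createNew(*env, "mpeg4Video");
  sms->addSubsession(MPEG4VideoFileServerMediaSubsession
                     ::createNew(*env, "test.m4e", False/*reuseFirstSource*/));
  rtspServer->addServerMediaSession(sms);
#endif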
// Implementation #include "MPEG4VideoFileServerMediaSubsession.hh" #include "MPEG4ESVideoRTPSink.hh" #include "ByteStreamFileSource.hh" #include "MPEG4VideoStreamFramer.hh" MPEG4VideoFileServerMediaSubsession* MPEG4VideoFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) { return new MPEG4VideoFileServerMediaSubsession(env, fileName, reuseFirstSource); } MPEG4VideoFileServerMediaSubsession ::MPEG4VideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) : FileServerMediaSubsession(env, fileName, reuseFirstSource), fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) { } MPEG4VideoFileServerMediaSubsession::~MPEG4VideoFileServerMediaSubsession() { delete[] fAuxSDPLine; } static void afterPlayingDummy(void* clientData) { MPEG4VideoFileServerMediaSubsession* subsess = (MPEG4VideoFileServerMediaSubsession*)clientData; subsess->afterPlayingDummy1(); } void MPEG4VideoFileServerMediaSubsession::afterPlayingDummy1() { // Unschedule any pending 'checking' task: envir().taskScheduler().unscheduleDelayedTask(nextTask()); // Signal the event loop that we're done: setDoneFlag(); } static void checkForAuxSDPLine(void* clientData) { MPEG4VideoFileServerMediaSubsession* subsess = (MPEG4VideoFileServerMediaSubsession*)clientData; subsess->checkForAuxSDPLine1(); } void MPEG4VideoFileServerMediaSubsession::checkForAuxSDPLine1() { char const* dasl; if (fAuxSDPLine != NULL) { // Signal the event loop that we're done: setDoneFlag(); } else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) { fAuxSDPLine= strDup(dasl); fDummyRTPSink = NULL; // Signal the event loop that we're done: setDoneFlag(); } else { // try again after a brief delay: int uSecsToDelay = 100000; // 100 ms nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)checkForAuxSDPLine, this); } } char const* MPEG4VideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) { if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client) if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream // Note: For MPEG-4 video files, the 'config' information isn't known // until we start reading the file. This means that "rtpSink"s // "auxSDPLine()" will be NULL initially, and we need to start reading data from our file until this changes. 
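// (That is: we briefly 'play' the stream into this dummy sink, not to send
// anything, but so that the framer reads far enough into the file to learn
// the 'config' bytes; "checkForAuxSDPLine1()" above then polls at 100 ms
// intervals until "auxSDPLine()" becomes non-NULL, at which point the event
// loop below returns.)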
fDummyRTPSink = rtpSink; // Start reading the file: fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this); // Check whether the sink's 'auxSDPLine()' is ready: checkForAuxSDPLine(this); } envir().taskScheduler().doEventLoop(&fDoneFlag); return fAuxSDPLine; } FramedSource* MPEG4VideoFileServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { estBitrate = 500; // kbps, estimate // Create the video source: ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; fFileSize = fileSource->fileSize(); // Create a framer for the Video Elementary Stream: return MPEG4VideoStreamFramer::createNew(envir(), fileSource); } RTPSink* MPEG4VideoFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { return MPEG4ESVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } live/liveMedia/AMRAudioFileServerMediaSubsession.cpp000444 001751 000000 00000004345 12265042432 022756 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an AMR audio file. 
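// Note (an addition; the file name is hypothetical): the "AMRAudioFileSource"
// used below expects the RFC 4867 storage format, i.e. a file beginning with
// the magic string "#!AMR\n" (narrowband) or "#!AMR-WB\n" (wideband),
// followed by the frames themselves:
#if 0
  AMRAudioFileSource* amrSource
    = AMRAudioFileSource::createNew(*env, "test.amr");
  if (amrSource != NULL && amrSource->isWideband()) {
    // a 16000 Hz RTP timestamp frequency will then be used, instead of 8000 Hz
  }
#endif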
// Implementation #include "AMRAudioFileServerMediaSubsession.hh" #include "AMRAudioRTPSink.hh" #include "AMRAudioFileSource.hh" AMRAudioFileServerMediaSubsession* AMRAudioFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) { return new AMRAudioFileServerMediaSubsession(env, fileName, reuseFirstSource); } AMRAudioFileServerMediaSubsession ::AMRAudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) : FileServerMediaSubsession(env, fileName, reuseFirstSource) { } AMRAudioFileServerMediaSubsession ::~AMRAudioFileServerMediaSubsession() { } FramedSource* AMRAudioFileServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { estBitrate = 10; // kbps, estimate return AMRAudioFileSource::createNew(envir(), fFileName); } RTPSink* AMRAudioFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource) { AMRAudioFileSource* amrSource = (AMRAudioFileSource*)inputSource; return AMRAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, amrSource->isWideband(), amrSource->numChannels()); } live/liveMedia/H263plusVideoStreamParser.hh000444 001751 000000 00000011071 12265042432 021025 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up an H263 video stream into frames. // derived from MPEG4IP h263.c // Author Benhard Feiten #ifndef _H263PLUS_VIDEO_STREAM_PARSER_HH #define _H263PLUS_VIDEO_STREAM_PARSER_HH #ifndef _STREAM_PARSER_HH #include "StreamParser.hh" #endif // Default timescale for H.263 (1000ms) #define H263_TIMESCALE 1000 // Default H263 frame rate (30fps) #define H263_BASIC_FRAME_RATE 30 // Minimum number of bytes needed to parse an H263 header #define H263_REQUIRE_HEADER_SIZE_BYTES 5 // Number of bytes the start code requries #define H263_STARTCODE_SIZE_BYTES 3 // This is the input buffer's size. It should contain // 1 frame with the following start code #define H263_BUFFER_SIZE 256 * 1024 // additionalBytesNeeded - indicates how many additional bytes are to be read // from the next frame's header (over the 3 bytes that are already read). 
#define ADDITIONAL_BYTES_NEEDED H263_REQUIRE_HEADER_SIZE_BYTES - H263_STARTCODE_SIZE_BYTES // The default max different (in %) betwqeen max and average bitrates #define H263_DEFAULT_CBR_TOLERANCE 10 // The following structure holds information extracted from each frame's header: typedef struct _H263INFO { u_int8_t tr; // Temporal Reference, used in duration calculation u_int16_t width; // Width of the picture u_int16_t height; // Height of the picture bool isSyncFrame; // Frame type (true = I frame = "sync" frame) } H263INFO; typedef struct _MaxBitrate_CTX { u_int32_t bitrateTable[H263_BASIC_FRAME_RATE];// Window of 1 second u_int32_t windowBitrate; // The bitrate of the current window u_int32_t maxBitrate; // The up-to-date maximum bitrate u_int32_t tableIndex; // The next TR unit to update } MaxBitrate_CTX; class H263plusVideoStreamParser : public StreamParser { public: H263plusVideoStreamParser( class H263plusVideoStreamFramer* usingSource, FramedSource* inputSource); virtual ~H263plusVideoStreamParser(); void registerReadInterest(unsigned char* to, unsigned maxSize); unsigned parse(u_int64_t & currentDuration); // returns the size of the frame that was acquired, or 0 if none unsigned numTruncatedBytes() const { return fNumTruncatedBytes; } // The number of truncated bytes (if any) protected: // H263plusVideoStreamFramer* usingSource() { // return (H263plusVideoStreamFramer*)fUsingSource; // } void setParseState(); // void setParseState(H263plusParseState parseState); private: int parseH263Frame( ); bool ParseShortHeader(u_int8_t *headerBuffer, H263INFO *outputInfoStruct); void GetMaxBitrate( MaxBitrate_CTX *ctx, u_int32_t frameSize, u_int8_t frameTRDiff); u_int64_t CalculateDuration(u_int8_t trDiff); bool GetWidthAndHeight( u_int8_t fmt, u_int16_t *width, u_int16_t *height); u_int8_t GetTRDifference( u_int8_t nextTR, u_int8_t currentTR); virtual void restoreSavedParserState(); protected: class H263plusVideoStreamFramer* fUsingSource; unsigned char* fTo; unsigned fMaxSize; unsigned char* fStartOfFrame; unsigned char* fSavedTo; unsigned char* fLimit; unsigned fNumTruncatedBytes; unsigned fSavedNumTruncatedBytes; private: H263INFO fNextInfo; // Holds information about the next frame H263INFO fCurrentInfo; // Holds information about the current frame MaxBitrate_CTX fMaxBitrateCtx; // Context for the GetMaxBitrate function char fStates[3][256]; u_int8_t fNextHeader[H263_REQUIRE_HEADER_SIZE_BYTES]; u_int32_t fnextTR; // The next frame's presentation time in TR units u_int64_t fcurrentPT; // The current frame's presentation time in milli-seconds }; #endif live/liveMedia/AMRAudioSource.cpp000444 001751 000000 00000002473 12265042432 017132 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
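// Background sketch (an addition, stating standard AMR facts): every AMR
// frame, narrowband or wideband, carries exactly 20 ms of audio (160 samples
// at 8 kHz, or 320 samples at 16 kHz), so per-frame RTP timestamp increments
// are 160 and 320 respectively:
#if 0
  unsigned samplesPerFrame = isWideband ? 320 : 160; // "isWideband" assumed
  unsigned frameDurationInMicroseconds = 20000;      // always 20 ms
#endif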
// A source object for AMR audio sources // Implementation #include "AMRAudioSource.hh" AMRAudioSource::AMRAudioSource(UsageEnvironment& env, Boolean isWideband, unsigned numChannels) : FramedSource(env), fIsWideband(isWideband), fNumChannels(numChannels), fLastFrameHeader(0) { } AMRAudioSource::~AMRAudioSource() { } char const* AMRAudioSource::MIMEtype() const { return "audio/AMR"; } Boolean AMRAudioSource::isAMRAudioSource() const { return True; } live/liveMedia/AMRAudioRTPSource.cpp000444 001751 000000 00000062622 12265042432 017522 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // AMR Audio RTP Sources (RFC 4867) // Implementation #include "AMRAudioRTPSource.hh" #include "MultiFramedRTPSource.hh" #include "BitVector.hh" #include #include // This source is implemented internally by two separate sources: // (i) a RTP source for the raw (and possibly interleaved) AMR frames, and // (ii) a deinterleaving filter that reads from this. // Define these two new classes here: class RawAMRRTPSource: public MultiFramedRTPSource { public: static RawAMRRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, Boolean isWideband, Boolean isOctetAligned, Boolean isInterleaved, Boolean CRCsArePresent); Boolean isWideband() const { return fIsWideband; } unsigned char ILL() const { return fILL; } unsigned char ILP() const { return fILP; } unsigned TOCSize() const { return fTOCSize; } // total # of frames in the last pkt unsigned char* TOC() const { return fTOC; } // FT+Q value for each TOC entry unsigned& frameIndex() { return fFrameIndex; } // index of frame-block within pkt Boolean& isSynchronized() { return fIsSynchronized; } private: RawAMRRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, Boolean isWideband, Boolean isOctetAligned, Boolean isInterleaved, Boolean CRCsArePresent); // called only by createNew() virtual ~RawAMRRTPSource(); private: // redefined virtual functions: virtual Boolean hasBeenSynchronizedUsingRTCP(); virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; private: Boolean fIsWideband, fIsOctetAligned, fIsInterleaved, fCRCsArePresent; unsigned char fILL, fILP; unsigned fTOCSize; unsigned char* fTOC; unsigned fFrameIndex; Boolean fIsSynchronized; }; class AMRDeinterleaver: public AMRAudioSource { public: static AMRDeinterleaver* createNew(UsageEnvironment& env, Boolean isWideband, unsigned numChannels, unsigned maxInterleaveGroupSize, RawAMRRTPSource* inputSource); private: AMRDeinterleaver(UsageEnvironment& env, Boolean isWideband, unsigned numChannels, unsigned maxInterleaveGroupSize, RawAMRRTPSource* inputSource); // called only by "createNew()" 
virtual ~AMRDeinterleaver(); static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, struct timeval presentationTime); private: // Redefined virtual functions: void doGetNextFrame(); virtual void doStopGettingFrames(); private: RawAMRRTPSource* fInputSource; class AMRDeinterleavingBuffer* fDeinterleavingBuffer; Boolean fNeedAFrame; }; ////////// AMRAudioRTPSource implementation ////////// #define MAX_NUM_CHANNELS 20 // far larger than ever expected... #define MAX_INTERLEAVING_GROUP_SIZE 1000 // far larger than ever expected... AMRAudioSource* AMRAudioRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, RTPSource*& resultRTPSource, unsigned char rtpPayloadFormat, Boolean isWideband, unsigned numChannels, Boolean isOctetAligned, unsigned interleaving, Boolean robustSortingOrder, Boolean CRCsArePresent) { // Perform sanity checks on the input parameters: if (robustSortingOrder) { env << "AMRAudioRTPSource::createNew(): 'Robust sorting order' was specified, but we don't yet support this!\n"; return NULL; } else if (numChannels > MAX_NUM_CHANNELS) { env << "AMRAudioRTPSource::createNew(): The \"number of channels\" parameter (" << numChannels << ") is much too large!\n"; return NULL; } else if (interleaving > MAX_INTERLEAVING_GROUP_SIZE) { env << "AMRAudioRTPSource::createNew(): The \"interleaving\" parameter (" << interleaving << ") is much too large!\n"; return NULL; } // 'Bandwidth-efficient mode' precludes some other options: if (!isOctetAligned) { if (interleaving > 0 || robustSortingOrder || CRCsArePresent) { env << "AMRAudioRTPSource::createNew(): 'Bandwidth-efficient mode' was specified, along with interleaving, 'robust sorting order', and/or CRCs, so we assume 'octet-aligned mode' instead.\n"; isOctetAligned = True; } } Boolean isInterleaved; unsigned maxInterleaveGroupSize; // in frames (not frame-blocks) if (interleaving > 0) { isInterleaved = True; maxInterleaveGroupSize = interleaving*numChannels; } else { isInterleaved = False; maxInterleaveGroupSize = numChannels; } RawAMRRTPSource* rawRTPSource; resultRTPSource = rawRTPSource = RawAMRRTPSource::createNew(env, RTPgs, rtpPayloadFormat, isWideband, isOctetAligned, isInterleaved, CRCsArePresent); if (resultRTPSource == NULL) return NULL; AMRDeinterleaver* deinterleaver = AMRDeinterleaver::createNew(env, isWideband, numChannels, maxInterleaveGroupSize, rawRTPSource); if (deinterleaver == NULL) { Medium::close(resultRTPSource); resultRTPSource = NULL; } return deinterleaver; }
class AMRBufferedPacket: public BufferedPacket { public: AMRBufferedPacket(RawAMRRTPSource& ourSource); virtual ~AMRBufferedPacket(); private: // redefined virtual functions virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize); private: RawAMRRTPSource& fOurSource; }; class AMRBufferedPacketFactory: public BufferedPacketFactory { private: // redefined virtual functions virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource); }; ///////// RawAMRRTPSource implementation //////// RawAMRRTPSource* RawAMRRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, Boolean isWideband, Boolean isOctetAligned, Boolean isInterleaved, Boolean CRCsArePresent) { return new RawAMRRTPSource(env, RTPgs, rtpPayloadFormat, isWideband, isOctetAligned, isInterleaved, CRCsArePresent); } RawAMRRTPSource ::RawAMRRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, Boolean isWideband, Boolean isOctetAligned, Boolean isInterleaved, Boolean CRCsArePresent) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, isWideband ? 16000 : 8000, new AMRBufferedPacketFactory), fIsWideband(isWideband), fIsOctetAligned(isOctetAligned), fIsInterleaved(isInterleaved), fCRCsArePresent(CRCsArePresent), fILL(0), fILP(0), fTOCSize(0), fTOC(NULL), fFrameIndex(0), fIsSynchronized(False) { } RawAMRRTPSource::~RawAMRRTPSource() { delete[] fTOC; } #define FT_SPEECH_LOST 14 #define FT_NO_DATA 15 static void unpackBandwidthEfficientData(BufferedPacket* packet, Boolean isWideband); // forward Boolean RawAMRRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { // If the data is 'bandwidth-efficient', first unpack it so that it's // 'octet-aligned': if (!fIsOctetAligned) unpackBandwidthEfficientData(packet, fIsWideband); unsigned char* headerStart = packet->data(); unsigned packetSize = packet->dataSize(); // There's at least a 1-byte header, containing the CMR: if (packetSize < 1) return False; resultSpecialHeaderSize = 1; if (fIsInterleaved) { // There's an extra byte, containing the interleave parameters: if (packetSize < 2) return False; // Get the interleaving parameters, and check them for validity: unsigned char const secondByte = headerStart[1]; fILL = (secondByte&0xF0)>>4; fILP = secondByte&0x0F; if (fILP > fILL) return False; // invalid ++resultSpecialHeaderSize; } #ifdef DEBUG fprintf(stderr, "packetSize: %d, ILL: %d, ILP: %d\n", packetSize, fILL, fILP); #endif fFrameIndex = 0; // initially // Next, there's a "Payload Table of Contents" (one byte per entry): unsigned numFramesPresent = 0, numNonEmptyFramesPresent = 0; unsigned tocStartIndex = resultSpecialHeaderSize; Boolean F; do { if (resultSpecialHeaderSize >= packetSize) return False; unsigned char const tocByte = headerStart[resultSpecialHeaderSize++]; F = (tocByte&0x80) != 0; unsigned char const FT = (tocByte&0x78) >> 3; #ifdef DEBUG unsigned char Q = (tocByte&0x04)>>2; fprintf(stderr, "\tTOC entry: F %d, FT %d, Q %d\n", F, FT, Q); #endif ++numFramesPresent; if (FT != FT_SPEECH_LOST && FT != FT_NO_DATA) ++numNonEmptyFramesPresent; } while (F); #ifdef DEBUG fprintf(stderr, "TOC contains %d entries (%d non-empty)\n", numFramesPresent, numNonEmptyFramesPresent); #endif // Now that we know the size of the TOC, fill in our copy: if (numFramesPresent > fTOCSize) { delete[] fTOC; fTOC = new unsigned char[numFramesPresent]; } fTOCSize = numFramesPresent; for (unsigned i = 0; i < fTOCSize; ++i) { unsigned char const tocByte = 
headerStart[tocStartIndex + i]; fTOC[i] = tocByte&0x7C; // clear everything except the FT and Q fields } if (fCRCsArePresent) { // 'numNonEmptyFramesPresent' CRC bytes will follow. // Note: we currently don't check the CRCs for validity ##### resultSpecialHeaderSize += numNonEmptyFramesPresent; #ifdef DEBUG fprintf(stderr, "Ignoring %d following CRC bytes\n", numNonEmptyFramesPresent); #endif if (resultSpecialHeaderSize > packetSize) return False; } #ifdef DEBUG fprintf(stderr, "Total special header size: %d\n", resultSpecialHeaderSize); #endif return True; } char const* RawAMRRTPSource::MIMEtype() const { return fIsWideband ? "audio/AMR-WB" : "audio/AMR"; } Boolean RawAMRRTPSource::hasBeenSynchronizedUsingRTCP() { return fIsSynchronized; } ///// AMRBufferedPacket and AMRBufferedPacketFactory implementation AMRBufferedPacket::AMRBufferedPacket(RawAMRRTPSource& ourSource) : fOurSource(ourSource) { } AMRBufferedPacket::~AMRBufferedPacket() { } // The mapping from the "FT" field to frame size. // Values of 65535 are invalid. #define FT_INVALID 65535 static unsigned short const frameBytesFromFT[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, FT_INVALID, FT_INVALID, FT_INVALID, FT_INVALID, FT_INVALID, FT_INVALID, 0 }; static unsigned short const frameBytesFromFTWideband[16] = { 17, 23, 32, 36, 40, 46, 50, 58, 60, 5, FT_INVALID, FT_INVALID, FT_INVALID, FT_INVALID, 0, 0 }; unsigned AMRBufferedPacket:: nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) { if (dataSize == 0) return 0; // sanity check // The size of the AMR frame is determined by the corresponding 'FT' value // in the packet's Table of Contents. unsigned const tocIndex = fOurSource.frameIndex(); if (tocIndex >= fOurSource.TOCSize()) return 0; // sanity check unsigned char const tocByte = fOurSource.TOC()[tocIndex]; unsigned char const FT = (tocByte&0x78) >> 3; // ASSERT: FT < 16 unsigned short frameSize = fOurSource.isWideband() ? frameBytesFromFTWideband[FT] : frameBytesFromFT[FT]; if (frameSize == FT_INVALID) { // Strange TOC entry! fOurSource.envir() << "AMRBufferedPacket::nextEnclosedFrameSize(): invalid FT: " << FT << "\n"; frameSize = 0; // This probably messes up the rest of this packet, but...
} #ifdef DEBUG fprintf(stderr, "AMRBufferedPacket::nextEnclosedFrameSize(): frame #: %d, FT: %d, isWideband: %d => frameSize: %d (dataSize: %d)\n", tocIndex, FT, fOurSource.isWideband(), frameSize, dataSize); #endif ++fOurSource.frameIndex(); if (dataSize < frameSize) return 0; return frameSize; } BufferedPacket* AMRBufferedPacketFactory ::createNewPacket(MultiFramedRTPSource* ourSource) { return new AMRBufferedPacket((RawAMRRTPSource&)(*ourSource)); } ///////// AMRDeinterleavingBuffer ///////// // (used to implement AMRDeinterleaver) #define AMR_MAX_FRAME_SIZE 60 class AMRDeinterleavingBuffer { public: AMRDeinterleavingBuffer(unsigned numChannels, unsigned maxInterleaveGroupSize); virtual ~AMRDeinterleavingBuffer(); void deliverIncomingFrame(unsigned frameSize, RawAMRRTPSource* source, struct timeval presentationTime); Boolean retrieveFrame(unsigned char* to, unsigned maxSize, unsigned& resultFrameSize, unsigned& resultNumTruncatedBytes, u_int8_t& resultFrameHeader, struct timeval& resultPresentationTime, Boolean& resultIsSynchronized); unsigned char* inputBuffer() { return fInputBuffer; } unsigned inputBufferSize() const { return AMR_MAX_FRAME_SIZE; } private: unsigned char* createNewBuffer(); class FrameDescriptor { public: FrameDescriptor(); virtual ~FrameDescriptor(); unsigned frameSize; unsigned char* frameData; u_int8_t frameHeader; struct timeval presentationTime; Boolean fIsSynchronized; }; unsigned fNumChannels, fMaxInterleaveGroupSize; FrameDescriptor* fFrames[2]; unsigned char fIncomingBankId; // toggles between 0 and 1 unsigned char fIncomingBinMax; // in the incoming bank unsigned char fOutgoingBinMax; // in the outgoing bank unsigned char fNextOutgoingBin; Boolean fHaveSeenPackets; u_int16_t fLastPacketSeqNumForGroup; unsigned char* fInputBuffer; struct timeval fLastRetrievedPresentationTime; unsigned fNumSuccessiveSyncedFrames; unsigned char fILL; }; ////////// AMRDeinterleaver implementation ///////// AMRDeinterleaver* AMRDeinterleaver ::createNew(UsageEnvironment& env, Boolean isWideband, unsigned numChannels, unsigned maxInterleaveGroupSize, RawAMRRTPSource* inputSource) { return new AMRDeinterleaver(env, isWideband, numChannels, maxInterleaveGroupSize, inputSource); } AMRDeinterleaver::AMRDeinterleaver(UsageEnvironment& env, Boolean isWideband, unsigned numChannels, unsigned maxInterleaveGroupSize, RawAMRRTPSource* inputSource) : AMRAudioSource(env, isWideband, numChannels), fInputSource(inputSource), fNeedAFrame(False) { fDeinterleavingBuffer = new AMRDeinterleavingBuffer(numChannels, maxInterleaveGroupSize); } AMRDeinterleaver::~AMRDeinterleaver() { delete fDeinterleavingBuffer; Medium::close(fInputSource); } static unsigned const uSecsPerFrame = 20000; // 20 ms void AMRDeinterleaver::doGetNextFrame() { // First, try getting a frame from the deinterleaving buffer: if (fDeinterleavingBuffer->retrieveFrame(fTo, fMaxSize, fFrameSize, fNumTruncatedBytes, fLastFrameHeader, fPresentationTime, fInputSource->isSynchronized())) { // Success! fNeedAFrame = False; fDurationInMicroseconds = uSecsPerFrame; // Call our own 'after getting' function. 
Because we're not a 'leaf' // source, we can call this directly, without risking // infinite recursion afterGetting(this); return; } // No luck, so ask our source for help: fNeedAFrame = True; if (!fInputSource->isCurrentlyAwaitingData()) { fInputSource->getNextFrame(fDeinterleavingBuffer->inputBuffer(), fDeinterleavingBuffer->inputBufferSize(), afterGettingFrame, this, FramedSource::handleClosure, this); } } void AMRDeinterleaver::doStopGettingFrames() { fNeedAFrame = False; fInputSource->stopGettingFrames(); } void AMRDeinterleaver ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned /*numTruncatedBytes*/, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) { AMRDeinterleaver* deinterleaver = (AMRDeinterleaver*)clientData; deinterleaver->afterGettingFrame1(frameSize, presentationTime); } void AMRDeinterleaver ::afterGettingFrame1(unsigned frameSize, struct timeval presentationTime) { RawAMRRTPSource* source = (RawAMRRTPSource*)fInputSource; // First, put the frame into our deinterleaving buffer: fDeinterleavingBuffer->deliverIncomingFrame(frameSize, source, presentationTime); // Then, try delivering a frame to the client (if he wants one): if (fNeedAFrame) doGetNextFrame(); } ////////// AMRDeinterleavingBuffer implementation ///////// AMRDeinterleavingBuffer ::AMRDeinterleavingBuffer(unsigned numChannels, unsigned maxInterleaveGroupSize) : fNumChannels(numChannels), fMaxInterleaveGroupSize(maxInterleaveGroupSize), fIncomingBankId(0), fIncomingBinMax(0), fOutgoingBinMax(0), fNextOutgoingBin(0), fHaveSeenPackets(False), fNumSuccessiveSyncedFrames(0), fILL(0) { // Use two banks of descriptors - one for incoming, one for outgoing fFrames[0] = new FrameDescriptor[fMaxInterleaveGroupSize]; fFrames[1] = new FrameDescriptor[fMaxInterleaveGroupSize]; fInputBuffer = createNewBuffer(); } AMRDeinterleavingBuffer::~AMRDeinterleavingBuffer() { delete[] fInputBuffer; delete[] fFrames[0]; delete[] fFrames[1]; } void AMRDeinterleavingBuffer ::deliverIncomingFrame(unsigned frameSize, RawAMRRTPSource* source, struct timeval presentationTime) { fILL = source->ILL(); unsigned char const ILP = source->ILP(); unsigned frameIndex = source->frameIndex(); unsigned short packetSeqNum = source->curPacketRTPSeqNum(); // First perform a sanity check on the parameters: // (This is overkill, as the source should have already done this.) if (ILP > fILL || frameIndex == 0) { #ifdef DEBUG fprintf(stderr, "AMRDeinterleavingBuffer::deliverIncomingFrame() param sanity check failed (%d,%d,%d,%d)\n", frameSize, fILL, ILP, frameIndex); #endif source->envir().internalError(); } --frameIndex; // because it was incremented by the source when this frame was read u_int8_t frameHeader; if (frameIndex >= source->TOCSize()) { // sanity check frameHeader = FT_NO_DATA<<3; } else { frameHeader = source->TOC()[frameIndex]; } unsigned frameBlockIndex = frameIndex/fNumChannels; unsigned frameWithinFrameBlock = frameIndex%fNumChannels; // The input "presentationTime" was that of the first frame-block in this // packet. 
Update it for the current frame: unsigned uSecIncrement = frameBlockIndex*(fILL+1)*uSecsPerFrame; presentationTime.tv_usec += uSecIncrement; presentationTime.tv_sec += presentationTime.tv_usec/1000000; presentationTime.tv_usec = presentationTime.tv_usec%1000000; // Next, check whether this packet is part of a new interleave group if (!fHaveSeenPackets || seqNumLT(fLastPacketSeqNumForGroup, packetSeqNum + frameBlockIndex)) { // We've moved to a new interleave group #ifdef DEBUG fprintf(stderr, "AMRDeinterleavingBuffer::deliverIncomingFrame(): new interleave group\n"); #endif fHaveSeenPackets = True; fLastPacketSeqNumForGroup = packetSeqNum + fILL - ILP; // Switch the incoming and outgoing banks: fIncomingBankId ^= 1; unsigned char tmp = fIncomingBinMax; fIncomingBinMax = fOutgoingBinMax; fOutgoingBinMax = tmp; fNextOutgoingBin = 0; } // Now move the incoming frame into the appropriate bin: unsigned const binNumber = ((ILP + frameBlockIndex*(fILL+1))*fNumChannels + frameWithinFrameBlock) % fMaxInterleaveGroupSize; // the % is for sanity #ifdef DEBUG fprintf(stderr, "AMRDeinterleavingBuffer::deliverIncomingFrame(): frameIndex %d (%d,%d) put in bank %d, bin %d (%d): size %d, header 0x%02x, presentationTime %lu.%06ld\n", frameIndex, frameBlockIndex, frameWithinFrameBlock, fIncomingBankId, binNumber, fMaxInterleaveGroupSize, frameSize, frameHeader, presentationTime.tv_sec, presentationTime.tv_usec); #endif FrameDescriptor& inBin = fFrames[fIncomingBankId][binNumber]; unsigned char* curBuffer = inBin.frameData; inBin.frameData = fInputBuffer; inBin.frameSize = frameSize; inBin.frameHeader = frameHeader; inBin.presentationTime = presentationTime; inBin.fIsSynchronized = ((RTPSource*)source)->RTPSource::hasBeenSynchronizedUsingRTCP(); if (curBuffer == NULL) curBuffer = createNewBuffer(); fInputBuffer = curBuffer; if (binNumber >= fIncomingBinMax) { fIncomingBinMax = binNumber + 1; } } Boolean AMRDeinterleavingBuffer ::retrieveFrame(unsigned char* to, unsigned maxSize, unsigned& resultFrameSize, unsigned& resultNumTruncatedBytes, u_int8_t& resultFrameHeader, struct timeval& resultPresentationTime, Boolean& resultIsSynchronized) { if (fNextOutgoingBin >= fOutgoingBinMax) return False; // none left FrameDescriptor& outBin = fFrames[fIncomingBankId^1][fNextOutgoingBin]; unsigned char* fromPtr = outBin.frameData; unsigned char fromSize = outBin.frameSize; outBin.frameSize = 0; // for the next time this bin is used resultIsSynchronized = False; // by default; can be changed by: if (outBin.fIsSynchronized) { // Don't consider the outgoing frame to be synchronized until we've received at least a complete interleave cycle of // synchronized frames. This ensures that the receiver will be getting all synchronized frames from now on. 
if (++fNumSuccessiveSyncedFrames > fILL) { resultIsSynchronized = True; fNumSuccessiveSyncedFrames = fILL+1; // prevents overflow } } else { fNumSuccessiveSyncedFrames = 0; } // Check whether this frame is missing; if so, return a FT_NO_DATA frame: if (fromSize == 0) { resultFrameHeader = FT_NO_DATA<<3; // Compute this erasure frame's presentation time via extrapolation: resultPresentationTime = fLastRetrievedPresentationTime; resultPresentationTime.tv_usec += uSecsPerFrame; if (resultPresentationTime.tv_usec >= 1000000) { ++resultPresentationTime.tv_sec; resultPresentationTime.tv_usec -= 1000000; } } else { // Normal case - a frame exists: resultFrameHeader = outBin.frameHeader; resultPresentationTime = outBin.presentationTime; } fLastRetrievedPresentationTime = resultPresentationTime; if (fromSize > maxSize) { resultNumTruncatedBytes = fromSize - maxSize; resultFrameSize = maxSize; } else { resultNumTruncatedBytes = 0; resultFrameSize = fromSize; } memmove(to, fromPtr, resultFrameSize); #ifdef DEBUG fprintf(stderr, "AMRDeinterleavingBuffer::retrieveFrame(): from bank %d, bin %d: size %d, header 0x%02x, presentationTime %lu.%06ld\n", fIncomingBankId^1, fNextOutgoingBin, resultFrameSize, resultFrameHeader, resultPresentationTime.tv_sec, resultPresentationTime.tv_usec); #endif ++fNextOutgoingBin; return True; } unsigned char* AMRDeinterleavingBuffer::createNewBuffer() { return new unsigned char[inputBufferSize()]; } AMRDeinterleavingBuffer::FrameDescriptor::FrameDescriptor() : frameSize(0), frameData(NULL) { } AMRDeinterleavingBuffer::FrameDescriptor::~FrameDescriptor() { delete[] frameData; } // Unpack 'bandwidth-efficient' data to octet-aligned: static unsigned short const frameBitsFromFT[16] = { 95, 103, 118, 134, 148, 159, 204, 244, 39, 0, 0, 0, 0, 0, 0, 0 }; static unsigned short const frameBitsFromFTWideband[16] = { 132, 177, 253, 285, 317, 365, 397, 461, 477, 40, 0, 0, 0, 0, 0, 0 }; static void unpackBandwidthEfficientData(BufferedPacket* packet, Boolean isWideband) { #ifdef DEBUG fprintf(stderr, "Unpacking 'bandwidth-efficient' payload (%d bytes):\n", packet->dataSize()); for (unsigned j = 0; j < packet->dataSize(); ++j) { fprintf(stderr, "%02x:", (packet->data())[j]); } fprintf(stderr, "\n"); #endif BitVector fromBV(packet->data(), 0, 8*packet->dataSize()); unsigned const toBufferSize = 2*packet->dataSize(); // conservatively large unsigned char* toBuffer = new unsigned char[toBufferSize]; unsigned toCount = 0; // Begin with the payload header: unsigned CMR = fromBV.getBits(4); toBuffer[toCount++] = CMR << 4; // Then, run through and unpack the TOC entries: while (1) { unsigned toc = fromBV.getBits(6); toBuffer[toCount++] = toc << 2; if ((toc&0x20) == 0) break; // the F bit is 0 } // Then, using the TOC data, unpack each frame payload: unsigned const tocSize = toCount - 1; for (unsigned i = 1; i <= tocSize; ++i) { unsigned char tocByte = toBuffer[i]; unsigned char const FT = (tocByte&0x78) >> 3; unsigned short frameSizeBits = isWideband ?
frameBitsFromFTWideband[FT] : frameBitsFromFT[FT]; unsigned short frameSizeBytes = (frameSizeBits+7)/8; shiftBits(&toBuffer[toCount], 0, // to packet->data(), fromBV.curBitIndex(), // from frameSizeBits // num bits ); #ifdef DEBUG if (frameSizeBits > fromBV.numBitsRemaining()) { fprintf(stderr, "\tWarning: Unpacking frame %d of %d: want %d bits, but only %d are available!\n", i, tocSize, frameSizeBits, fromBV.numBitsRemaining()); } #endif fromBV.skipBits(frameSizeBits); toCount += frameSizeBytes; } #ifdef DEBUG if (fromBV.numBitsRemaining() > 7) { fprintf(stderr, "\tWarning: %d bits remain unused!\n", fromBV.numBitsRemaining()); } #endif // Finally, replace the current packet data with the unpacked data: packet->removePadding(packet->dataSize()); // throws away current packet data packet->appendData(toBuffer, toCount); delete[] toBuffer; } live/liveMedia/BasicUDPSink.cpp000444 001751 000000 00000007164 12265042432 016571 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simple UDP sink (i.e., without RTP or other headers added); one frame per packet // Implementation #include "BasicUDPSink.hh" #include <GroupsockHelper.hh> // for gettimeofday() BasicUDPSink* BasicUDPSink::createNew(UsageEnvironment& env, Groupsock* gs, unsigned maxPayloadSize) { return new BasicUDPSink(env, gs, maxPayloadSize); } BasicUDPSink::BasicUDPSink(UsageEnvironment& env, Groupsock* gs, unsigned maxPayloadSize) : MediaSink(env), fGS(gs), fMaxPayloadSize(maxPayloadSize) { fOutputBuffer = new unsigned char[fMaxPayloadSize]; } BasicUDPSink::~BasicUDPSink() { delete[] fOutputBuffer; } Boolean BasicUDPSink::continuePlaying() { // Record the fact that we're starting to play now: gettimeofday(&fNextSendTime, NULL); // Arrange to get and send the first payload. // (This will also schedule any future sends.) continuePlaying1(); return True; } void BasicUDPSink::continuePlaying1() { if (fSource != NULL) { fSource->getNextFrame(fOutputBuffer, fMaxPayloadSize, afterGettingFrame, this, onSourceClosure, this); } } void BasicUDPSink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval /*presentationTime*/, unsigned durationInMicroseconds) { BasicUDPSink* sink = (BasicUDPSink*)clientData; sink->afterGettingFrame1(frameSize, numTruncatedBytes, durationInMicroseconds); } void BasicUDPSink::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, unsigned durationInMicroseconds) { if (numTruncatedBytes > 0) { envir() << "BasicUDPSink::afterGettingFrame1(): The input frame data was too large for our specified maximum payload size (" << fMaxPayloadSize << "). " << numTruncatedBytes << " bytes of trailing data were dropped!\n"; } // Send the packet: fGS->output(envir(), fGS->ttl(), fOutputBuffer, frameSize); // Figure out the time at which the next packet should be sent, based // on the duration of the payload that we just read: fNextSendTime.tv_usec += durationInMicroseconds; fNextSendTime.tv_sec += fNextSendTime.tv_usec/1000000; fNextSendTime.tv_usec %= 1000000; struct timeval timeNow; gettimeofday(&timeNow, NULL); int secsDiff = fNextSendTime.tv_sec - timeNow.tv_sec; int64_t uSecondsToGo = secsDiff*1000000 + (fNextSendTime.tv_usec - timeNow.tv_usec); if (uSecondsToGo < 0 || secsDiff < 0) { // sanity check: Make sure that the time-to-delay is non-negative: uSecondsToGo = 0; } // Delay this amount of time: nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecondsToGo, (TaskFunc*)sendNext, this); } // The following is called after each delay between packet sends: void BasicUDPSink::sendNext(void* firstArg) { BasicUDPSink* sink = (BasicUDPSink*)firstArg; sink->continuePlaying1(); } live/liveMedia/RTSPRegisterSender.cpp000444 001751 000000 00000013417 12265042432 020006 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A special object which, when created, sends a custom RTSP "REGISTER" command to a specified client. // Implementation #include "RTSPRegisterSender.hh" #include <GroupsockHelper.hh> // for MAKE_SOCKADDR_IN RTSPRegisterSender* RTSPRegisterSender ::createNew(UsageEnvironment& env, char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToRegister, RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator, Boolean requestStreamingViaTCP, char const* proxyURLSuffix, Boolean reuseConnection, int verbosityLevel, char const* applicationName) { return new RTSPRegisterSender(env, remoteClientNameOrAddress, remoteClientPortNum, rtspURLToRegister, rtspResponseHandler, authenticator, requestStreamingViaTCP, proxyURLSuffix, reuseConnection, verbosityLevel, applicationName); } void RTSPRegisterSender::grabConnection(int& sock, struct sockaddr_in& remoteAddress) { sock = grabSocket(); MAKE_SOCKADDR_IN(remoteAddr, fServerAddress, htons(fRemoteClientPortNum)); remoteAddress = remoteAddr; } RTSPRegisterSender ::RTSPRegisterSender(UsageEnvironment& env, char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToRegister, RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator, Boolean requestStreamingViaTCP, char const* proxyURLSuffix, Boolean reuseConnection, int verbosityLevel, char const* applicationName) : RTSPClient(env, NULL, verbosityLevel, applicationName, 0, -1), fRemoteClientPortNum(remoteClientPortNum) { // Set up a connection to the remote client.
To do this, we create a fake "rtsp://" URL for it: char const* fakeRTSPURLFmt = "rtsp://%s:%u/"; unsigned fakeRTSPURLSize = strlen(fakeRTSPURLFmt) + strlen(remoteClientNameOrAddress) + 5/* max port num len */; char* fakeRTSPURL = new char[fakeRTSPURLSize]; sprintf(fakeRTSPURL, fakeRTSPURLFmt, remoteClientNameOrAddress, remoteClientPortNum); setBaseURL(fakeRTSPURL); delete[] fakeRTSPURL; // Send the "REGISTER" request: if (authenticator != NULL) fCurrentAuthenticator = *authenticator; (void)sendRequest(new RequestRecord_REGISTER(++fCSeq, rtspResponseHandler, rtspURLToRegister, reuseConnection, requestStreamingViaTCP, proxyURLSuffix)); } RTSPRegisterSender::~RTSPRegisterSender() { } Boolean RTSPRegisterSender::setRequestFields(RequestRecord* request, char*& cmdURL, Boolean& cmdURLWasAllocated, char const*& protocolStr, char*& extraHeaders, Boolean& extraHeadersWereAllocated) { if (strcmp(request->commandName(), "REGISTER") == 0) { RequestRecord_REGISTER* request_REGISTER = (RequestRecord_REGISTER*) request; setBaseURL(request_REGISTER->rtspURLToRegister()); cmdURL = (char*)url(); cmdURLWasAllocated = False; // Generate the "Transport:" header that will contain our REGISTER-specific parameters. This will be "extraHeaders". // First, generate the "proxy_url_suffix" parameter string, if any: char* proxyURLSuffixParameterStr; if (request_REGISTER->proxyURLSuffix() == NULL) { proxyURLSuffixParameterStr = strDup(""); } else { char const* proxyURLSuffixParameterFmt = "; proxy_url_suffix=%s"; unsigned proxyURLSuffixParameterSize = strlen(proxyURLSuffixParameterFmt) + strlen(request_REGISTER->proxyURLSuffix()); proxyURLSuffixParameterStr = new char[proxyURLSuffixParameterSize]; sprintf(proxyURLSuffixParameterStr, proxyURLSuffixParameterFmt, request_REGISTER->proxyURLSuffix()); } char const* transportHeaderFmt = "Transport: reuse_connection=%d; preferred_delivery_protocol=%s%s\r\n"; unsigned transportHeaderSize = strlen(transportHeaderFmt) + 100/*conservative*/ + strlen(proxyURLSuffixParameterStr); char* transportHeaderStr = new char[transportHeaderSize]; sprintf(transportHeaderStr, transportHeaderFmt, request_REGISTER->reuseConnection(), request_REGISTER->requestStreamingViaTCP() ? "interleaved" : "udp", proxyURLSuffixParameterStr); delete[] proxyURLSuffixParameterStr; extraHeaders = transportHeaderStr; extraHeadersWereAllocated = True; return True; } else { return RTSPClient::setRequestFields(request, cmdURL, cmdURLWasAllocated, protocolStr, extraHeaders, extraHeadersWereAllocated); } } RTSPRegisterSender::RequestRecord_REGISTER ::RequestRecord_REGISTER(unsigned cseq, RTSPClient::responseHandler* rtspResponseHandler, char const* rtspURLToRegister, Boolean reuseConnection, Boolean requestStreamingViaTCP, char const* proxyURLSuffix) : RTSPClient::RequestRecord(cseq, "REGISTER", rtspResponseHandler), fRTSPURLToRegister(strDup(rtspURLToRegister)), fReuseConnection(reuseConnection), fRequestStreamingViaTCP(requestStreamingViaTCP), fProxyURLSuffix(strDup(proxyURLSuffix)) { } RTSPRegisterSender::RequestRecord_REGISTER::~RequestRecord_REGISTER() { delete[] fRTSPURLToRegister; delete[] fProxyURLSuffix; } live/liveMedia/OutputFile.cpp000444 001751 000000 00000003777 12265042432 016460 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Common routines for opening/closing named output files // Implementation #if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE) #include <io.h> // for _setmode() #include <fcntl.h> // for _O_BINARY #endif #ifndef _WIN32_WCE #include <sys/stat.h> #endif #include <string.h> // for strcmp() #include "OutputFile.hh" FILE* OpenOutputFile(UsageEnvironment& env, char const* fileName) { FILE* fid; // Check for special case 'file names': "stdout" and "stderr" if (strcmp(fileName, "stdout") == 0) { fid = stdout; #if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE) _setmode(_fileno(stdout), _O_BINARY); // convert to binary mode #endif } else if (strcmp(fileName, "stderr") == 0) { fid = stderr; #if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE) _setmode(_fileno(stderr), _O_BINARY); // convert to binary mode #endif } else { fid = fopen(fileName, "wb"); } if (fid == NULL) { env.setResultMsg("unable to open file \"", fileName, "\""); } return fid; } void CloseOutputFile(FILE* fid) { // Don't close 'stdout' or 'stderr', in case we want to use them again later. if (fid != NULL && fid != stdout && fid != stderr) fclose(fid); } live/liveMedia/H264VideoFileSink.cpp000444 001751 000000 00000006037 12265042432 017407 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// H.264 Video File sinks // Implementation #include "H264VideoFileSink.hh" #include "OutputFile.hh" #include "H264VideoRTPSource.hh" ////////// H264VideoFileSink ////////// H264VideoFileSink ::H264VideoFileSink(UsageEnvironment& env, FILE* fid, char const* sPropParameterSetsStr, unsigned bufferSize, char const* perFrameFileNamePrefix) : FileSink(env, fid, bufferSize, perFrameFileNamePrefix), fSPropParameterSetsStr(sPropParameterSetsStr), fHaveWrittenFirstFrame(False) { } H264VideoFileSink::~H264VideoFileSink() { } H264VideoFileSink* H264VideoFileSink::createNew(UsageEnvironment& env, char const* fileName, char const* sPropParameterSetsStr, unsigned bufferSize, Boolean oneFilePerFrame) { do { FILE* fid; char const* perFrameFileNamePrefix; if (oneFilePerFrame) { // Create the fid for each frame fid = NULL; perFrameFileNamePrefix = fileName; } else { // Normal case: create the fid once fid = OpenOutputFile(env, fileName); if (fid == NULL) break; perFrameFileNamePrefix = NULL; } return new H264VideoFileSink(env, fid, sPropParameterSetsStr, bufferSize, perFrameFileNamePrefix); } while (0); return NULL; } void H264VideoFileSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime) { unsigned char const start_code[4] = {0x00, 0x00, 0x00, 0x01}; if (!fHaveWrittenFirstFrame) { // If we have PPS/SPS NAL units encoded in a "sprop parameter string", prepend these to the file: unsigned numSPropRecords; SPropRecord* sPropRecords = parseSPropParameterSets(fSPropParameterSetsStr, numSPropRecords); for (unsigned i = 0; i < numSPropRecords; ++i) { addData(start_code, 4, presentationTime); addData(sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength, presentationTime); } delete[] sPropRecords; fHaveWrittenFirstFrame = True; // for next time } // Write the input data to the file, with the start code in front: addData(start_code, 4, presentationTime); // Call the parent class to complete the normal file write with the input data: FileSink::afterGettingFrame(frameSize, numTruncatedBytes, presentationTime); } live/liveMedia/ADTSAudioFileSource.cpp000444 001751 000000 00000014413 12265042432 020043 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A source object for AAC audio files in ADTS format // Implementation #include "ADTSAudioFileSource.hh" #include "InputFile.hh" #include <GroupsockHelper.hh> // for gettimeofday() ////////// ADTSAudioFileSource ////////// static unsigned const samplingFrequencyTable[16] = { 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350, 0, 0, 0 }; ADTSAudioFileSource* ADTSAudioFileSource::createNew(UsageEnvironment& env, char const* fileName) { FILE* fid = NULL; do { fid = OpenInputFile(env, fileName); if (fid == NULL) break; // Now, having opened the input file, read the fixed header of the first frame, // to get the audio stream's parameters: unsigned char fixedHeader[4]; // it's actually 3.5 bytes long if (fread(fixedHeader, 1, sizeof fixedHeader, fid) < sizeof fixedHeader) break; // Check the 'syncword': if (!(fixedHeader[0] == 0xFF && (fixedHeader[1]&0xF0) == 0xF0)) { env.setResultMsg("Bad 'syncword' at start of ADTS file"); break; } // Get and check the 'profile': u_int8_t profile = (fixedHeader[2]&0xC0)>>6; // 2 bits if (profile == 3) { env.setResultMsg("Bad (reserved) 'profile': 3 in first frame of ADTS file"); break; } // Get and check the 'sampling_frequency_index': u_int8_t sampling_frequency_index = (fixedHeader[2]&0x3C)>>2; // 4 bits if (samplingFrequencyTable[sampling_frequency_index] == 0) { env.setResultMsg("Bad 'sampling_frequency_index' in first frame of ADTS file"); break; } // Get and check the 'channel_configuration': u_int8_t channel_configuration = ((fixedHeader[2]&0x01)<<2)|((fixedHeader[3]&0xC0)>>6); // 3 bits // If we get here, the frame header was OK. // Reset the fid to the beginning of the file: #ifndef _WIN32_WCE rewind(fid); #else SeekFile64(fid, 0, SEEK_SET); #endif #ifdef DEBUG fprintf(stderr, "Read first frame: profile %d, " "sampling_frequency_index %d => samplingFrequency %d, " "channel_configuration %d\n", profile, sampling_frequency_index, samplingFrequencyTable[sampling_frequency_index], channel_configuration); #endif return new ADTSAudioFileSource(env, fid, profile, sampling_frequency_index, channel_configuration); } while (0); // An error occurred: CloseInputFile(fid); return NULL; } ADTSAudioFileSource ::ADTSAudioFileSource(UsageEnvironment& env, FILE* fid, u_int8_t profile, u_int8_t samplingFrequencyIndex, u_int8_t channelConfiguration) : FramedFileSource(env, fid) { fSamplingFrequency = samplingFrequencyTable[samplingFrequencyIndex]; fNumChannels = channelConfiguration == 0 ? 2 : channelConfiguration; fuSecsPerFrame = (1024/*samples-per-frame*/*1000000) / fSamplingFrequency/*samples-per-second*/; // Construct the 'AudioSpecificConfig', and from it, the corresponding ASCII string: unsigned char audioSpecificConfig[2]; u_int8_t const audioObjectType = profile + 1; audioSpecificConfig[0] = (audioObjectType<<3) | (samplingFrequencyIndex>>1); audioSpecificConfig[1] = (samplingFrequencyIndex<<7) | (channelConfiguration<<3); sprintf(fConfigStr, "%02X%02x", audioSpecificConfig[0], audioSpecificConfig[1]); } ADTSAudioFileSource::~ADTSAudioFileSource() { CloseInputFile(fFid); } // Note: We should change the following to use asynchronous file reading, ##### // as we now do with ByteStreamFileSource.
##### void ADTSAudioFileSource::doGetNextFrame() { // Begin by reading the 7-byte fixed_variable headers: unsigned char headers[7]; if (fread(headers, 1, sizeof headers, fFid) < sizeof headers || feof(fFid) || ferror(fFid)) { // The input source has ended: handleClosure(this); return; } // Extract important fields from the headers: Boolean protection_absent = headers[1]&0x01; u_int16_t frame_length = ((headers[3]&0x03)<<11) | (headers[4]<<3) | ((headers[5]&0xE0)>>5); #ifdef DEBUG u_int16_t syncword = (headers[0]<<4) | (headers[1]>>4); fprintf(stderr, "Read frame: syncword 0x%x, protection_absent %d, frame_length %d\n", syncword, protection_absent, frame_length); if (syncword != 0xFFF) fprintf(stderr, "WARNING: Bad syncword!\n"); #endif unsigned numBytesToRead = frame_length > sizeof headers ? frame_length - sizeof headers : 0; // If there's a 'crc_check' field, skip it: if (!protection_absent) { SeekFile64(fFid, 2, SEEK_CUR); numBytesToRead = numBytesToRead > 2 ? numBytesToRead - 2 : 0; } // Next, read the raw frame data into the buffer provided: if (numBytesToRead > fMaxSize) { fNumTruncatedBytes = numBytesToRead - fMaxSize; numBytesToRead = fMaxSize; } int numBytesRead = fread(fTo, 1, numBytesToRead, fFid); if (numBytesRead < 0) numBytesRead = 0; fFrameSize = numBytesRead; fNumTruncatedBytes += numBytesToRead - numBytesRead; // Set the 'presentation time': if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) { // This is the first frame, so use the current time: gettimeofday(&fPresentationTime, NULL); } else { // Increment by the play time of the previous frame: unsigned uSeconds = fPresentationTime.tv_usec + fuSecsPerFrame; fPresentationTime.tv_sec += uSeconds/1000000; fPresentationTime.tv_usec = uSeconds%1000000; } fDurationInMicroseconds = fuSecsPerFrame; // Switch to another task, and inform the reader that he has data: nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this); } live/liveMedia/MP3InternalsHuffmanTable.cpp000444 001751 000000 00000330135 12265042432 021103 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// MP3 internal implementation details (Huffman encoding) // Table #include "MP3InternalsHuffman.hh" unsigned char huffdec[] = { 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x30, 0x20, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x20, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x37, 0x20, 0x20, 0x33, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x33, 0x20, 0x20, 0x31, 0x37, 0x20, 0x20, 0x33, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x35, 0x20, 0x20, 0x33, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 
0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x36, 0x20, 0x20, 0x33, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x37, 0x20, 0x20, 0x37, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 
0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x38, 0x20, 0x20, 0x37, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x0a, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x33, 0x35, 
0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x39, 0x20, 0x20, 0x37, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x30, 0x20, 0x31, 0x32, 0x37, 0x20, 0x20, 0x38, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 
0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x31, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x31, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x31, 0x20, 0x0a, 0x31, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36, 0x34, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x20, 0x30, 
0x20, 0x37, 0x33, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x35, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x37, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x31, 0x20, 0x31, 0x32, 0x37, 0x20, 0x20, 0x38, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x31, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x31, 0x65, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 
0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x31, 0x20, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36, 0x34, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x35, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x37, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x32, 0x20, 0x31, 0x32, 0x37, 0x20, 0x20, 0x38, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x31, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 
0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x31, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x35, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x37, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x37, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x33, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 
0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x31, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x0a, 0x34, 0x36, 0x20, 0x20, 0x31, 0x20, 0x31, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 
0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x20, 0x30, 0x20, 0x37, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20, 0x34, 0x38, 0x20, 0x20, 0x31, 0x20, 0x31, 0x38, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x38, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x39, 0x20, 0x31, 0x38, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20, 0x20, 0x30, 0x20, 0x36, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x20, 0x30, 0x20, 0x38, 0x33, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x39, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x38, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x38, 0x20, 0x20, 0x30, 0x20, 0x33, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x39, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x38, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x61, 0x20, 0x34, 0x34, 0x20, 0x20, 0x31, 0x20, 0x31, 0x38, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x61, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x61, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 
0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x61, 0x20, 0x20, 0x30, 0x20, 0x39, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x62, 0x20, 0x31, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x37, 0x20, 0x20, 0x30, 0x20, 0x39, 0x34, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x37, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x37, 0x38, 0x20, 0x20, 0x30, 0x20, 0x61, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x39, 0x20, 0x20, 0x30, 0x20, 0x61, 0x35, 0x20, 0x20, 0x30, 0x20, 0x32, 0x62, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x61, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x38, 0x20, 0x20, 0x30, 0x20, 0x62, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x39, 0x20, 0x20, 0x30, 0x20, 0x61, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x34, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x63, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63, 0x20, 0x20, 0x30, 0x20, 0x39, 0x38, 0x20, 0x20, 0x30, 0x20, 0x63, 0x31, 0x20, 0x33, 0x63, 0x20, 0x20, 0x31, 0x20, 0x31, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x63, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x39, 0x20, 0x20, 0x30, 0x20, 0x62, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x63, 0x20, 0x20, 0x30, 0x20, 0x33, 0x63, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x62, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x63, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x64, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x37, 0x20, 0x20, 0x30, 0x20, 0x61, 0x37, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x63, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x61, 0x20, 0x20, 
0x30, 0x20, 0x39, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x63, 0x20, 0x20, 0x30, 0x20, 0x62, 0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x64, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x64, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x62, 0x20, 0x20, 0x30, 0x20, 0x64, 0x33, 0x20, 0x33, 0x34, 0x20, 0x20, 0x31, 0x20, 0x31, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x64, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x63, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x63, 0x20, 0x20, 0x30, 0x20, 0x61, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x61, 0x20, 0x20, 0x30, 0x20, 0x64, 0x34, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63, 0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x30, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x65, 0x20, 0x20, 0x30, 0x20, 0x32, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x32, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x65, 0x33, 0x20, 0x20, 0x30, 0x20, 0x36, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x63, 0x20, 0x20, 0x30, 0x20, 0x65, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x35, 0x20, 0x20, 0x30, 0x20, 0x62, 0x61, 0x20, 0x20, 0x30, 0x20, 0x66, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x66, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x61, 0x20, 0x20, 0x30, 0x20, 0x39, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x65, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x36, 0x20, 0x20, 0x30, 0x20, 0x63, 0x38, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x62, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x65, 0x20, 0x20, 0x30, 0x20, 0x63, 0x39, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 
0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x63, 0x20, 0x20, 0x30, 0x20, 0x36, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x66, 0x20, 0x0a, 0x32, 0x30, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x64, 0x20, 0x20, 0x30, 0x20, 0x33, 0x66, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x65, 0x36, 0x20, 0x20, 0x30, 0x20, 0x63, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x66, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x62, 0x20, 0x20, 0x30, 0x20, 0x61, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x37, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x38, 0x20, 0x31, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x36, 0x20, 0x20, 0x30, 0x20, 0x63, 0x62, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x63, 0x20, 0x20, 0x30, 0x20, 0x61, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x61, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x37, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x65, 0x20, 0x20, 0x30, 0x20, 0x37, 0x66, 0x20, 0x20, 0x30, 0x20, 0x38, 0x65, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x65, 0x20, 0x20, 0x30, 0x20, 0x61, 0x65, 0x20, 0x20, 0x30, 0x20, 0x63, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x66, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x61, 0x20, 0x20, 0x30, 0x20, 0x66, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x64, 0x20, 0x20, 0x30, 0x20, 0x66, 0x61, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x64, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x65, 0x63, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x39, 0x20, 0x20, 0x30, 0x20, 0x61, 0x66, 
0x20, 0x20, 0x30, 0x20, 0x64, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x65, 0x20, 0x20, 0x30, 0x20, 0x66, 0x62, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x66, 0x20, 0x20, 0x30, 0x20, 0x64, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x66, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x63, 0x20, 0x20, 0x30, 0x20, 0x66, 0x65, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x34, 0x20, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x35, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x33, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x35, 
0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20, 0x35, 0x61, 0x20, 0x20, 0x31, 0x20, 0x32, 0x34, 0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x31, 0x38, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x30, 0x20, 0x38, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x38, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x38, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x38, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x32, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x32, 0x39, 0x20, 0x0a, 0x35, 0x63, 0x20, 0x20, 0x31, 0x20, 0x32, 0x34, 0x20, 0x20, 0x31, 0x20, 0x31, 
0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x38, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x39, 0x20, 0x20, 0x30, 0x20, 0x37, 0x37, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x39, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x39, 0x20, 0x20, 0x30, 0x20, 0x38, 0x36, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x38, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x61, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x35, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x35, 0x39, 0x20, 0x31, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x61, 0x20, 0x20, 0x30, 0x20, 0x38, 0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x61, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x61, 0x20, 0x20, 0x30, 0x20, 0x39, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x39, 0x20, 0x20, 0x30, 0x20, 0x62, 0x30, 0x20, 0x20, 0x30, 0x20, 0x62, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x31, 0x62, 0x20, 0x20, 0x30, 0x20, 0x61, 0x35, 0x20, 0x20, 0x30, 0x20, 0x62, 0x32, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x61, 0x20, 0x20, 0x30, 0x20, 0x32, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x38, 0x20, 0x20, 0x30, 0x20, 0x39, 0x37, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x39, 0x20, 0x20, 0x30, 0x20, 0x33, 0x62, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x62, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x63, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x63, 0x20, 0x20, 0x30, 0x20, 0x62, 0x35, 0x20, 0x35, 0x30, 0x20, 0x20, 0x31, 0x20, 0x32, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x62, 0x20, 0x20, 0x30, 0x20, 
0x32, 0x63, 0x20, 0x20, 0x30, 0x20, 0x63, 0x32, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x30, 0x20, 0x20, 0x30, 0x20, 0x61, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x61, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x61, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x63, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63, 0x20, 0x20, 0x30, 0x20, 0x39, 0x39, 0x20, 0x20, 0x30, 0x20, 0x62, 0x36, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x63, 0x20, 0x20, 0x30, 0x20, 0x61, 0x38, 0x20, 0x31, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x61, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x63, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x30, 0x20, 0x20, 0x30, 0x20, 0x35, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x64, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x64, 0x20, 0x20, 0x30, 0x20, 0x32, 0x64, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x32, 0x20, 0x20, 0x30, 0x20, 0x64, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63, 0x36, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x63, 0x20, 0x20, 0x30, 0x20, 0x61, 0x39, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x38, 0x20, 0x20, 0x30, 0x20, 0x64, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x62, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x63, 0x20, 0x34, 0x34, 0x20, 0x20, 0x31, 0x20, 0x32, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x64, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x61, 0x20, 0x20, 0x30, 0x20, 0x32, 0x65, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x62, 
0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x33, 0x20, 0x20, 0x30, 0x20, 0x64, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x64, 0x20, 0x20, 0x30, 0x20, 0x33, 0x65, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x63, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x64, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x35, 0x20, 0x20, 0x30, 0x20, 0x62, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x62, 0x20, 0x20, 0x30, 0x20, 0x35, 0x65, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x63, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x66, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x30, 0x20, 0x20, 0x30, 0x20, 0x36, 0x65, 0x20, 0x20, 0x30, 0x20, 0x66, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x36, 0x20, 0x0a, 0x32, 0x36, 0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x38, 0x20, 0x20, 0x30, 0x20, 0x66, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x34, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x61, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x63, 0x20, 0x20, 0x30, 0x20, 0x65, 0x37, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x65, 0x20, 0x20, 0x30, 0x20, 0x66, 0x35, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x64, 0x20, 0x20, 0x30, 0x20, 0x35, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x36, 0x20, 0x20, 0x30, 0x20, 0x63, 0x62, 0x20, 0x32, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20, 0x20, 0x30, 0x20, 0x61, 0x65, 0x20, 0x20, 0x30, 0x20, 0x36, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x62, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64, 0x61, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x64, 0x20, 0x20, 0x30, 0x20, 0x66, 0x37, 0x20, 0x20, 
0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x39, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x39, 0x65, 0x20, 0x20, 0x30, 0x20, 0x63, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x66, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x61, 0x20, 0x20, 0x30, 0x20, 0x66, 0x39, 0x20, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x66, 0x20, 0x20, 0x30, 0x20, 0x64, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x62, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x65, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x66, 0x20, 0x20, 0x30, 0x20, 0x64, 0x64, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x63, 0x20, 0x20, 0x30, 0x20, 0x63, 0x65, 0x20, 0x20, 0x30, 0x20, 0x66, 0x62, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x65, 0x20, 0x20, 0x30, 0x20, 0x66, 0x63, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x65, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x66, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x36, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x31, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x32, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 
0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x38, 0x61, 0x20, 0x20, 0x31, 0x20, 0x32, 0x38, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x30, 0x20, 0x37, 0x31, 0x20, 0x32, 0x38, 0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x20, 0x30, 0x20, 0x36, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x35, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x30, 0x20, 0x20, 0x30, 0x20, 0x38, 0x31, 0x20, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x32, 0x20, 0x20, 
0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x38, 0x20, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x31, 0x38, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x35, 0x20, 0x20, 0x30, 0x20, 0x38, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x39, 0x30, 0x20, 0x20, 0x30, 0x20, 0x39, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x39, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x39, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x31, 0x61, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20, 0x30, 0x20, 0x34, 0x39, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x34, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x37, 0x20, 0x20, 0x30, 0x20, 0x38, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x38, 0x20, 0x20, 0x30, 0x20, 0x39, 0x35, 0x20, 0x64, 0x63, 0x20, 0x20, 0x31, 0x20, 0x37, 0x65, 0x20, 0x20, 0x31, 0x20, 0x33, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x61, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x39, 0x20, 0x20, 0x30, 0x20, 0x33, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x38, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x62, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x62, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 
0x61, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x61, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x61, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63, 0x20, 0x20, 0x30, 0x20, 0x63, 0x31, 0x20, 0x0a, 0x31, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x35, 0x20, 0x20, 0x30, 0x20, 0x63, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x63, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x37, 0x20, 0x20, 0x30, 0x20, 0x63, 0x33, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x34, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x64, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x38, 0x20, 0x20, 0x30, 0x20, 0x39, 0x37, 0x20, 0x20, 0x30, 0x20, 0x33, 0x62, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x33, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x65, 0x20, 0x20, 0x30, 0x20, 0x32, 0x65, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x65, 0x32, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x38, 0x20, 0x20, 0x30, 0x20, 0x63, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x39, 0x20, 0x20, 0x30, 0x20, 0x35, 0x62, 0x20, 0x0a, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x63, 0x20, 0x20, 0x30, 0x20, 0x39, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x61, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x61, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x63, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63, 0x36, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x63, 0x20, 0x20, 0x30, 0x20, 0x39, 0x61, 0x20, 0x35, 0x38, 0x20, 0x20, 0x31, 0x20, 0x35, 0x36, 0x20, 0x20, 0x31, 0x20, 0x32, 0x34, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x62, 
0x20, 0x20, 0x30, 0x20, 0x34, 0x64, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x63, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x65, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x65, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x30, 0x20, 0x20, 0x30, 0x20, 0x62, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x62, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x39, 0x20, 0x20, 0x30, 0x20, 0x62, 0x38, 0x20, 0x20, 0x30, 0x20, 0x64, 0x34, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x39, 0x20, 0x31, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x62, 0x20, 0x20, 0x30, 0x20, 0x64, 0x36, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x65, 0x20, 0x20, 0x30, 0x20, 0x63, 0x38, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x63, 0x20, 0x20, 0x30, 0x20, 0x65, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x64, 0x37, 0x20, 0x20, 0x30, 0x20, 0x65, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x61, 0x20, 0x20, 0x30, 0x20, 0x61, 0x62, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x63, 0x20, 0x20, 0x30, 0x20, 0x65, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x65, 0x20, 0x20, 0x30, 0x20, 0x64, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x64, 0x20, 0x20, 0x30, 0x20, 0x62, 0x62, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x37, 0x20, 0x20, 0x30, 0x20, 0x39, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x63, 0x20, 0x20, 0x30, 0x20, 0x39, 0x65, 0x20, 0x20, 0x30, 0x20, 0x66, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20, 0x20, 0x30, 0x20, 0x32, 0x66, 0x20, 0x0a, 0x34, 0x32, 0x20, 0x20, 0x31, 0x20, 0x33, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x32, 0x20, 0x33, 0x34, 0x20, 0x20, 0x31, 0x20, 0x33, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x65, 0x20, 0x0a, 
0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63, 0x39, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x63, 0x20, 0x20, 0x30, 0x20, 0x37, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x61, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x61, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63, 0x63, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x62, 0x20, 0x20, 0x30, 0x20, 0x64, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x64, 0x20, 0x20, 0x30, 0x20, 0x62, 0x65, 0x20, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x62, 0x20, 0x20, 0x30, 0x20, 0x65, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x65, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x39, 0x20, 0x20, 0x30, 0x20, 0x65, 0x61, 0x20, 0x20, 0x30, 0x20, 0x65, 0x39, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x63, 0x20, 0x20, 0x30, 0x20, 0x63, 0x65, 0x20, 0x20, 0x30, 0x20, 0x33, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x66, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x66, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x36, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x66, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x38, 0x20, 0x20, 0x30, 0x20, 0x66, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x61, 0x20, 0x20, 0x30, 0x20, 0x61, 0x66, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x63, 0x20, 0x20, 0x30, 0x20, 0x63, 0x66, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x66, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x37, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x32, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x31, 0x36, 0x0a, 
0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x38, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x33, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x39, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x34, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x30, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x36, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x31, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x38, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x32, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x31, 0x30, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x33, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x31, 0x33, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x34, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x34, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x33, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 
0x20, 0x35, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x0a, 0x66, 0x61, 0x20, 0x20, 0x31, 0x20, 0x36, 0x32, 0x20, 0x20, 0x31, 0x20, 0x32, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x0a, 0x32, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x32, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x38, 0x20, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x38, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x38, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x32, 0x20, 0x20, 0x30, 0x20, 0x37, 
0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x32, 0x39, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x38, 0x20, 0x35, 0x63, 0x20, 0x20, 0x31, 0x20, 0x32, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x39, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x37, 0x20, 0x20, 0x30, 0x20, 0x38, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x38, 0x20, 0x20, 0x30, 0x20, 0x61, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x61, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x37, 0x38, 0x20, 0x20, 0x30, 0x20, 0x34, 0x61, 0x20, 0x31, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x34, 0x20, 0x20, 0x30, 0x20, 0x39, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x39, 0x20, 0x20, 0x30, 0x20, 0x62, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x62, 0x20, 0x20, 0x30, 0x20, 0x61, 0x35, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x61, 0x20, 0x20, 0x30, 0x20, 0x32, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x38, 0x20, 0x20, 0x30, 0x20, 0x62, 0x33, 0x20, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x39, 0x20, 0x20, 0x30, 0x20, 0x61, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x34, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 
0x20, 0x30, 0x20, 0x39, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x39, 0x20, 0x34, 0x33, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x32, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x63, 0x20, 0x20, 0x30, 0x20, 0x62, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x63, 0x20, 0x20, 0x30, 0x20, 0x61, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x61, 0x20, 0x20, 0x30, 0x20, 0x63, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64, 0x30, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x62, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x39, 0x20, 0x20, 0x30, 0x20, 0x61, 0x38, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x61, 0x20, 0x20, 0x30, 0x20, 0x63, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x31, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x32, 0x20, 0x20, 0x39, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63, 0x36, 0x20, 0x35, 0x35, 0x20, 0x66, 0x61, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x63, 0x20, 0x20, 0x30, 0x20, 0x61, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x61, 0x20, 0x20, 0x30, 0x20, 0x64, 0x34, 0x20, 0x32, 0x30, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63, 0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x62, 0x20, 0x20, 0x30, 
0x20, 0x65, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x64, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x64, 0x20, 0x31, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x65, 0x20, 0x20, 0x30, 0x20, 0x34, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x63, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x34, 0x20, 0x20, 0x30, 0x20, 0x64, 0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x64, 0x20, 0x20, 0x30, 0x20, 0x61, 0x62, 0x20, 0x20, 0x30, 0x20, 0x65, 0x35, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x61, 0x20, 0x20, 0x30, 0x20, 0x35, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x63, 0x20, 0x20, 0x30, 0x20, 0x36, 0x65, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x61, 0x20, 0x34, 0x61, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x66, 0x20, 0x34, 0x30, 0x20, 0x20, 0x31, 0x20, 0x33, 0x61, 0x20, 0x20, 0x31, 0x20, 0x32, 0x30, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x63, 0x20, 0x20, 0x30, 0x20, 0x65, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x37, 0x65, 0x20, 0x20, 0x30, 0x20, 0x64, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x65, 0x20, 0x20, 0x30, 0x20, 0x63, 0x62, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x62, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x65, 0x20, 0x20, 0x30, 0x20, 0x63, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x64, 0x20, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x61, 0x20, 0x20, 0x30, 0x20, 0x61, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x63, 0x20, 0x20, 0x30, 0x20, 0x63, 0x64, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 
0x62, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x62, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x63, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x65, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x66, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x66, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x66, 0x20, 0x20, 0x30, 0x20, 0x38, 0x66, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x38, 0x20, 0x20, 0x30, 0x20, 0x66, 0x39, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x66, 0x20, 0x20, 0x30, 0x20, 0x61, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x61, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x63, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x63, 0x66, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x66, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x35, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x35, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x36, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x36, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x37, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x37, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x38, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x38, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x39, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x39, 
0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x33, 0x30, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x31, 0x31, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x33, 0x31, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x31, 0x33, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x33, 0x32, 0x20, 0x20, 0x33, 0x31, 0x20, 0x20, 0x31, 0x20, 0x31, 0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x65, 0x20, 0x20, 0x30, 0x20, 0x20, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20, 0x20, 0x30, 0x20, 0x20, 0x62, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x33, 0x33, 0x20, 0x20, 0x33, 0x31, 0x20, 0x20, 0x31, 0x20, 0x31, 0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x20, 0x20, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61, 0x20, 0x20, 0x30, 0x20, 0x20, 0x62, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63, 0x20, 0x20, 0x30, 0x20, 0x20, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x65, 0x20, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20, 0x0a, 0x0a, 0x2e, 0x65, 0x6e, 0x64, 0x0a }; live/liveMedia/ADTSAudioFileServerMediaSubsession.cpp000444 001751 000000 00000004474 12265042432 023075 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by 
the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an AAC audio file in ADTS format // Implementation #include "ADTSAudioFileServerMediaSubsession.hh" #include "ADTSAudioFileSource.hh" #include "MPEG4GenericRTPSink.hh" ADTSAudioFileServerMediaSubsession* ADTSAudioFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) { return new ADTSAudioFileServerMediaSubsession(env, fileName, reuseFirstSource); } ADTSAudioFileServerMediaSubsession ::ADTSAudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) : FileServerMediaSubsession(env, fileName, reuseFirstSource) { } ADTSAudioFileServerMediaSubsession ::~ADTSAudioFileServerMediaSubsession() { } FramedSource* ADTSAudioFileServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { estBitrate = 96; // kbps, estimate return ADTSAudioFileSource::createNew(envir(), fFileName); } RTPSink* ADTSAudioFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource) { ADTSAudioFileSource* adtsSource = (ADTSAudioFileSource*)inputSource; return MPEG4GenericRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, adtsSource->samplingFrequency(), "audio", "AAC-hbr", adtsSource->configStr(), adtsSource->numChannels()); } live/liveMedia/MPEGVideoStreamParser.hh000444 001751 000000 00000007214 12265042432 020233 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
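// Illustrative sketch (not part of this distribution): how the
// "ADTSAudioFileServerMediaSubsession" above is typically used - added to a
// "ServerMediaSession" on an RTSP server, mirroring the pattern of the
// "testOnDemandRTSPServer" demo in "testProgs". The port number 8554 and the
// file name "test.aac" are assumptions made for the example.
#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

int main() {
   TaskScheduler* scheduler = BasicTaskScheduler::createNew();
   UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

   RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
   if (rtspServer == NULL) {
      *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
      return 1;
   }

   ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, "aacAudio", "test.aac", "ADTS AAC stream");
   sms->addSubsession(ADTSAudioFileServerMediaSubsession
                      ::createNew(*env, "test.aac", False/*reuseFirstSource*/));
   rtspServer->addServerMediaSession(sms);

   char* url = rtspServer->rtspURL(sms);
   *env << "Play this stream using the URL \"" << url << "\"\n";
   delete[] url;

   env->taskScheduler().doEventLoop(); // does not return
   return 0;
}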
// An abstract parser for MPEG video streams // C++ header #ifndef _MPEG_VIDEO_STREAM_PARSER_HH #define _MPEG_VIDEO_STREAM_PARSER_HH #ifndef _STREAM_PARSER_HH #include "StreamParser.hh" #endif #ifndef _MPEG_VIDEO_STREAM_FRAMER_HH #include "MPEGVideoStreamFramer.hh" #endif ////////// MPEGVideoStreamParser definition ////////// class MPEGVideoStreamParser: public StreamParser { public: MPEGVideoStreamParser(MPEGVideoStreamFramer* usingSource, FramedSource* inputSource); virtual ~MPEGVideoStreamParser(); public: void registerReadInterest(unsigned char* to, unsigned maxSize); virtual unsigned parse() = 0; // returns the size of the frame that was acquired, or 0 if none was // The number of truncated bytes (if any) is given by: unsigned numTruncatedBytes() const { return fNumTruncatedBytes; } protected: void setParseState() { fSavedTo = fTo; fSavedNumTruncatedBytes = fNumTruncatedBytes; saveParserState(); } // Record "byte" in the current output frame: void saveByte(u_int8_t byte) { if (fTo >= fLimit) { // there's no space left ++fNumTruncatedBytes; return; } *fTo++ = byte; } void save4Bytes(u_int32_t word) { if (fTo+4 > fLimit) { // there's no space left fNumTruncatedBytes += 4; return; } *fTo++ = word>>24; *fTo++ = word>>16; *fTo++ = word>>8; *fTo++ = word; } // Save data until we see a sync word (0x000001xx): void saveToNextCode(u_int32_t& curWord) { saveByte(curWord>>24); curWord = (curWord<<8)|get1Byte(); while ((curWord&0xFFFFFF00) != 0x00000100) { if ((unsigned)(curWord&0xFF) > 1) { // a sync word definitely doesn't begin anywhere in "curWord" save4Bytes(curWord); curWord = get4Bytes(); } else { // a sync word might begin in "curWord", although not at its start saveByte(curWord>>24); unsigned char newByte = get1Byte(); curWord = (curWord<<8)|newByte; } } } // Skip data until we see a sync word (0x000001xx): void skipToNextCode(u_int32_t& curWord) { curWord = (curWord<<8)|get1Byte(); while ((curWord&0xFFFFFF00) != 0x00000100) { if ((unsigned)(curWord&0xFF) > 1) { // a sync word definitely doesn't begin anywhere in "curWord" curWord = get4Bytes(); } else { // a sync word might begin in "curWord", although not at its start unsigned char newByte = get1Byte(); curWord = (curWord<<8)|newByte; } } } protected: MPEGVideoStreamFramer* fUsingSource; // state of the frame that's currently being read: unsigned char* fStartOfFrame; unsigned char* fTo; unsigned char* fLimit; unsigned fNumTruncatedBytes; unsigned curFrameSize() { return fTo - fStartOfFrame; } unsigned char* fSavedTo; unsigned fSavedNumTruncatedBytes; private: // redefined virtual functions virtual void restoreSavedParserState(); }; #endif live/liveMedia/ourMD5.hh000444 001751 000000 00000002560 12265042432 015275 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. 
All rights reserved. // Because MD5 may not be implemented (at least, with the same interface) on all systems, // we have our own implementation. // C++ header #ifndef _OUR_MD5_HH #define _OUR_MD5_HH extern char* our_MD5Data(unsigned char const* data, unsigned dataSize, char* outputDigest); // "outputDigest" must be either NULL (in which case this function returns a heap-allocated // buffer, which should be later delete[]d by the caller), or else it must point to // a (>=)33-byte buffer (which this function will also return). #endif live/liveMedia/Locale.cpp000444 001751 000000 00000003742 12265042432 015547 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Support for temporarily setting the locale (e.g., to "C" or "POSIX") for (e.g.) parsing or printing // floating-point numbers in protocol headers, or calling toupper()/tolower() on human-input strings. // Implementation #include "Locale.hh" #include Locale::Locale(char const* newLocale, LocaleCategory category) { #ifndef LOCALE_NOT_USED #ifndef XLOCALE_NOT_USED int categoryMask; switch (category) { case All: { categoryMask = LC_ALL_MASK; break; } case Numeric: { categoryMask = LC_NUMERIC_MASK; break; } } fLocale = newlocale(categoryMask, newLocale, NULL); fPrevLocale = uselocale(fLocale); #else switch (category) { case All: { fCategoryNum = LC_ALL; break; } case Numeric: { fCategoryNum = LC_NUMERIC; break; } } fPrevLocale = strDup(setlocale(fCategoryNum, NULL)); setlocale(fCategoryNum, newLocale); #endif #endif } Locale::~Locale() { #ifndef LOCALE_NOT_USED #ifndef XLOCALE_NOT_USED if (fLocale != (locale_t)0) { uselocale(fPrevLocale); freelocale(fLocale); } #else if (fPrevLocale != NULL) { setlocale(fCategoryNum, fPrevLocale); delete[] fPrevLocale; } #endif #endif } live/liveMedia/MPEG2TransportStreamFromESSource.cpp000444 001751 000000 00000022124 12265042432 022503 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
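// Illustrative sketch (not part of this distribution): the two calling
// conventions of "our_MD5Data()" declared above, plus "Locale" (implemented
// in "Locale.cpp" above) used as a scope guard while parsing a
// floating-point protocol field. The main() wrapper and the input strings
// are assumptions made for the example.
#include <cstdio>
#include "ourMD5.hh"
#include "Locale.hh"

int main() {
   char const str[] = "some data to digest";
   unsigned char const* data = (unsigned char const*)str;

   // 1) Pass NULL: our_MD5Data() heap-allocates the 33-byte digest string,
   //    which the caller must later delete[]:
   char* digest = our_MD5Data(data, sizeof str - 1, NULL);
   printf("MD5: %s\n", digest);
   delete[] digest;

   // 2) Pass a caller-supplied (>=33-byte) buffer; the same pointer is returned:
   char digestBuf[33];
   printf("MD5: %s\n", our_MD5Data(data, sizeof str - 1, digestBuf));

   {
      Locale l("C", Numeric); // use "C" numeric syntax, whatever the process-wide locale
      double npt = 0.0;
      sscanf("npt=3.25", "npt=%lf", &npt); // '.' is reliably the radix point here
   } // the previous locale is restored when "l" goes out of scope

   return 0;
}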
// A filter for converting one or more MPEG Elementary Streams // to a MPEG-2 Transport Stream // Implementation #include "MPEG2TransportStreamFromESSource.hh" #define MAX_INPUT_ES_FRAME_SIZE 100000 #define SIMPLE_PES_HEADER_SIZE 14 #define LOW_WATER_MARK 1000 // <= MAX_INPUT_ES_FRAME_SIZE #define INPUT_BUFFER_SIZE (SIMPLE_PES_HEADER_SIZE + 2*MAX_INPUT_ES_FRAME_SIZE) ////////// InputESSourceRecord definition ////////// class InputESSourceRecord { public: InputESSourceRecord(MPEG2TransportStreamFromESSource& parent, FramedSource* inputSource, u_int8_t streamId, int mpegVersion, InputESSourceRecord* next); virtual ~InputESSourceRecord(); InputESSourceRecord* next() const { return fNext; } FramedSource* inputSource() const { return fInputSource; } void askForNewData(); Boolean deliverBufferToClient(); unsigned char* buffer() const { return fInputBuffer; } void reset() { // Reset the buffer for future use: fInputBufferBytesAvailable = 0; fInputBufferInUse = False; } private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime); private: InputESSourceRecord* fNext; MPEG2TransportStreamFromESSource& fParent; FramedSource* fInputSource; u_int8_t fStreamId; int fMPEGVersion; unsigned char* fInputBuffer; unsigned fInputBufferBytesAvailable; Boolean fInputBufferInUse; MPEG1or2Demux::SCR fSCR; }; ////////// MPEG2TransportStreamFromESSource implementation ////////// MPEG2TransportStreamFromESSource* MPEG2TransportStreamFromESSource ::createNew(UsageEnvironment& env) { return new MPEG2TransportStreamFromESSource(env); } void MPEG2TransportStreamFromESSource ::addNewVideoSource(FramedSource* inputSource, int mpegVersion) { u_int8_t streamId = 0xE0 | (fVideoSourceCounter++&0x0F); addNewInputSource(inputSource, streamId, mpegVersion); fHaveVideoStreams = True; } void MPEG2TransportStreamFromESSource ::addNewAudioSource(FramedSource* inputSource, int mpegVersion) { u_int8_t streamId = 0xC0 | (fAudioSourceCounter++&0x0F); addNewInputSource(inputSource, streamId, mpegVersion); } MPEG2TransportStreamFromESSource ::MPEG2TransportStreamFromESSource(UsageEnvironment& env) : MPEG2TransportStreamMultiplexor(env), fInputSources(NULL), fVideoSourceCounter(0), fAudioSourceCounter(0) { fHaveVideoStreams = False; // unless we add a video source } MPEG2TransportStreamFromESSource::~MPEG2TransportStreamFromESSource() { doStopGettingFrames(); delete fInputSources; } void MPEG2TransportStreamFromESSource::doStopGettingFrames() { // Stop each input source: for (InputESSourceRecord* sourceRec = fInputSources; sourceRec != NULL; sourceRec = sourceRec->next()) { sourceRec->inputSource()->stopGettingFrames(); } } void MPEG2TransportStreamFromESSource ::awaitNewBuffer(unsigned char* oldBuffer) { InputESSourceRecord* sourceRec; // Begin by resetting the old buffer: if (oldBuffer != NULL) { for (sourceRec = fInputSources; sourceRec != NULL; sourceRec = sourceRec->next()) { if (sourceRec->buffer() == oldBuffer) { sourceRec->reset(); break; } } } if (isCurrentlyAwaitingData()) { // Try to deliver one filled-in buffer to the client: for (sourceRec = fInputSources; sourceRec != NULL; sourceRec = sourceRec->next()) { if (sourceRec->deliverBufferToClient()) break; } } // No filled-in buffers are available. 
Ask each of our inputs for data: for (sourceRec = fInputSources; sourceRec != NULL; sourceRec = sourceRec->next()) { sourceRec->askForNewData(); } } void MPEG2TransportStreamFromESSource ::addNewInputSource(FramedSource* inputSource, u_int8_t streamId, int mpegVersion) { if (inputSource == NULL) return; fInputSources = new InputESSourceRecord(*this, inputSource, streamId, mpegVersion, fInputSources); } ////////// InputESSourceRecord implementation ////////// InputESSourceRecord ::InputESSourceRecord(MPEG2TransportStreamFromESSource& parent, FramedSource* inputSource, u_int8_t streamId, int mpegVersion, InputESSourceRecord* next) : fNext(next), fParent(parent), fInputSource(inputSource), fStreamId(streamId), fMPEGVersion(mpegVersion) { fInputBuffer = new unsigned char[INPUT_BUFFER_SIZE]; reset(); } InputESSourceRecord::~InputESSourceRecord() { Medium::close(fInputSource); delete[] fInputBuffer; delete fNext; } void InputESSourceRecord::askForNewData() { if (fInputBufferInUse) return; if (fInputBufferBytesAvailable == 0) { // Reset our buffer, by adding a simple PES header at the start: fInputBuffer[0] = 0; fInputBuffer[1] = 0; fInputBuffer[2] = 1; fInputBuffer[3] = fStreamId; fInputBuffer[4] = 0; fInputBuffer[5] = 0; // fill in later with the length fInputBuffer[6] = 0x80; fInputBuffer[7] = 0x80; // include a PTS fInputBuffer[8] = 5; // PES_header_data_length (enough for a PTS) // fInputBuffer[9..13] will be the PTS; fill this in later fInputBufferBytesAvailable = SIMPLE_PES_HEADER_SIZE; } if (fInputBufferBytesAvailable < LOW_WATER_MARK && !fInputSource->isCurrentlyAwaitingData()) { // We don't yet have enough data in our buffer. Arrange to read more: fInputSource->getNextFrame(&fInputBuffer[fInputBufferBytesAvailable], INPUT_BUFFER_SIZE-fInputBufferBytesAvailable, afterGettingFrame, this, FramedSource::handleClosure, &fParent); } } Boolean InputESSourceRecord::deliverBufferToClient() { if (fInputBufferInUse || fInputBufferBytesAvailable < LOW_WATER_MARK) return False; // Fill in the PES_packet_length field that we left unset before: unsigned PES_packet_length = fInputBufferBytesAvailable - 6; if (PES_packet_length > 0xFFFF) { // Set the PES_packet_length field to 0. 
This indicates an unbounded length (see ISO 13818-1, 2.4.3.7) PES_packet_length = 0; } fInputBuffer[4] = PES_packet_length>>8; fInputBuffer[5] = PES_packet_length; // Fill in the PES PTS (from our SCR): fInputBuffer[9] = 0x20|(fSCR.highBit<<3)|(fSCR.remainingBits>>29)|0x01; fInputBuffer[10] = fSCR.remainingBits>>22; fInputBuffer[11] = (fSCR.remainingBits>>14)|0x01; fInputBuffer[12] = fSCR.remainingBits>>7; fInputBuffer[13] = (fSCR.remainingBits<<1)|0x01; fInputBufferInUse = True; // Do the delivery: fParent.handleNewBuffer(fInputBuffer, fInputBufferBytesAvailable, fMPEGVersion, fSCR); return True; } void InputESSourceRecord ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) { InputESSourceRecord* source = (InputESSourceRecord*)clientData; source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime); } void InputESSourceRecord ::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime) { if (numTruncatedBytes > 0) { fParent.envir() << "MPEG2TransportStreamFromESSource: input buffer too small; increase \"MAX_INPUT_ES_FRAME_SIZE\" in \"MPEG2TransportStreamFromESSource\" by at least " << numTruncatedBytes << " bytes!\n"; } if (fInputBufferBytesAvailable == SIMPLE_PES_HEADER_SIZE) { // Use this presentationTime for our SCR: fSCR.highBit = ((presentationTime.tv_sec*45000 + (presentationTime.tv_usec*9)/200)& 0x80000000) != 0; fSCR.remainingBits = presentationTime.tv_sec*90000 + (presentationTime.tv_usec*9)/100; fSCR.extension = (presentationTime.tv_usec*9)%100; #ifdef DEBUG_SCR fprintf(stderr, "PES header: stream_id 0x%02x, pts: %u.%06u => SCR 0x%x%08x:%03x\n", fStreamId, (unsigned)presentationTime.tv_sec, (unsigned)presentationTime.tv_usec, fSCR.highBit, fSCR.remainingBits, fSCR.extension); #endif } fInputBufferBytesAvailable += frameSize; fParent.fPresentationTime = presentationTime; // Now that we have new input data, check if we can deliver to the client: fParent.awaitNewBuffer(NULL); } live/liveMedia/H263plusVideoStreamParser.cpp000444 001751 000000 00000105437 12265042432 021222 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Author Bernhard Feiten // A filter that breaks up an H.263plus video stream into frames. 
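// Illustrative sketch (not part of this distribution): because
// "MPEG2TransportStreamFromESSource" above is itself a FramedSource, a caller
// chains an Elementary Stream source into it and then reads Transport Stream
// data out of it like any other source - the PES packetization and PTS/SCR
// stamping shown above happen inside. The file names "in.mpv" and "out.ts"
// are assumptions made for the example.
#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

static void afterPlaying(void* /*clientData*/) {
   // close the sources/sink and leave the event loop here
}

int main() {
   TaskScheduler* scheduler = BasicTaskScheduler::createNew();
   UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

   // An MPEG-2 video Elementary Stream, framed so that each delivery is one
   // video frame carrying a presentation time:
   FramedSource* fileSource = ByteStreamFileSource::createNew(*env, "in.mpv");
   FramedSource* videoES = MPEG1or2VideoStreamFramer::createNew(*env, fileSource);

   MPEG2TransportStreamFromESSource* tsSource
      = MPEG2TransportStreamFromESSource::createNew(*env);
   tsSource->addNewVideoSource(videoES, 2/*mpegVersion*/);

   // Drain the resulting Transport Stream into a file:
   MediaSink* outSink = FileSink::createNew(*env, "out.ts");
   outSink->startPlaying(*tsSource, afterPlaying, NULL);
   env->taskScheduler().doEventLoop(); // does not return
   return 0;
}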
// Based on MPEG4IP/mp4creator/h263.c

#include "H263plusVideoStreamParser.hh"
#include "H263plusVideoStreamFramer.hh"
//#include
//#include "GroupsockHelper.hh"

H263plusVideoStreamParser::H263plusVideoStreamParser(
                              H263plusVideoStreamFramer* usingSource,
                              FramedSource* inputSource)
   : StreamParser(inputSource,
                  FramedSource::handleClosure,
                  usingSource,
                  &H263plusVideoStreamFramer::continueReadProcessing,
                  usingSource),
     fUsingSource(usingSource),
     fnextTR(0),
     fcurrentPT(0)
{
   memset(fStates, 0, sizeof(fStates));
   memset(&fNextInfo, 0, sizeof(fNextInfo));
   memset(&fCurrentInfo, 0, sizeof(fCurrentInfo));
   memset(&fMaxBitrateCtx, 0, sizeof(fMaxBitrateCtx));
   memset(fNextHeader, 0, H263_REQUIRE_HEADER_SIZE_BYTES);
}

///////////////////////////////////////////////////////////////////////////////
H263plusVideoStreamParser::~H263plusVideoStreamParser()
{
}

///////////////////////////////////////////////////////////////////////////////
void H263plusVideoStreamParser::restoreSavedParserState()
{
   StreamParser::restoreSavedParserState();
   fTo = fSavedTo;
   fNumTruncatedBytes = fSavedNumTruncatedBytes;
}

///////////////////////////////////////////////////////////////////////////////
void H263plusVideoStreamParser::setParseState()
{
   fSavedTo = fTo;
   fSavedNumTruncatedBytes = fNumTruncatedBytes;
   saveParserState(); // Needed for the parsing process in StreamParser
}

///////////////////////////////////////////////////////////////////////////////
void H263plusVideoStreamParser::registerReadInterest(
                                   unsigned char* to,
                                   unsigned maxSize)
{
   fStartOfFrame = fTo = fSavedTo = to;
   fLimit = to + maxSize;
   fMaxSize = maxSize;
   fNumTruncatedBytes = fSavedNumTruncatedBytes = 0;
}

///////////////////////////////////////////////////////////////////////////////
// parse(), derived from H263Creator of MPEG4IP, h263.c
unsigned H263plusVideoStreamParser::parse(u_int64_t & currentDuration)
{
// u_int8_t   frameBuffer[H263_BUFFER_SIZE]; // The input buffer
   // Pointer which tells LoadNextH263Object where to read data to
// u_int8_t*  pFrameBuffer = fTo + H263_REQUIRE_HEADER_SIZE_BYTES;
   u_int32_t  frameSize;      // The current frame size
   // Pointer to receive address of the header data
// u_int8_t*  pCurrentHeader; // = pFrameBuffer;
// u_int64_t  currentDuration;   // The current frame's duration
   u_int8_t   trDifference;   // The current TR difference
   // The previous TR difference
// u_int8_t   prevTrDifference = H263_BASIC_FRAME_RATE;
// u_int64_t  totalDuration = 0; // Duration accumulator
// u_int64_t  avgBitrate;     // Average bitrate
// u_int64_t  totalBytes = 0; // Size accumulator

   try // The "get data" routines of the class FramedFilter return an error when
   {   // the buffer is empty. This occurs at the beginning and at the end of the file.
      fCurrentInfo = fNextInfo;

      // Parse 1 frame
      // For the first time, only the first frame's header is returned.
      // The second time the full first frame is returned
      frameSize = parseH263Frame();

      currentDuration = 0;
      if ((frameSize > 0)){
         // We were able to acquire a frame from the input.
         // Parse the returned frame header (if any)
         if (!ParseShortHeader(fTo, &fNextInfo)) {
#ifdef DEBUG
            fprintf(stderr,"H263plusVideoStreamParser: Fatal error\n");
#endif
         }

         trDifference = GetTRDifference(fNextInfo.tr, fCurrentInfo.tr);

         // calculate the current frame duration
         currentDuration = CalculateDuration(trDifference);

         // Accumulate the frame's size and duration for avgBitrate calculation
         //totalDuration += currentDuration;
         //totalBytes += frameSize;
         // If needed, recalculate bitrate information
         // if (h263Bitrates)
         //GetMaxBitrate(&fMaxBitrateCtx, frameSize, prevTrDifference);
         //prevTrDifference = trDifference;

         setParseState(); // Needed for the parsing process in StreamParser
      }
   } catch (int /*e*/) {
#ifdef DEBUG
      fprintf(stderr, "H263plusVideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
#endif
      frameSize = 0;
   }

   return frameSize;
}

///////////////////////////////////////////////////////////////////////////////
// parseH263Frame, derived from LoadNextH263Object of MPEG4IP
// - service routine that reads a single frame from the input file.
// It shall fill the input buffer with data up until - and including - the
// next start code and shall report back both the number of bytes read and a
// pointer to the next start code. The first call to this function shall only
// yield a pointer with 0 data bytes and the last call to this function shall
// only yield data bytes with a NULL pointer as the next header.
//
// TODO: This function only supports valid bit streams. Upon error, it fails
// without the possibility to recover. A better idea would be to skip frames
// until a parsable frame is read from the file.
//
// Parameters:
//    ppNextHeader - output parameter that upon return points to the location
//                   of the next frame's head in the buffer.
//                   This pointer shall be NULL for the last frame read.
// Returns the total number of bytes read.
// Uses the input source instantiated by the constructor.
///////////////////////////////////////////////////////////////////////////////
int H263plusVideoStreamParser::parseH263Frame()
{
   char row = 0;
   u_int8_t * bufferIndex = fTo;
   // The buffer end which will allow the loop to leave place for
   // the additionalBytesNeeded
   u_int8_t * bufferEnd = fTo + fMaxSize - ADDITIONAL_BYTES_NEEDED - 1;

   memcpy(fTo, fNextHeader, H263_REQUIRE_HEADER_SIZE_BYTES);
   bufferIndex += H263_REQUIRE_HEADER_SIZE_BYTES;

   // The state table and the following loop implement a state machine enabling
   // us to read bytes from the file until (and including) the requested
   // start code (00 00 8X) is found

   // Initialize the states array, if it hasn't been initialized yet...
   if (!fStates[0][0]) {
      // One 00 was read
      fStates[0][0] = 1;
      // Two sequential 0x00s were read
      fStates[1][0] = fStates[2][0] = 2;
      // A full start code was read
      fStates[2][128] = fStates[2][129] = fStates[2][130] = fStates[2][131] = -1;
   }

   // Read data from file into the output buffer until either a start code
   // is found, or the end of file has been reached.
   do {
      *bufferIndex = get1Byte();
   } while ((bufferIndex < bufferEnd) && // We still have room in the buffer
            ((row = fStates[(unsigned char)row][*(bufferIndex++)]) != -1));

   // Start code was not found
   if (row != -1) {
      fprintf(stderr, "%s: Buffer too small (%lu)\n",
              "h263reader:", (unsigned long)(bufferEnd - fTo + ADDITIONAL_BYTES_NEEDED));
      return 0;
   }

   // Cool ... now we have a start code
   // Now we just have to read the additionalBytesNeeded
   getBytes(bufferIndex, ADDITIONAL_BYTES_NEEDED);
   memcpy(fNextHeader, bufferIndex - H263_STARTCODE_SIZE_BYTES, H263_REQUIRE_HEADER_SIZE_BYTES);

   int sz = bufferIndex - fTo - H263_STARTCODE_SIZE_BYTES;
   if (sz == 5) // first frame
      memcpy(fTo, fTo+H263_REQUIRE_HEADER_SIZE_BYTES, H263_REQUIRE_HEADER_SIZE_BYTES);

   return sz;
}
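// Illustrative aside (not part of the original source): the "fStates" table
// above is a byte-at-a-time matcher for the H.263 Picture Start Code pattern
// 00 00 8X - the PSC's third byte is 1000 00xx, i.e. 0x80..0x83 (128..131).
// The same automaton, restated standalone over an in-memory buffer:
static int scanForH263StartCodeExample(unsigned char const* buf, int len) {
   signed char states[3][256] = {};       // row 0: no match yet
   states[0][0x00] = 1;                   // row 1: one 0x00 seen
   states[1][0x00] = states[2][0x00] = 2; // row 2: two (or more) sequential 0x00s
   states[2][0x80] = states[2][0x81] = states[2][0x82] = states[2][0x83] = -1;

   signed char row = 0;
   for (int i = 0; i < len; ++i) {
      row = states[(unsigned char)row][buf[i]];
      if (row == -1) return i + 1; // number of bytes consumed, including the start code
   }
   return 0; // no start code occurs in "buf"
}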
  // Cool ... now we have a start code
  // Now we just have to read the additionalBytesNeeded
  getBytes(bufferIndex, ADDITIONAL_BYTES_NEEDED);
  memcpy(fNextHeader, bufferIndex - H263_STARTCODE_SIZE_BYTES, H263_REQUIRE_HEADER_SIZE_BYTES);

  int sz = bufferIndex - fTo - H263_STARTCODE_SIZE_BYTES;

  if (sz == 5) // first frame
    memcpy(fTo, fTo + H263_REQUIRE_HEADER_SIZE_BYTES, H263_REQUIRE_HEADER_SIZE_BYTES);

  return sz;
}

////////////////////////////////////////////////////////////////////////////////
// ParseShortHeader - service routine that accepts a buffer containing a frame
// header and extracts relevant codec information from it.
//
// NOTE: the first bit in the following comments is 0 (zero).
//
//
//      0                   1                   2                   3
//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
//      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//      |        PSC (Picture Start Code=22 bits)    |  (TR=8 bits)  |   >
//      |0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0|               |1 0>
//      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//      <      (PTYPE=13 bits)   |
//      <. . .|(FMT)|Z|. . . . . |
//      +-+-+-+-+-+-+-+-+-+-+-+-+
//      -> PTYPE.FMT contains a width/height identification
//      -> PTYPE.Z is 1 for P-Frames, 0 for I-Frames
//      Note: When FMT is 111, there is an extended PTYPE...
//
// Inputs:
//      headerBuffer - pointer to the current header buffer
//      outputInfoStruct - pointer to the structure receiving the data
// Outputs:
//      This function returns a structure of important codec-specific
//      information (the Temporal Reference bits, width & height of the current
//      frame and the sync - or "frame type" - bit). It reports success or
//      failure to the calling function.
////////////////////////////////////////////////////////////////////////////////
bool H263plusVideoStreamParser::ParseShortHeader(u_int8_t *headerBuffer,
                                                 H263INFO *outputInfoStruct)
{
  u_int8_t fmt = 0;

  // Extract temporal reference (TR) from the buffer (bits 22-29 inclusive)
  outputInfoStruct->tr  = (headerBuffer[2] << 6) & 0xC0; // 2 LS bits out of the 3rd byte
  outputInfoStruct->tr |= (headerBuffer[3] >> 2) & 0x3F; // 6 MS bits out of the 4th byte

  // Extract the FMT part of PTYPE from the buffer (bits 35-37 inclusive)
  fmt = (headerBuffer[4] >> 2) & 0x07; // bits 3-5 out of the 5th byte

  // If PTYPE is not supported, return a failure notice to the calling function
  // FIXME: PLUSPTYPE is not supported
  if (fmt == 0x07) {
    return false;
  }

  // If PTYPE is supported, calculate the current width and height according to
  // a predefined table
  if (!GetWidthAndHeight(fmt, &(outputInfoStruct->width), &(outputInfoStruct->height))) {
    return false;
  }

  // Extract the frame-type bit, which is the 9th bit of PTYPE (bit 38)
  outputInfoStruct->isSyncFrame = !(headerBuffer[4] & 0x02);

  return true;
}

////////////////////////////////////////////////////////////////////////////////
// GetMaxBitrate - service routine that accepts frame information and
// derives bitrate information from it. This function uses a sliding window
// technique to calculate the maximum bitrates in any window of 1 second
// inside the file.
// The sliding window is implemented with a table of bitrates for the last
// second (30 entries - one entry per TR unit).
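// For example (illustrative numbers only): at the base rate of 30 TR units per
// second, a 1500-byte frame spanning 3 TR units is entered as
// 1500*8/3 + 1 = 4001 bits into each of 3 consecutive slots of the
// 30-entry window below.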
//
// Inputs:
//      ctx - context for this function
//      frameSize - the size of the current frame in bytes
//      frameTRDiff - the "duration" of the frame in TR units
// Outputs:
//      This function returns the up-to-date maximum bitrate
////////////////////////////////////////////////////////////////////////////////
void H263plusVideoStreamParser::GetMaxBitrate(MaxBitrate_CTX *ctx,
                                              u_int32_t frameSize,
                                              u_int8_t frameTRDiff)
{
  if (frameTRDiff == 0)
    return;

  // Calculate the current frame's bitrate as bits per TR unit (round the result
  // upwards)
  u_int32_t frameBitrate = frameSize * 8 / frameTRDiff + 1;

  // for each TRdiff received,
  while (frameTRDiff--) {
    // Subtract the oldest bitrate entry from the current bitrate
    ctx->windowBitrate -= ctx->bitrateTable[ctx->tableIndex];
    // Update the oldest bitrate entry with the current frame's bitrate
    ctx->bitrateTable[ctx->tableIndex] = frameBitrate;
    // Add the current frame's bitrate to the current bitrate
    ctx->windowBitrate += frameBitrate;

    // Check if we have a new maximum bitrate
    if (ctx->windowBitrate > ctx->maxBitrate) {
      ctx->maxBitrate = ctx->windowBitrate;
    }

    // Advance the table index, wrapping around the bitrateTable size
    ctx->tableIndex = (ctx->tableIndex + 1) %
      ( sizeof(ctx->bitrateTable) / sizeof(ctx->bitrateTable[0]) );
  }
}

////////////////////////////////////////////////////////////////////////////////
// CalculateDuration - service routine that calculates the current frame's
// duration in milliseconds using its duration in TR units.
// - In order not to accumulate the calculation error, we are using the TR
// duration to calculate the current and the next frame's presentation time in
// milliseconds.
//
// Inputs: trDiff - The current frame's duration in TR units
// Return: The current frame's duration in milliseconds
////////////////////////////////////////////////////////////////////////////////
u_int64_t H263plusVideoStreamParser::CalculateDuration(u_int8_t trDiff)
{
  u_int64_t nextPT;   // The next frame's presentation time in milliseconds
  u_int64_t duration; // The current frame's duration in milliseconds

  fnextTR += trDiff;

  // Calculate the next frame's presentation time, in milliseconds
  nextPT = (fnextTR * 1001) / H263_BASIC_FRAME_RATE;
  // The frame's duration is the difference between the next presentation
  // time and the current presentation time.
  duration = nextPT - fcurrentPT;
  // "Remember" the next presentation time for the next time this function is called
  fcurrentPT = nextPT;

  return duration;
}

////////////////////////////////////////////////////////////////////////////////
bool H263plusVideoStreamParser::GetWidthAndHeight(u_int8_t fmt,
                                                  u_int16_t *width,
                                                  u_int16_t *height)
{
  // The 'fmt' corresponds to bits 5-7 of the PTYPE
  static struct {
    u_int16_t width;
    u_int16_t height;
  } const dimensionsTable[8] = {
    { 0,    0    },  // 000 - 0 - forbidden, generates an error
    { 128,  96   },  // 001 - 1 - Sub QCIF
    { 176,  144  },  // 010 - 2 - QCIF
    { 352,  288  },  // 011 - 3 - CIF
    { 704,  576  },  // 100 - 4 - 4CIF
    { 1408, 1152 },  // 101 - 5 - 16CIF
    { 0,    0    },  // 110 - 6 - reserved, generates an error
    { 0,    0    }   // 111 - 7 - extended, not supported by profile 0
  };

  if (fmt > 7)
    return false;

  *width  = dimensionsTable[fmt].width;
  *height = dimensionsTable[fmt].height;

  if (*width == 0)
    return false;

  return true;
}

////////////////////////////////////////////////////////////////////////////////
u_int8_t H263plusVideoStreamParser::GetTRDifference(u_int8_t nextTR,
                                                    u_int8_t currentTR)
{
  if (currentTR > nextTR) {
    // Wrap around 255...
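    // (For example, if currentTR == 250 and nextTR == 3, the elapsed time is
    // 3 + (256 - 250) = 9 TR units, since TR is an 8-bit counter.)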
return nextTR + (256 - currentTR); } else { return nextTR - currentTR; } } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // this is the h263.c file of MPEG4IP mp4creator /* #include "mp4creator.h" // Default timescale for H.263 (1000ms) #define H263_TIMESCALE 1000 // Default H263 frame rate (30fps) #define H263_BASIC_FRAME_RATE 30 // Minimum number of bytes needed to parse an H263 header #define H263_REQUIRE_HEADER_SIZE_BYTES 5 // Number of bytes the start code requries #define H263_STARTCODE_SIZE_BYTES 3 // This is the input buffer's size. It should contain // 1 frame with the following start code #define H263_BUFFER_SIZE 256 * 1024 // The default max different (in %) betwqeen max and average bitrates #define H263_DEFAULT_CBR_TOLERANCE 10 // The following structure holds information extracted from each frame's header: typedef struct _H263INFO { u_int8_t tr; // Temporal Reference, used in duration calculation u_int16_t width; // Width of the picture u_int16_t height; // Height of the picture bool isSyncFrame; // Frame type (true = I frame = "sync" frame) } H263INFO; // Context for the GetMaxBitrate function typedef struct _MaxBitrate_CTX { u_int32_t bitrateTable[H263_BASIC_FRAME_RATE];// Window of 1 second u_int32_t windowBitrate; // The bitrate of the current window u_int32_t maxBitrate; // The up-to-date maximum bitrate u_int32_t tableIndex; // The next TR unit to update } MaxBitrate_CTX; // Forward declarations: static int LoadNextH263Object( FILE *inputFileHandle, u_int8_t *frameBuffer, u_int32_t *frameBufferSize, u_int32_t additionalBytesNeeded, u_int8_t **ppNextHeader); static bool ParseShortHeader( u_int8_t *headerBuffer, H263INFO *outputInfoStruct); static u_int8_t GetTRDifference(u_int8_t nextTR, u_int8_t currentTR); static void GetMaxBitrate( MaxBitrate_CTX *ctx, u_int32_t frameSize, u_int8_t frameTRDiff); static MP4Duration CalculateDuration(u_int8_t trDiff); static bool GetWidthAndHeight( u_int8_t fmt, u_int16_t *width, u_int16_t *height); static char states[3][256]; / * * H263Creator - Main function * Inputs: * outputFileHandle - The handle of the output file * inputFileHandle - The handle of the input file * Codec-specific parameters: * H263Level - H.263 Level used for this track * H263Profile - H.263 Profile used for this track * H263Bitrates - A Parameter indicating whether the function * should calculate H263 bitrates or not. * cbrTolerance - CBR tolerance indicates when to set the * average bitrate. * Outputs: * This function returns either the track ID of the newly added track upon * success or a predefined value representing an erroneous state. 
 * /
MP4TrackId H263Creator(MP4FileHandle outputFileHandle,
                       FILE* inputFileHandle,
                       u_int8_t h263Profile,
                       u_int8_t h263Level,
                       bool h263Bitrates,
                       u_int8_t cbrTolerance)
{
  H263INFO nextInfo;    // Holds information about the next frame
  H263INFO currentInfo; // Holds information about the current frame
  MaxBitrate_CTX maxBitrateCtx; // Context for the GetMaxBitrate function
  memset(&nextInfo, 0, sizeof(nextInfo));
  memset(&currentInfo, 0, sizeof(currentInfo));
  memset(&maxBitrateCtx, 0, sizeof(maxBitrateCtx));
  memset(states, 0, sizeof(states));
  u_int8_t frameBuffer[H263_BUFFER_SIZE]; // The input buffer
  // Pointer which tells LoadNextH263Object where to read data to
  u_int8_t* pFrameBuffer = frameBuffer + H263_REQUIRE_HEADER_SIZE_BYTES;
  u_int32_t frameSize; // The current frame size
  // Pointer to receive address of the header data
  u_int8_t* pCurrentHeader = pFrameBuffer;
  MP4Duration currentDuration; // The current frame's duration
  u_int8_t trDifference; // The current TR difference
  // The previous TR difference
  u_int8_t prevTrDifference = H263_BASIC_FRAME_RATE;
  MP4Duration totalDuration = 0; // Duration accumulator
  MP4Duration avgBitrate; // Average bitrate
  u_int64_t totalBytes = 0; // Size accumulator

  MP4TrackId trackId = MP4_INVALID_TRACK_ID; // Our MP4 track
  bool stay = true; // loop flag

  while (stay) {
    currentInfo = nextInfo;
    memmove(frameBuffer, pCurrentHeader, H263_REQUIRE_HEADER_SIZE_BYTES);
    frameSize = H263_BUFFER_SIZE - H263_REQUIRE_HEADER_SIZE_BYTES;
    // Read 1 frame and the next frame's header from the file.
    // For the first frame, only the first frame's header is returned.
    // For the last frame, only the last frame's data is returned.
    if (! LoadNextH263Object(inputFileHandle, pFrameBuffer, &frameSize,
                             H263_REQUIRE_HEADER_SIZE_BYTES - H263_STARTCODE_SIZE_BYTES,
                             &pCurrentHeader))
      break; // Fatal error ...

    if (pCurrentHeader) {
      // Parse the returned frame header (if any)
      if (!ParseShortHeader(pCurrentHeader, &nextInfo))
        break; // Fatal error
      trDifference = GetTRDifference(nextInfo.tr, currentInfo.tr);
    } else {
      // This is the last frame ... we have to fake the trDifference ...
      trDifference = 1;
      // No header data has been read at this iteration, so we have to manually
      // add the frame's header we read at the previous iteration.
      // Note that LoadNextH263Object returns the number of bytes read, which
      // are the current frame's data and the next frame's header
      frameSize += H263_REQUIRE_HEADER_SIZE_BYTES;
      // There is no need for the next iteration ...
      stay = false;
    }

    // If this is the first iteration ...
    if (currentInfo.width == 0) {
      // If we have more data than just the header
      if ((frameSize > H263_REQUIRE_HEADER_SIZE_BYTES) ||
          !pCurrentHeader) // Or no header at all
        break; // Fatal error
      else
        continue; // We have only the first frame's header ...
    }

    if (trackId == MP4_INVALID_TRACK_ID) {
      // If a track has not been added yet, add the track to the file.
      trackId = MP4AddH263VideoTrack(outputFileHandle, H263_TIMESCALE, 0,
                                     currentInfo.width, currentInfo.height,
                                     h263Level, h263Profile, 0, 0);
      if (trackId == MP4_INVALID_TRACK_ID)
        break; // Fatal error
    }

    // calculate the current frame duration
    currentDuration = CalculateDuration(trDifference);

    // Write the current frame to the file.
if (!MP4WriteSample(outputFileHandle, trackId, frameBuffer, frameSize, currentDuration, 0, currentInfo.isSyncFrame)) break; // Fatal error // Accumulate the frame's size and duration for avgBitrate calculation totalDuration += currentDuration; totalBytes += frameSize; // If needed, recalculate bitrate information if (h263Bitrates) GetMaxBitrate(&maxBitrateCtx, frameSize, prevTrDifference); prevTrDifference = trDifference; } // while (stay) // If this is the last frame, if (!stay) { // If needed and possible, update bitrate information in the file if (h263Bitrates && totalDuration) { avgBitrate = (totalBytes * 8 * H263_TIMESCALE) / totalDuration; if (cbrTolerance == 0) cbrTolerance = H263_DEFAULT_CBR_TOLERANCE; // Same as: if (maxBitrate / avgBitrate > (cbrTolerance + 100) / 100.0) if (maxBitrateCtx.maxBitrate * 100 > (cbrTolerance + 100) * avgBitrate) avgBitrate = 0; MP4SetH263Bitrates(outputFileHandle, trackId, avgBitrate, maxBitrateCtx.maxBitrate); } // Return the newly added track ID return trackId; } // If we got to here... something went wrong ... fprintf(stderr, "%s: Could not parse input file, invalid video stream?\n", ProgName); // Upon failure, delete the newly added track if it has been added if (trackId != MP4_INVALID_TRACK_ID) { MP4DeleteTrack(outputFileHandle, trackId); } return MP4_INVALID_TRACK_ID; } / * * LoadNextH263Object - service routine that reads a single frame from the input * file. It shall fill the input buffer with data up until - and including - the * next start code and shall report back both the number of bytes read and a * pointer to the next start code. The first call to this function shall only * yield a pointer with 0 data bytes and the last call to this function shall * only yield data bytes with a NULL pointer as the next header. * * TODO: This function only supports valid bit streams. Upon error, it fails * without the possibility to recover. A Better idea would be to skip frames * until a parsable frame is read from the file. * * Parameters: * inputFileHandle - The handle of the input file * frameBuffer - buffer where to place read data * frameBufferSize - in/out parameter indicating the size of the buffer on * entry and the number of bytes copied to the buffer upon * return * additionalBytesNeeded - indicates how many additional bytes are to be read * from the next frame's header (over the 3 bytes that * are already read). * NOTE: This number MUST be > 0 * ppNextHeader - output parameter that upon return points to the location * of the next frame's head in the buffer * Outputs: * This function returns two pieces of information: * 1. The total number of bytes read. * 2. A Pointer to the header of the next frame. This pointer shall be NULL * for the last frame read. * / static int LoadNextH263Object( FILE *inputFileHandle, u_int8_t *frameBuffer, u_int32_t *frameBufferSize, u_int32_t additionalBytesNeeded, u_int8_t **ppNextHeader) { // This table and the following loop implements a state machine enabling // us to read bytes from the file untill (and inclusing) the requested // start code (00 00 8X) is found int8_t row = 0; u_int8_t *bufferStart = frameBuffer; // The buffer end which will allow the loop to leave place for // the additionalBytesNeeded u_int8_t *bufferEnd = frameBuffer + *frameBufferSize - additionalBytesNeeded - 1; // Initialize the states array, if it hasn't been initialized yet... 
if (!states[0][0]) { // One 00 was read states[0][0] = 1; // Two sequential 0x00 ware read states[1][0] = states[2][0] = 2; // A full start code was read states[2][128] = states[2][129] = states[2][130] = states[2][131] = -1; } // Read data from file into the output buffer until either a start code // is found, or the end of file has been reached. do { if (fread(frameBuffer, 1, 1, inputFileHandle) != 1){ // EOF or other error before we got a start code *ppNextHeader = NULL; *frameBufferSize = frameBuffer - bufferStart; return 1; } } while ((frameBuffer < bufferEnd) && // We have place in the buffer ((row = states[row][*(frameBuffer++)]) != -1)); // Start code was not found if (row != -1) { fprintf(stderr, "%s: Buffer too small (%u)\n", ProgName, bufferEnd - bufferStart + additionalBytesNeeded); return 0; } // Cool ... now we have a start code *ppNextHeader = frameBuffer - H263_STARTCODE_SIZE_BYTES; *frameBufferSize = frameBuffer - bufferStart + additionalBytesNeeded; // Now we just have to read the additionalBytesNeeded if(fread(frameBuffer, additionalBytesNeeded, 1, inputFileHandle) != 1) { /// We got a start code but can't read additionalBytesNeeded ... that's a fatal error fprintf(stderr, "%s: Invalid H263 bitstream\n", ProgName); return 0; } return 1; } / * * ParseShortHeader - service routine that accepts a buffer containing a frame * header and extracts relevant codec information from it. * * NOTE: the first bit in the following commnets is 0 (zero). * * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | PSC (Picture Start Code=22 bits) | (TR=8 bits) | > * |0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0| |1 0> * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * < (PTYPE=13 bits) | * <. . .|(FMT)|Z|. . . .| * +-+-+-+-+-+-+-+-+-+-+-+ * -> PTYPE.FMT contains a width/height identification * -> PTYPE.Z is 1 for P-Frames, 0 for I-Frames * Note: When FMT is 111, there is an extended PTYPE... * * Inputs: * headerBuffer - pointer to the current header buffer * outputInfoStruct - pointer to the structure receiving the data * Outputs: * This function returns a structure of important codec-specific * information (The Temporal Reference bits, width & height of the current * frame and the sync - or "frame type" - bit. It reports success or * failure to the calling function. * / static bool ParseShortHeader( u_int8_t *headerBuffer, H263INFO *outputInfoStruct) { u_int8_t fmt = 0; // Extract temporal reference (TR) from the buffer (bits 22-29 inclusive) outputInfoStruct->tr = (headerBuffer[2] << 6) & 0xC0; // 2 LS bits out of the 3rd byte outputInfoStruct->tr |= (headerBuffer[3] >> 2) & 0x3F; // 6 MS bits out of the 4th byte // Extract the FMT part of PTYPE from the buffer (bits 35-37 inclusive) fmt = (headerBuffer[4] >> 2) & 0x07; // bits 3-5 ouf of the 5th byte // If PTYPE is not supported, return a failure notice to the calling function // FIXME: PLUSPTYPE is not supported if (fmt == 0x07) { return false; } // If PTYPE is supported, calculate the current width and height according to // a predefined table if (!GetWidthAndHeight(fmt, &(outputInfoStruct->width), &(outputInfoStruct->height))) { return false; } // Extract the frame-type bit, which is the 9th bit of PTYPE (bit 38) outputInfoStruct->isSyncFrame = !(headerBuffer[4] & 0x02); return true; } / * * GetMaxBitrate- service routine that accepts frame information and * derives bitrate information from it. 
This function uses a sliding window
 * technique to calculate the maximum bitrates in any window of 1 second
 * inside the file.
 * The sliding window is implemented with a table of bitrates for the last
 * second (30 entries - one entry per TR unit).
 *
 * Inputs:
 *      ctx - context for this function
 *      frameSize - the size of the current frame in bytes
 *      frameTRDiff - the "duration" of the frame in TR units
 * Outputs:
 *      This function returns the up-to-date maximum bitrate
 * /
static void GetMaxBitrate(MaxBitrate_CTX *ctx,
                          u_int32_t frameSize,
                          u_int8_t frameTRDiff)
{
  if (frameTRDiff == 0)
    return;

  // Calculate the current frame's bitrate as bits per TR unit (round the result
  // upwards)
  u_int32_t frameBitrate = frameSize * 8 / frameTRDiff + 1;

  // for each TRdiff received,
  while (frameTRDiff--) {
    // Subtract the oldest bitrate entry from the current bitrate
    ctx->windowBitrate -= ctx->bitrateTable[ctx->tableIndex];
    // Update the oldest bitrate entry with the current frame's bitrate
    ctx->bitrateTable[ctx->tableIndex] = frameBitrate;
    // Add the current frame's bitrate to the current bitrate
    ctx->windowBitrate += frameBitrate;

    // Check if we have a new maximum bitrate
    if (ctx->windowBitrate > ctx->maxBitrate) {
      ctx->maxBitrate = ctx->windowBitrate;
    }

    // Advance the table index, wrapping around the bitrateTable size
    ctx->tableIndex = (ctx->tableIndex + 1) %
      ( sizeof(ctx->bitrateTable) / sizeof(ctx->bitrateTable[0]) );
  }
}

/ *
 * CalculateDuration - service routine that calculates the current frame's
 * duration in milliseconds using its duration in TR units.
 * - In order not to accumulate the calculation error, we are using the TR
 * duration to calculate the current and the next frame's presentation time in
 * milliseconds.
 *
 * Inputs:
 *      trDiff - The current frame's duration in TR units
 * Outputs:
 *      The current frame's duration in milliseconds
 * /
static MP4Duration CalculateDuration(u_int8_t trDiff)
{
  static u_int32_t nextTR = 0;      // The next frame's presentation time in TR units
  static MP4Duration currentPT = 0; // The current frame's presentation time in milliseconds
  MP4Duration nextPT;   // The next frame's presentation time in milliseconds
  MP4Duration duration; // The current frame's duration in milliseconds

  nextTR += trDiff;

  // Calculate the next frame's presentation time, in milliseconds
  nextPT = (nextTR * 1001) / H263_BASIC_FRAME_RATE;
  // The frame's duration is the difference between the next presentation
  // time and the current presentation time.
  duration = nextPT - currentPT;
  // "Remember" the next presentation time for the next time this function is
  // called
  currentPT = nextPT;

  return duration;
}

static bool GetWidthAndHeight(u_int8_t fmt,
                              u_int16_t *width,
                              u_int16_t *height)
{
  // The 'fmt' corresponds to bits 5-7 of the PTYPE
  static struct {
    u_int16_t width;
    u_int16_t height;
  } const dimensionsTable[8] = {
    { 0,    0    },  // 000 - 0 - forbidden, generates an error
    { 128,  96   },  // 001 - 1 - Sub QCIF
    { 176,  144  },  // 010 - 2 - QCIF
    { 352,  288  },  // 011 - 3 - CIF
    { 704,  576  },  // 100 - 4 - 4CIF
    { 1408, 1152 },  // 101 - 5 - 16CIF
    { 0,    0    },  // 110 - 6 - reserved, generates an error
    { 0,    0    }   // 111 - 7 - extended, not supported by profile 0
  };

  if (fmt > 7)
    return false;

  *width  = dimensionsTable[fmt].width;
  *height = dimensionsTable[fmt].height;

  if (*width == 0)
    return false;

  return true;
}

static u_int8_t GetTRDifference(u_int8_t nextTR, u_int8_t currentTR)
{
  if (currentTR > nextTR) {
    // Wrap around 255...
return nextTR + (256 - currentTR); } else { return nextTR - currentTR; } } */ live/liveMedia/H263plusVideoStreamFramer.cpp000444 001751 000000 00000011605 12265042432 021173 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Author Bernhard Feiten // A filter that breaks up an H.263plus video stream into frames. // #include "H263plusVideoStreamFramer.hh" #include "H263plusVideoStreamParser.hh" #include #include /////////////////////////////////////////////////////////////////////////////// ////////// H263plusVideoStreamFramer implementation ////////// //public/////////////////////////////////////////////////////////////////////// H263plusVideoStreamFramer* H263plusVideoStreamFramer::createNew( UsageEnvironment& env, FramedSource* inputSource) { // Need to add source type checking here??? ##### H263plusVideoStreamFramer* fr; fr = new H263plusVideoStreamFramer(env, inputSource); return fr; } /////////////////////////////////////////////////////////////////////////////// H263plusVideoStreamFramer::H263plusVideoStreamFramer( UsageEnvironment& env, FramedSource* inputSource, Boolean createParser) : FramedFilter(env, inputSource), fFrameRate(0.0), // until we learn otherwise fPictureEndMarker(False) { // Use the current wallclock time as the base 'presentation time': gettimeofday(&fPresentationTimeBase, NULL); fParser = createParser ? new H263plusVideoStreamParser(this, inputSource) : NULL; } /////////////////////////////////////////////////////////////////////////////// H263plusVideoStreamFramer::~H263plusVideoStreamFramer() { delete fParser; } /////////////////////////////////////////////////////////////////////////////// void H263plusVideoStreamFramer::doGetNextFrame() { fParser->registerReadInterest(fTo, fMaxSize); continueReadProcessing(); } /////////////////////////////////////////////////////////////////////////////// Boolean H263plusVideoStreamFramer::isH263plusVideoStreamFramer() const { return True; } /////////////////////////////////////////////////////////////////////////////// void H263plusVideoStreamFramer::continueReadProcessing( void* clientData, unsigned char* /*ptr*/, unsigned /*size*/, struct timeval /*presentationTime*/) { H263plusVideoStreamFramer* framer = (H263plusVideoStreamFramer*)clientData; framer->continueReadProcessing(); } /////////////////////////////////////////////////////////////////////////////// void H263plusVideoStreamFramer::continueReadProcessing() { unsigned acquiredFrameSize; u_int64_t frameDuration; // in ms acquiredFrameSize = fParser->parse(frameDuration); // Calculate some average bitrate information (to be adapted) // avgBitrate = (totalBytes * 8 * H263_TIMESCALE) / totalDuration; if (acquiredFrameSize > 0) { // We were able to acquire a frame from the input. 
// It has already been copied to the reader's space. fFrameSize = acquiredFrameSize; // fNumTruncatedBytes = fParser->numTruncatedBytes(); // not needed so far fFrameRate = frameDuration == 0 ? 0.0 : 1000./(long)frameDuration; // Compute "fPresentationTime" if (acquiredFrameSize == 5) // first frame fPresentationTime = fPresentationTimeBase; else fPresentationTime.tv_usec += (long) frameDuration*1000; while (fPresentationTime.tv_usec >= 1000000) { fPresentationTime.tv_usec -= 1000000; ++fPresentationTime.tv_sec; } // Compute "fDurationInMicroseconds" fDurationInMicroseconds = (unsigned int) frameDuration*1000;; // Call our own 'after getting' function. Because we're not a 'leaf' // source, we can call this directly, without risking infinite recursion. afterGetting(this); } else { // We were unable to parse a complete frame from the input, because: // - we had to read more data from the source stream, or // - the source stream has ended. } } live/liveMedia/WAVAudioFileServerMediaSubsession.cpp000444 001751 000000 00000017603 12265042432 022775 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an WAV audio file. 
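// A minimal usage sketch (an illustration, not part of this file): given an
// existing "RTSPServer* rtspServer" and "UsageEnvironment& env", a WAV file
// could be served on demand as follows ("audio.wav" is a hypothetical name):
/*
  ServerMediaSession* sms = ServerMediaSession::createNew(env, "wavAudio");
  sms->addSubsession(WAVAudioFileServerMediaSubsession
		     ::createNew(env, "audio.wav",
				 True,   // reuseFirstSource
				 True)); // convertToULaw
  rtspServer->addServerMediaSession(sms);
*/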
// Implementation #include "WAVAudioFileServerMediaSubsession.hh" #include "WAVAudioFileSource.hh" #include "uLawAudioFilter.hh" #include "SimpleRTPSink.hh" WAVAudioFileServerMediaSubsession* WAVAudioFileServerMediaSubsession ::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource, Boolean convertToULaw) { return new WAVAudioFileServerMediaSubsession(env, fileName, reuseFirstSource, convertToULaw); } WAVAudioFileServerMediaSubsession ::WAVAudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource, Boolean convertToULaw) : FileServerMediaSubsession(env, fileName, reuseFirstSource), fConvertToULaw(convertToULaw) { } WAVAudioFileServerMediaSubsession ::~WAVAudioFileServerMediaSubsession() { } void WAVAudioFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes) { WAVAudioFileSource* wavSource; if (fBitsPerSample > 8) { // "inputSource" is a filter; its input source is the original WAV file source: wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource()); } else { // "inputSource" is the original WAV file source: wavSource = (WAVAudioFileSource*)inputSource; } unsigned seekSampleNumber = (unsigned)(seekNPT*fSamplingFrequency); unsigned seekByteNumber = seekSampleNumber*((fNumChannels*fBitsPerSample)/8); unsigned numDurationSamples = (unsigned)(streamDuration*fSamplingFrequency); unsigned numDurationBytes = numDurationSamples*((fNumChannels*fBitsPerSample)/8); numBytes = (u_int64_t)numDurationBytes; wavSource->seekToPCMByte(seekByteNumber, numDurationBytes); } void WAVAudioFileServerMediaSubsession ::setStreamSourceScale(FramedSource* inputSource, float scale) { int iScale = (int)scale; WAVAudioFileSource* wavSource; if (fBitsPerSample > 8) { // "inputSource" is a filter; its input source is the original WAV file source: wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource()); } else { // "inputSource" is the original WAV file source: wavSource = (WAVAudioFileSource*)inputSource; } wavSource->setScaleFactor(iScale); } FramedSource* WAVAudioFileServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { FramedSource* resultSource = NULL; do { WAVAudioFileSource* wavSource = WAVAudioFileSource::createNew(envir(), fFileName); if (wavSource == NULL) break; // Get attributes of the audio source: fAudioFormat = wavSource->getAudioFormat(); fBitsPerSample = wavSource->bitsPerSample(); // We handle only 4,8,16,20,24 bits-per-sample audio: if (fBitsPerSample%4 != 0 || fBitsPerSample < 4 || fBitsPerSample > 24 || fBitsPerSample == 12) { envir() << "The input file contains " << fBitsPerSample << " bit-per-sample audio, which we don't handle\n"; break; } fSamplingFrequency = wavSource->samplingFrequency(); fNumChannels = wavSource->numChannels(); unsigned bitsPerSecond = fSamplingFrequency*fBitsPerSample*fNumChannels; fFileDuration = (float)((8.0*wavSource->numPCMBytes())/(fSamplingFrequency*fNumChannels*fBitsPerSample)); // Add in any filter necessary to transform the data prior to streaming: resultSource = wavSource; // by default if (fAudioFormat == WA_PCM) { if (fBitsPerSample == 16) { // Note that samples in the WAV audio file are in little-endian order. 
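	// (RTP audio payload formats such as "L16" carry samples in network
	// (big-endian) byte order - see RFC 3551 - which is why the
	// endian-swapping filters below are inserted when we don't convert
	// to u-law.)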
if (fConvertToULaw) { // Add a filter that converts from raw 16-bit PCM audio to 8-bit u-law audio: resultSource = uLawFromPCMAudioSource::createNew(envir(), wavSource, 1/*little-endian*/); bitsPerSecond /= 2; } else { // Add a filter that converts from little-endian to network (big-endian) order: resultSource = EndianSwap16::createNew(envir(), wavSource); } } else if (fBitsPerSample == 20 || fBitsPerSample == 24) { // Add a filter that converts from little-endian to network (big-endian) order: resultSource = EndianSwap24::createNew(envir(), wavSource); } } estBitrate = (bitsPerSecond+500)/1000; // kbps return resultSource; } while (0); // An error occurred: Medium::close(resultSource); return NULL; } RTPSink* WAVAudioFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { do { char const* mimeType; unsigned char payloadFormatCode = rtpPayloadTypeIfDynamic; // by default, unless a static RTP payload type can be used if (fAudioFormat == WA_PCM) { if (fBitsPerSample == 16) { if (fConvertToULaw) { mimeType = "PCMU"; if (fSamplingFrequency == 8000 && fNumChannels == 1) { payloadFormatCode = 0; // a static RTP payload type } } else { mimeType = "L16"; if (fSamplingFrequency == 44100 && fNumChannels == 2) { payloadFormatCode = 10; // a static RTP payload type } else if (fSamplingFrequency == 44100 && fNumChannels == 1) { payloadFormatCode = 11; // a static RTP payload type } } } else if (fBitsPerSample == 20) { mimeType = "L20"; } else if (fBitsPerSample == 24) { mimeType = "L24"; } else { // fBitsPerSample == 8 (we assume that fBitsPerSample == 4 is only for WA_IMA_ADPCM) mimeType = "L8"; } } else if (fAudioFormat == WA_PCMU) { mimeType = "PCMU"; if (fSamplingFrequency == 8000 && fNumChannels == 1) { payloadFormatCode = 0; // a static RTP payload type } } else if (fAudioFormat == WA_PCMA) { mimeType = "PCMA"; if (fSamplingFrequency == 8000 && fNumChannels == 1) { payloadFormatCode = 8; // a static RTP payload type } } else if (fAudioFormat == WA_IMA_ADPCM) { mimeType = "DVI4"; // Use a static payload type, if one is defined: if (fNumChannels == 1) { if (fSamplingFrequency == 8000) { payloadFormatCode = 5; // a static RTP payload type } else if (fSamplingFrequency == 16000) { payloadFormatCode = 6; // a static RTP payload type } else if (fSamplingFrequency == 11025) { payloadFormatCode = 16; // a static RTP payload type } else if (fSamplingFrequency == 22050) { payloadFormatCode = 17; // a static RTP payload type } } } else { //unknown format break; } return SimpleRTPSink::createNew(envir(), rtpGroupsock, payloadFormatCode, fSamplingFrequency, "audio", mimeType, fNumChannels); } while (0); // An error occurred: return NULL; } void WAVAudioFileServerMediaSubsession::testScaleFactor(float& scale) { if (fFileDuration <= 0.0) { // The file is non-seekable, so is probably a live input source. // We don't support scale factors other than 1 scale = 1; } else { // We support any integral scale, other than 0 int iScale = scale < 0.0 ? 
(int)(scale - 0.5) : (int)(scale + 0.5); // round if (iScale == 0) iScale = 1; scale = (float)iScale; } } float WAVAudioFileServerMediaSubsession::duration() const { return fFileDuration; } live/liveMedia/H264VideoStreamDiscreteFramer.cpp000444 001751 000000 00000003227 12265042432 021754 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simplified version of "H264VideoStreamFramer" that takes only complete, // discrete frames (rather than an arbitrary byte stream) as input. // This avoids the parsing and data copying overhead of the full // "H264VideoStreamFramer". // Implementation #include "H264VideoStreamDiscreteFramer.hh" H264VideoStreamDiscreteFramer* H264VideoStreamDiscreteFramer::createNew(UsageEnvironment& env, FramedSource* inputSource) { return new H264VideoStreamDiscreteFramer(env, inputSource); } H264VideoStreamDiscreteFramer ::H264VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource) : H264or5VideoStreamDiscreteFramer(264, env, inputSource) { } H264VideoStreamDiscreteFramer::~H264VideoStreamDiscreteFramer() { } Boolean H264VideoStreamDiscreteFramer::isH264VideoStreamFramer() const { return True; } live/liveMedia/MPEG1or2DemuxedServerMediaSubsession.cpp000444 001751 000000 00000012523 12265042432 023322 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a MPEG-1 or 2 demuxer. 
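// A minimal usage sketch (an illustration, not part of this file): these
// subsessions are normally obtained from a "MPEG1or2FileServerDemux", given an
// existing "RTSPServer* rtspServer" and "UsageEnvironment& env" ("prog.mpg" is
// a hypothetical name):
/*
  ServerMediaSession* sms = ServerMediaSession::createNew(env, "mpegAV");
  MPEG1or2FileServerDemux* demux
    = MPEG1or2FileServerDemux::createNew(env, "prog.mpg", True); // reuseFirstSource
  sms->addSubsession(demux->newVideoServerMediaSubsession());
  sms->addSubsession(demux->newAudioServerMediaSubsession());
  rtspServer->addServerMediaSession(sms);
*/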
// Implementation #include "MPEG1or2DemuxedServerMediaSubsession.hh" #include "MPEG1or2AudioStreamFramer.hh" #include "MPEG1or2AudioRTPSink.hh" #include "MPEG1or2VideoStreamFramer.hh" #include "MPEG1or2VideoRTPSink.hh" #include "AC3AudioStreamFramer.hh" #include "AC3AudioRTPSink.hh" #include "ByteStreamFileSource.hh" MPEG1or2DemuxedServerMediaSubsession* MPEG1or2DemuxedServerMediaSubsession ::createNew(MPEG1or2FileServerDemux& demux, u_int8_t streamIdTag, Boolean reuseFirstSource, Boolean iFramesOnly, double vshPeriod) { return new MPEG1or2DemuxedServerMediaSubsession(demux, streamIdTag, reuseFirstSource, iFramesOnly, vshPeriod); } MPEG1or2DemuxedServerMediaSubsession ::MPEG1or2DemuxedServerMediaSubsession(MPEG1or2FileServerDemux& demux, u_int8_t streamIdTag, Boolean reuseFirstSource, Boolean iFramesOnly, double vshPeriod) : OnDemandServerMediaSubsession(demux.envir(), reuseFirstSource), fOurDemux(demux), fStreamIdTag(streamIdTag), fIFramesOnly(iFramesOnly), fVSHPeriod(vshPeriod) { } MPEG1or2DemuxedServerMediaSubsession::~MPEG1or2DemuxedServerMediaSubsession() { } FramedSource* MPEG1or2DemuxedServerMediaSubsession ::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { FramedSource* es = NULL; do { es = fOurDemux.newElementaryStream(clientSessionId, fStreamIdTag); if (es == NULL) break; if ((fStreamIdTag&0xF0) == 0xC0 /*MPEG audio*/) { estBitrate = 128; // kbps, estimate return MPEG1or2AudioStreamFramer::createNew(envir(), es); } else if ((fStreamIdTag&0xF0) == 0xE0 /*video*/) { estBitrate = 500; // kbps, estimate return MPEG1or2VideoStreamFramer::createNew(envir(), es, fIFramesOnly, fVSHPeriod); } else if (fStreamIdTag == 0xBD /*AC-3 audio*/) { estBitrate = 192; // kbps, estimate return AC3AudioStreamFramer::createNew(envir(), es, 0x80); } else { // unknown stream type break; } } while (0); // An error occurred: Medium::close(es); return NULL; } RTPSink* MPEG1or2DemuxedServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource) { if ((fStreamIdTag&0xF0) == 0xC0 /*MPEG audio*/) { return MPEG1or2AudioRTPSink::createNew(envir(), rtpGroupsock); } else if ((fStreamIdTag&0xF0) == 0xE0 /*video*/) { return MPEG1or2VideoRTPSink::createNew(envir(), rtpGroupsock); } else if (fStreamIdTag == 0xBD /*AC-3 audio*/) { // Get the sampling frequency from the audio source; use it for the RTP frequency: AC3AudioStreamFramer* audioSource = (AC3AudioStreamFramer*)inputSource; return AC3AudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, audioSource->samplingRate()); } else { return NULL; } } void MPEG1or2DemuxedServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) { float const dur = duration(); unsigned const size = fOurDemux.fileSize(); unsigned absBytePosition = dur == 0.0 ? 
0 : (unsigned)((seekNPT/dur)*size); // "inputSource" is a 'framer' // Flush its data, to account for the seek that we're about to do: if ((fStreamIdTag&0xF0) == 0xC0 /*MPEG audio*/) { MPEG1or2AudioStreamFramer* framer = (MPEG1or2AudioStreamFramer*)inputSource; framer->flushInput(); } else if ((fStreamIdTag&0xF0) == 0xE0 /*video*/) { MPEG1or2VideoStreamFramer* framer = (MPEG1or2VideoStreamFramer*)inputSource; framer->flushInput(); } // "inputSource" is a filter; its input source is the original elem stream source: MPEG1or2DemuxedElementaryStream* elemStreamSource = (MPEG1or2DemuxedElementaryStream*)(((FramedFilter*)inputSource)->inputSource()); // Next, get the original source demux: MPEG1or2Demux& sourceDemux = elemStreamSource->sourceDemux(); // and flush its input buffers: sourceDemux.flushInput(); // Then, get the original input file stream from the source demux: ByteStreamFileSource* inputFileSource = (ByteStreamFileSource*)(sourceDemux.inputSource()); // Note: We can make that cast, because we know that the demux was originally // created from a "ByteStreamFileSource". // Do the appropriate seek within the input file stream: inputFileSource->seekToByteAbsolute(absBytePosition); } float MPEG1or2DemuxedServerMediaSubsession::duration() const { return fOurDemux.fileDuration(); } live/liveMedia/AACAudioMatroskaFileServerMediaSubsession.hh000444 001751 000000 00000004314 12265042432 024236 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an AAC audio track within a Matroska file. 
// C++ header #ifndef _AAC_AUDIO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #define _AAC_AUDIO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif #ifndef _MATROSKA_FILE_SERVER_DEMUX_HH #include "MatroskaFileServerDemux.hh" #endif class AACAudioMatroskaFileServerMediaSubsession: public FileServerMediaSubsession { public: static AACAudioMatroskaFileServerMediaSubsession* createNew(MatroskaFileServerDemux& demux, unsigned trackNumber); private: AACAudioMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber); // called only by createNew(); virtual ~AACAudioMatroskaFileServerMediaSubsession(); private: // redefined virtual functions virtual float duration() const; virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); private: MatroskaFileServerDemux& fOurDemux; unsigned fTrackNumber; char* fConfigStr; }; #endif live/liveMedia/MPEG2TransportStreamIndexFile.cpp000444 001751 000000 00000027067 12265042432 022051 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class that encapsulates MPEG-2 Transport Stream 'index files'/ // These index files are used to implement 'trick play' operations // (seek-by-time, fast forward, reverse play) on Transport Stream files. 
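// Each fixed-size index record associates a position in the Transport Stream
// file with a PCR timestamp and a record type. (The byte offsets used by the
// "...FromBuf()" accessors below - the PCR in bytes 3-6, with byte 6 holding
// a fraction in 1/256ths, and the TS packet number in bytes 7-10 - document
// the on-disk record layout.)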
// // Implementation #include "MPEG2TransportStreamIndexFile.hh" #include "InputFile.hh" MPEG2TransportStreamIndexFile ::MPEG2TransportStreamIndexFile(UsageEnvironment& env, char const* indexFileName) : Medium(env), fFileName(strDup(indexFileName)), fFid(NULL), fMPEGVersion(0), fCurrentIndexRecordNum(0), fCachedPCR(0.0f), fCachedTSPacketNumber(0), fNumIndexRecords(0) { // Get the file size, to determine how many index records it contains: u_int64_t indexFileSize = GetFileSize(indexFileName, NULL); if (indexFileSize % INDEX_RECORD_SIZE != 0) { env << "Warning: Size of the index file \"" << indexFileName << "\" (" << (unsigned)indexFileSize << ") is not a multiple of the index record size (" << INDEX_RECORD_SIZE << ")\n"; } fNumIndexRecords = (unsigned long)(indexFileSize/INDEX_RECORD_SIZE); } MPEG2TransportStreamIndexFile* MPEG2TransportStreamIndexFile ::createNew(UsageEnvironment& env, char const* indexFileName) { if (indexFileName == NULL) return NULL; MPEG2TransportStreamIndexFile* indexFile = new MPEG2TransportStreamIndexFile(env, indexFileName); // Reject empty or non-existent index files: if (indexFile->getPlayingDuration() == 0.0f) { delete indexFile; indexFile = NULL; } return indexFile; } MPEG2TransportStreamIndexFile::~MPEG2TransportStreamIndexFile() { closeFid(); delete[] fFileName; } void MPEG2TransportStreamIndexFile ::lookupTSPacketNumFromNPT(float& npt, unsigned long& tsPacketNumber, unsigned long& indexRecordNumber) { if (npt <= 0.0 || fNumIndexRecords == 0) { // Fast-track a common case: npt = 0.0f; tsPacketNumber = indexRecordNumber = 0; return; } // If "npt" is the same as the one that we last looked up, return its cached result: if (npt == fCachedPCR) { tsPacketNumber = fCachedTSPacketNumber; indexRecordNumber = fCachedIndexRecordNumber; return; } // Search for the pair of neighboring index records whose PCR values span "npt". // Use the 'regula-falsi' method. Boolean success = False; unsigned long ixFound = 0; do { unsigned long ixLeft = 0, ixRight = fNumIndexRecords-1; float pcrLeft = 0.0f, pcrRight; if (!readIndexRecord(ixRight)) break; pcrRight = pcrFromBuf(); if (npt > pcrRight) npt = pcrRight; // handle "npt" too large by seeking to the last frame of the file while (ixRight-ixLeft > 1 && pcrLeft < npt && npt <= pcrRight) { unsigned long ixNew = ixLeft + (unsigned long)(((npt-pcrLeft)/(pcrRight-pcrLeft))*(ixRight-ixLeft)); if (ixNew == ixLeft || ixNew == ixRight) { // use bisection instead: ixNew = (ixLeft+ixRight)/2; } if (!readIndexRecord(ixNew)) break; float pcrNew = pcrFromBuf(); if (pcrNew < npt) { pcrLeft = pcrNew; ixLeft = ixNew; } else { pcrRight = pcrNew; ixRight = ixNew; } } if (ixRight-ixLeft > 1 || npt <= pcrLeft || npt > pcrRight) break; // bad PCR values in index file? 
ixFound = ixRight; // "Rewind' until we reach the start of a Video Sequence or GOP header: success = rewindToCleanPoint(ixFound); } while (0); if (success && readIndexRecord(ixFound)) { // Return (and cache) information from record "ixFound": npt = fCachedPCR = pcrFromBuf(); tsPacketNumber = fCachedTSPacketNumber = tsPacketNumFromBuf(); indexRecordNumber = fCachedIndexRecordNumber = ixFound; } else { // An error occurred: Return the default values, for npt == 0: npt = 0.0f; tsPacketNumber = indexRecordNumber = 0; } closeFid(); } void MPEG2TransportStreamIndexFile ::lookupPCRFromTSPacketNum(unsigned long& tsPacketNumber, Boolean reverseToPreviousCleanPoint, float& pcr, unsigned long& indexRecordNumber) { if (tsPacketNumber == 0 || fNumIndexRecords == 0) { // Fast-track a common case: pcr = 0.0f; indexRecordNumber = 0; return; } // If "tsPacketNumber" is the same as the one that we last looked up, return its cached result: if (tsPacketNumber == fCachedTSPacketNumber) { pcr = fCachedPCR; indexRecordNumber = fCachedIndexRecordNumber; return; } // Search for the pair of neighboring index records whose TS packet #s span "tsPacketNumber". // Use the 'regula-falsi' method. Boolean success = False; unsigned long ixFound = 0; do { unsigned long ixLeft = 0, ixRight = fNumIndexRecords-1; unsigned long tsLeft = 0, tsRight; if (!readIndexRecord(ixRight)) break; tsRight = tsPacketNumFromBuf(); if (tsPacketNumber > tsRight) tsPacketNumber = tsRight; // handle "tsPacketNumber" too large by seeking to the last frame of the file while (ixRight-ixLeft > 1 && tsLeft < tsPacketNumber && tsPacketNumber <= tsRight) { unsigned long ixNew = ixLeft + (unsigned long)(((tsPacketNumber-tsLeft)/(tsRight-tsLeft))*(ixRight-ixLeft)); if (ixNew == ixLeft || ixNew == ixRight) { // Use bisection instead: ixNew = (ixLeft+ixRight)/2; } if (!readIndexRecord(ixNew)) break; unsigned long tsNew = tsPacketNumFromBuf(); if (tsNew < tsPacketNumber) { tsLeft = tsNew; ixLeft = ixNew; } else { tsRight = tsNew; ixRight = ixNew; } } if (ixRight-ixLeft > 1 || tsPacketNumber <= tsLeft || tsPacketNumber > tsRight) break; // bad PCR values in index file? 
ixFound = ixRight; if (reverseToPreviousCleanPoint) { // "Rewind' until we reach the start of a Video Sequence or GOP header: success = rewindToCleanPoint(ixFound); } else { success = True; } } while (0); if (success && readIndexRecord(ixFound)) { // Return (and cache) information from record "ixFound": pcr = fCachedPCR = pcrFromBuf(); fCachedTSPacketNumber = tsPacketNumFromBuf(); if (reverseToPreviousCleanPoint) tsPacketNumber = fCachedTSPacketNumber; indexRecordNumber = fCachedIndexRecordNumber = ixFound; } else { // An error occurred: Return the default values, for tsPacketNumber == 0: pcr = 0.0f; indexRecordNumber = 0; } closeFid(); } Boolean MPEG2TransportStreamIndexFile ::readIndexRecordValues(unsigned long indexRecordNum, unsigned long& transportPacketNum, u_int8_t& offset, u_int8_t& size, float& pcr, u_int8_t& recordType) { if (!readIndexRecord(indexRecordNum)) return False; transportPacketNum = tsPacketNumFromBuf(); offset = offsetFromBuf(); size = sizeFromBuf(); pcr = pcrFromBuf(); recordType = recordTypeFromBuf(); return True; } float MPEG2TransportStreamIndexFile::getPlayingDuration() { if (fNumIndexRecords == 0 || !readOneIndexRecord(fNumIndexRecords-1)) return 0.0f; return pcrFromBuf(); } int MPEG2TransportStreamIndexFile::mpegVersion() { if (fMPEGVersion != 0) return fMPEGVersion; // we already know it // Read the first index record, and figure out the MPEG version from its type: if (!readOneIndexRecord(0)) return 0; // unknown; perhaps the indecx file is empty? setMPEGVersionFromRecordType(recordTypeFromBuf()); return fMPEGVersion; } Boolean MPEG2TransportStreamIndexFile::openFid() { if (fFid == NULL && fFileName != NULL) { if ((fFid = OpenInputFile(envir(), fFileName)) != NULL) { fCurrentIndexRecordNum = 0; } } return fFid != NULL; } Boolean MPEG2TransportStreamIndexFile::seekToIndexRecord(unsigned long indexRecordNumber) { if (!openFid()) return False; if (indexRecordNumber == fCurrentIndexRecordNum) return True; // we're already there if (SeekFile64(fFid, (int64_t)(indexRecordNumber*INDEX_RECORD_SIZE), SEEK_SET) != 0) return False; fCurrentIndexRecordNum = indexRecordNumber; return True; } Boolean MPEG2TransportStreamIndexFile::readIndexRecord(unsigned long indexRecordNum) { do { if (!seekToIndexRecord(indexRecordNum)) break; if (fread(fBuf, INDEX_RECORD_SIZE, 1, fFid) != 1) break; ++fCurrentIndexRecordNum; return True; } while (0); return False; // an error occurred } Boolean MPEG2TransportStreamIndexFile::readOneIndexRecord(unsigned long indexRecordNum) { Boolean result = readIndexRecord(indexRecordNum); closeFid(); return result; } void MPEG2TransportStreamIndexFile::closeFid() { if (fFid != NULL) { CloseInputFile(fFid); fFid = NULL; } } float MPEG2TransportStreamIndexFile::pcrFromBuf() { unsigned pcr_int = (fBuf[5]<<16) | (fBuf[4]<<8) | fBuf[3]; u_int8_t pcr_frac = fBuf[6]; return pcr_int + pcr_frac/256.0f; } unsigned long MPEG2TransportStreamIndexFile::tsPacketNumFromBuf() { return (fBuf[10]<<24) | (fBuf[9]<<16) | (fBuf[8]<<8) | fBuf[7]; } void MPEG2TransportStreamIndexFile::setMPEGVersionFromRecordType(u_int8_t recordType) { if (fMPEGVersion != 0) return; // we already know it u_int8_t const recordTypeWithoutStartBit = recordType&~0x80; if (recordTypeWithoutStartBit >= 1 && recordTypeWithoutStartBit <= 4) fMPEGVersion = 2; else if (recordTypeWithoutStartBit >= 5 && recordTypeWithoutStartBit <= 10) fMPEGVersion = 5; // represents H.264 else if (recordTypeWithoutStartBit >= 11 && recordTypeWithoutStartBit <= 16) fMPEGVersion = 6; // represents H.265 } Boolean 
MPEG2TransportStreamIndexFile::rewindToCleanPoint(unsigned long&ixFound) { Boolean success = False; // until we learn otherwise while (ixFound > 0) { if (!readIndexRecord(ixFound)) break; u_int8_t recordType = recordTypeFromBuf(); setMPEGVersionFromRecordType(recordType); // A 'clean point' is the start of a 'frame' from which a decoder can cleanly resume // handling the stream. For H.264, this is a SPS. For H.265, this is a VPS. // For MPEG-2, this is a Video Sequence Header, or a GOP. if ((recordType&0x80) != 0) { // This is the start of a 'frame' recordType &=~ 0x80; // remove the 'start of frame' bit if (fMPEGVersion == 5) { // H.264 if (recordType == 5/*SPS*/) { success = True; break; } } else if (fMPEGVersion == 6) { // H.265 if (recordType == 11/*VPS*/) { success = True; break; } } else { // MPEG-1, 2, or 4 if (recordType == 1/*VSH*/) { success = True; break; } else if (recordType == 2/*GOP*/) { // Hack: If the preceding record is for a Video Sequence Header, then use it instead: unsigned long newIxFound = ixFound; while (--newIxFound > 0) { if (!readIndexRecord(newIxFound)) break; recordType = recordTypeFromBuf(); if ((recordType&0x7F) != 1) break; // not a Video Sequence Header if ((recordType&0x80) != 0) { // this is the start of the VSH; use it ixFound = newIxFound; break; } } } success = True; break; } } // Keep checking, from the previous record: --ixFound; } if (ixFound == 0) success = True; // use record 0 anyway return success; } live/liveMedia/MPEG2TransportStreamTrickModeFilter.cpp000444 001751 000000 00000025446 12265042432 023230 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that converts a MPEG Transport Stream file - with corresponding index file // - to a corresponding Video Elementary Stream. It also uses a "scale" parameter // to implement 'trick mode' (fast forward or reverse play, using I-frames) on // the video stream. // Implementation #include "MPEG2TransportStreamTrickModeFilter.hh" #include // Define the following to be True if we want the output file to have the same frame rate as the original file. // (Because the output file contains I-frames only, this means that each I-frame will appear in the output file // several times, and therefore the output file's bitrate will be significantly higher than that of the original.) // Define the following to be False if we want the output file to include each I-frame no more than once. // (This means that - except for high 'scale' values - both the output frame rate and the output bit rate // will be less than that of the original.) 
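// (Illustrative numbers, not from this file: assuming a 30 fps source with an
// I-frame every 12 frames and a 'scale' of 4, a frame becomes 'due' once per 4
// input frames. With the setting True, each I-frame is re-delivered in every
// such slot - about 3 times per GOP, for a roughly 30 fps output - while with
// False each I-frame is delivered once, for a roughly 10 fps output.)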
#define KEEP_ORIGINAL_FRAME_RATE False MPEG2TransportStreamTrickModeFilter* MPEG2TransportStreamTrickModeFilter ::createNew(UsageEnvironment& env, FramedSource* inputSource, MPEG2TransportStreamIndexFile* indexFile, int scale) { return new MPEG2TransportStreamTrickModeFilter(env, inputSource, indexFile, scale); } MPEG2TransportStreamTrickModeFilter ::MPEG2TransportStreamTrickModeFilter(UsageEnvironment& env, FramedSource* inputSource, MPEG2TransportStreamIndexFile* indexFile, int scale) : FramedFilter(env, inputSource), fHaveStarted(False), fIndexFile(indexFile), fScale(scale), fDirection(1), fState(SKIPPING_FRAME), fFrameCount(0), fNextIndexRecordNum(0), fNextTSPacketNum(0), fCurrentTSPacketNum((unsigned long)(-1)), fUseSavedFrameNextTime(False) { if (fScale < 0) { // reverse play fScale = -fScale; fDirection = -1; } } MPEG2TransportStreamTrickModeFilter::~MPEG2TransportStreamTrickModeFilter() { } Boolean MPEG2TransportStreamTrickModeFilter::seekTo(unsigned long tsPacketNumber, unsigned long indexRecordNumber) { seekToTransportPacket(tsPacketNumber); fNextIndexRecordNum = indexRecordNumber; return True; } #define isIFrameStart(type) ((type) == 0x81/*actually, a VSH*/ || (type) == 0x85/*actually, an SPS, for H.264*/ || (type) == 0x8B/*actually, a VPS, for H.265*/) // This relies upon I-frames always being preceded by a VSH+GOP (for MPEG-2 data), // by an SPS (for H.264 data), or by a VPS (for H.265 data) #define isNonIFrameStart(type) ((type) == 0x83 || (type) == 0x88/*for H.264*/ || (type) == 0x8E/*for H.265*/) void MPEG2TransportStreamTrickModeFilter::doGetNextFrame() { // fprintf(stderr, "#####DGNF1\n"); // If our client's buffer size is too small, then deliver // a 0-byte 'frame', to tell it to process all of the data that it has // already read, before asking for more data from us: if (fMaxSize < TRANSPORT_PACKET_SIZE) { fFrameSize = 0; afterGetting(this); return; } while (1) { // Get the next record from our index file. // This tells us the type of frame this data is, which Transport Stream packet // (from the input source) the data comes from, and where in the Transport Stream // packet it comes from: u_int8_t recordType; float recordPCR; Boolean endOfIndexFile = False; if (!fIndexFile->readIndexRecordValues(fNextIndexRecordNum, fDesiredTSPacketNum, fDesiredDataOffset, fDesiredDataSize, recordPCR, recordType)) { // We ran off the end of the index file. If we're not delivering a // pre-saved frame, then handle this the same way as if the // input Transport Stream source ended. if (fState != DELIVERING_SAVED_FRAME) { onSourceClosure1(); return; } endOfIndexFile = True; } else if (!fHaveStarted) { fFirstPCR = recordPCR; fHaveStarted = True; } // fprintf(stderr, "#####read index record %ld: ts %ld: %c, PCR %f\n", fNextIndexRecordNum, fDesiredTSPacketNum, isIFrameStart(recordType) ? 'I' : isNonIFrameStart(recordType) ? 'j' : 'x', recordPCR); fNextIndexRecordNum += (fState == DELIVERING_SAVED_FRAME) ? 1 : fDirection; // Handle this index record, depending on the record type and our current state: switch (fState) { case SKIPPING_FRAME: case SAVING_AND_DELIVERING_FRAME: { // if (fState == SKIPPING_FRAME) fprintf(stderr, "\tSKIPPING_FRAME\n"); else fprintf(stderr, "\tSAVING_AND_DELIVERING_FRAME\n");//##### if (isIFrameStart(recordType)) { // Save a record of this frame: fSavedFrameIndexRecordStart = fNextIndexRecordNum - fDirection; fUseSavedFrameNextTime = True; // fprintf(stderr, "\trecording\n");//##### if ((fFrameCount++)%fScale == 0 && fUseSavedFrameNextTime) { // A frame is due now.
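// (The "(fFrameCount++)%fScale" test above implements the frame-selection cadence: every index record that starts a frame bumps the counter, and only every "fScale"-th one may trigger a delivery. The "fUseSavedFrameNextTime" flag additionally requires that an I-frame has already been scanned, because only I-frames can be decoded without reference to other frames.)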
fFrameCount = 1; // reset to avoid overflow if (fDirection > 0) { // Begin delivering this frame, as we're scanning it: fState = SAVING_AND_DELIVERING_FRAME; // fprintf(stderr, "\tdelivering\n");//##### fDesiredDataPCR = recordPCR; // use this frame's PCR attemptDeliveryToClient(); return; } else { // Deliver this frame, then resume normal scanning: // (This relies on the index records having begun with an I-frame.) fState = DELIVERING_SAVED_FRAME; fSavedSequentialIndexRecordNum = fNextIndexRecordNum; fDesiredDataPCR = recordPCR; // use this frame's (not the saved frame's) PCR fNextIndexRecordNum = fSavedFrameIndexRecordStart; // fprintf(stderr, "\tbeginning delivery of saved frame\n");//##### } } else { // No frame is needed now: fState = SKIPPING_FRAME; } } else if (isNonIFrameStart(recordType)) { if ((fFrameCount++)%fScale == 0 && fUseSavedFrameNextTime) { // A frame is due now, so begin delivering the one that we had saved: // (This relies on the index records having begun with an I-frame.) fFrameCount = 1; // reset to avoid overflow fState = DELIVERING_SAVED_FRAME; fSavedSequentialIndexRecordNum = fNextIndexRecordNum; fDesiredDataPCR = recordPCR; // use this frame's (not the saved frame's) PCR fNextIndexRecordNum = fSavedFrameIndexRecordStart; // fprintf(stderr, "\tbeginning delivery of saved frame\n");//##### } else { // No frame is needed now: fState = SKIPPING_FRAME; } } else { // Not the start of a frame, but deliver it, if it's needed: if (fState == SAVING_AND_DELIVERING_FRAME) { // fprintf(stderr, "\tdelivering\n");//##### fDesiredDataPCR = recordPCR; // use this frame's PCR attemptDeliveryToClient(); return; } } break; } case DELIVERING_SAVED_FRAME: { // fprintf(stderr, "\tDELIVERING_SAVED_FRAME\n");//##### if (endOfIndexFile || (isIFrameStart(recordType) && fNextIndexRecordNum-1 != fSavedFrameIndexRecordStart) || isNonIFrameStart(recordType)) { // fprintf(stderr, "\tended delivery of saved frame\n");//##### // We've reached the end of the saved frame, so revert to the // original sequence of index records: fNextIndexRecordNum = fSavedSequentialIndexRecordNum; fUseSavedFrameNextTime = KEEP_ORIGINAL_FRAME_RATE; fState = SKIPPING_FRAME; } else { // Continue delivering: // fprintf(stderr, "\tdelivering\n");//##### attemptDeliveryToClient(); return; } break; } } } } void MPEG2TransportStreamTrickModeFilter::doStopGettingFrames() { FramedFilter::doStopGettingFrames(); fIndexFile->stopReading(); } void MPEG2TransportStreamTrickModeFilter::attemptDeliveryToClient() { if (fCurrentTSPacketNum == fDesiredTSPacketNum) { // fprintf(stderr, "\t\tdelivering ts %d:%d, %d bytes, PCR %f\n", fCurrentTSPacketNum, fDesiredDataOffset, fDesiredDataSize, fDesiredDataPCR);//##### // We already have the Transport Packet that we want. 
Deliver its data: memmove(fTo, &fInputBuffer[fDesiredDataOffset], fDesiredDataSize); fFrameSize = fDesiredDataSize; // Map this data's position on the original PCR timeline onto the output timeline, compressed by "fScale" (and re-oriented for reverse play): float deliveryPCR = fDirection*(fDesiredDataPCR - fFirstPCR)/fScale; if (deliveryPCR < 0.0) deliveryPCR = 0.0; fPresentationTime.tv_sec = (unsigned long)deliveryPCR; fPresentationTime.tv_usec = (unsigned long)((deliveryPCR - fPresentationTime.tv_sec)*1000000.0f); // fprintf(stderr, "#####DGNF9\n"); afterGetting(this); } else { // Arrange to read the Transport Packet that we want: readTransportPacket(fDesiredTSPacketNum); } } void MPEG2TransportStreamTrickModeFilter::seekToTransportPacket(unsigned long tsPacketNum) { if (tsPacketNum == fNextTSPacketNum) return; // we're already there ByteStreamFileSource* tsFile = (ByteStreamFileSource*)fInputSource; u_int64_t tsPacketNum64 = (u_int64_t)tsPacketNum; tsFile->seekToByteAbsolute(tsPacketNum64*TRANSPORT_PACKET_SIZE); fNextTSPacketNum = tsPacketNum; } void MPEG2TransportStreamTrickModeFilter::readTransportPacket(unsigned long tsPacketNum) { seekToTransportPacket(tsPacketNum); fInputSource->getNextFrame(fInputBuffer, TRANSPORT_PACKET_SIZE, afterGettingFrame, this, onSourceClosure, this); } void MPEG2TransportStreamTrickModeFilter ::afterGettingFrame(void* clientData, unsigned frameSize, unsigned /*numTruncatedBytes*/, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) { MPEG2TransportStreamTrickModeFilter* filter = (MPEG2TransportStreamTrickModeFilter*)clientData; filter->afterGettingFrame1(frameSize); } void MPEG2TransportStreamTrickModeFilter::afterGettingFrame1(unsigned frameSize) { if (frameSize != TRANSPORT_PACKET_SIZE) { // Treat this as if the input source ended: onSourceClosure1(); return; } fCurrentTSPacketNum = fNextTSPacketNum; // i.e., the one that we just read ++fNextTSPacketNum; // Attempt delivery again: attemptDeliveryToClient(); } void MPEG2TransportStreamTrickModeFilter::onSourceClosure(void* clientData) { MPEG2TransportStreamTrickModeFilter* filter = (MPEG2TransportStreamTrickModeFilter*)clientData; filter->onSourceClosure1(); } void MPEG2TransportStreamTrickModeFilter::onSourceClosure1() { fIndexFile->stopReading(); FramedSource::handleClosure(this); } live/liveMedia/AC3AudioMatroskaFileServerMediaSubsession.cpp000444 001751 000000 00000005112 12265042432 024400 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an AC3 audio track within a Matroska file.
// Implementation #include "AC3AudioMatroskaFileServerMediaSubsession.hh" #include "AC3AudioRTPSink.hh" #include "MatroskaDemuxedTrack.hh" AC3AudioMatroskaFileServerMediaSubsession* AC3AudioMatroskaFileServerMediaSubsession ::createNew(MatroskaFileServerDemux& demux, unsigned trackNumber) { return new AC3AudioMatroskaFileServerMediaSubsession(demux, trackNumber); } AC3AudioMatroskaFileServerMediaSubsession ::AC3AudioMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber) : FileServerMediaSubsession(demux.envir(), demux.fileName(), False), fOurDemux(demux), fTrackNumber(trackNumber) { } AC3AudioMatroskaFileServerMediaSubsession ::~AC3AudioMatroskaFileServerMediaSubsession() { } float AC3AudioMatroskaFileServerMediaSubsession::duration() const { return fOurDemux.fileDuration(); } void AC3AudioMatroskaFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) { ((MatroskaDemuxedTrack*)inputSource)->seekToTime(seekNPT); } FramedSource* AC3AudioMatroskaFileServerMediaSubsession ::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { estBitrate = 48; // kbps, estimate return fOurDemux.newDemuxedTrack(clientSessionId, fTrackNumber); } RTPSink* AC3AudioMatroskaFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { MatroskaTrack* track = fOurDemux.lookup(fTrackNumber); return AC3AudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, track->samplingFrequency); } live/liveMedia/AMRAudioFileSink.cpp000444 001751 000000 00000007036 12265042432 017376 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// AMR Audio File sinks // Implementation #include "AMRAudioFileSink.hh" #include "AMRAudioSource.hh" #include "OutputFile.hh" ////////// AMRAudioFileSink ////////// AMRAudioFileSink ::AMRAudioFileSink(UsageEnvironment& env, FILE* fid, unsigned bufferSize, char const* perFrameFileNamePrefix) : FileSink(env, fid, bufferSize, perFrameFileNamePrefix), fHaveWrittenHeader(False) { } AMRAudioFileSink::~AMRAudioFileSink() { } AMRAudioFileSink* AMRAudioFileSink::createNew(UsageEnvironment& env, char const* fileName, unsigned bufferSize, Boolean oneFilePerFrame) { do { FILE* fid; char const* perFrameFileNamePrefix; if (oneFilePerFrame) { // Create the fid for each frame fid = NULL; perFrameFileNamePrefix = fileName; } else { // Normal case: create the fid once fid = OpenOutputFile(env, fileName); if (fid == NULL) break; perFrameFileNamePrefix = NULL; } return new AMRAudioFileSink(env, fid, bufferSize, perFrameFileNamePrefix); } while (0); return NULL; } Boolean AMRAudioFileSink::sourceIsCompatibleWithUs(MediaSource& source) { // The input source must be an AMR Audio source: return source.isAMRAudioSource(); } void AMRAudioFileSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime) { AMRAudioSource* source = (AMRAudioSource*)fSource; if (source == NULL) return; // sanity check if (!fHaveWrittenHeader && fPerFrameFileNameBuffer == NULL) { // Output the appropriate AMR header to the start of the file. // This header is defined in RFC 4867, section 5. // (However, we don't do this if we're creating one file per frame.) char headerBuffer[100]; sprintf(headerBuffer, "#!AMR%s%s\n", source->isWideband() ? "-WB" : "", source->numChannels() > 1 ? "_MC1.0" : ""); unsigned headerLength = strlen(headerBuffer); if (source->numChannels() > 1) { // Also add a 32-bit channel description field: headerBuffer[headerLength++] = 0; headerBuffer[headerLength++] = 0; headerBuffer[headerLength++] = 0; headerBuffer[headerLength++] = source->numChannels(); } addData((unsigned char*)headerBuffer, headerLength, presentationTime); } fHaveWrittenHeader = True; // Add the 1-byte header, before writing the file data proper: // (Again, we don't do this if we're creating one file per frame.) if (fPerFrameFileNameBuffer == NULL) { u_int8_t frameHeader = source->lastFrameHeader(); addData(&frameHeader, 1, presentationTime); } // Call the parent class to complete the normal file write with the input data: FileSink::afterGettingFrame(frameSize, numTruncatedBytes, presentationTime); } live/liveMedia/H265VideoMatroskaFileServerMediaSubsession.hh000444 001751 000000 00000004502 12265042432 024302 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an H265 video track within a Matroska file. // C++ header #ifndef _H265_VIDEO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #define _H265_VIDEO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _H265_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH #include "H265VideoFileServerMediaSubsession.hh" #endif #ifndef _MATROSKA_FILE_SERVER_DEMUX_HH #include "MatroskaFileServerDemux.hh" #endif class H265VideoMatroskaFileServerMediaSubsession: public H265VideoFileServerMediaSubsession { public: static H265VideoMatroskaFileServerMediaSubsession* createNew(MatroskaFileServerDemux& demux, unsigned trackNumber); private: H265VideoMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber); // called only by createNew(); virtual ~H265VideoMatroskaFileServerMediaSubsession(); private: // redefined virtual functions virtual float duration() const; virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); private: MatroskaFileServerDemux& fOurDemux; unsigned fTrackNumber; // We store one VPS, one SPS, and one PPS, for use in our input 'framer's: unsigned fVPSSize; u_int8_t* fVPS; unsigned fSPSSize; u_int8_t* fSPS; unsigned fPPSSize; u_int8_t* fPPS; }; #endif live/liveMedia/H264VideoFileServerMediaSubsession.cpp000444 001751 000000 00000011057 12265042432 022765 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an H264 video file.
// Implementation #include "H264VideoFileServerMediaSubsession.hh" #include "H264VideoRTPSink.hh" #include "ByteStreamFileSource.hh" #include "H264VideoStreamFramer.hh" H264VideoFileServerMediaSubsession* H264VideoFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) { return new H264VideoFileServerMediaSubsession(env, fileName, reuseFirstSource); } H264VideoFileServerMediaSubsession::H264VideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) : FileServerMediaSubsession(env, fileName, reuseFirstSource), fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) { } H264VideoFileServerMediaSubsession::~H264VideoFileServerMediaSubsession() { delete[] fAuxSDPLine; } static void afterPlayingDummy(void* clientData) { H264VideoFileServerMediaSubsession* subsess = (H264VideoFileServerMediaSubsession*)clientData; subsess->afterPlayingDummy1(); } void H264VideoFileServerMediaSubsession::afterPlayingDummy1() { // Unschedule any pending 'checking' task: envir().taskScheduler().unscheduleDelayedTask(nextTask()); // Signal the event loop that we're done: setDoneFlag(); } static void checkForAuxSDPLine(void* clientData) { H264VideoFileServerMediaSubsession* subsess = (H264VideoFileServerMediaSubsession*)clientData; subsess->checkForAuxSDPLine1(); } void H264VideoFileServerMediaSubsession::checkForAuxSDPLine1() { char const* dasl; if (fAuxSDPLine != NULL) { // Signal the event loop that we're done: setDoneFlag(); } else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) { fAuxSDPLine = strDup(dasl); fDummyRTPSink = NULL; // Signal the event loop that we're done: setDoneFlag(); } else { // try again after a brief delay: int uSecsToDelay = 100000; // 100 ms nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)checkForAuxSDPLine, this); } } char const* H264VideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) { if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client) if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream // Note: For H264 video files, the 'config' information ("profile-level-id" and "sprop-parameter-sets") isn't known // until we start reading the file. This means that "rtpSink"s "auxSDPLine()" will be NULL initially, // and we need to start reading data from our file until this changes. 
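// (What follows implements that: "rtpSink" is temporarily treated as a 'dummy' sink - we start playing the input into it, schedule "checkForAuxSDPLine" to re-poll "auxSDPLine()" every 100ms, and block in a nested event loop until "fDoneFlag" is set, either because the SDP line became available or because the dummy playout ended.)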
fDummyRTPSink = rtpSink; // Start reading the file: fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this); // Check whether the sink's 'auxSDPLine()' is ready: checkForAuxSDPLine(this); } envir().taskScheduler().doEventLoop(&fDoneFlag); return fAuxSDPLine; } FramedSource* H264VideoFileServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { estBitrate = 500; // kbps, estimate // Create the video source: ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; fFileSize = fileSource->fileSize(); // Create a framer for the Video Elementary Stream: return H264VideoStreamFramer::createNew(envir(), fileSource); } RTPSink* H264VideoFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } live/liveMedia/TCPStreamSink.cpp000444 001751 000000 00000011012 12265042432 016764 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A sink representing a TCP output stream // Implementation #include "TCPStreamSink.hh" TCPStreamSink* TCPStreamSink::createNew(UsageEnvironment& env, int socketNum) { return new TCPStreamSink(env, socketNum); } TCPStreamSink::TCPStreamSink(UsageEnvironment& env, int socketNum) : MediaSink(env), fUnwrittenBytesStart(0), fUnwrittenBytesEnd(0), fInputSourceIsOpen(False), fOutputSocketIsWritable(True), fOutputSocketNum(socketNum) { } TCPStreamSink::~TCPStreamSink() { } Boolean TCPStreamSink::continuePlaying() { fInputSourceIsOpen = fSource != NULL; processBuffer(); return True; } #define TCP_STREAM_SINK_MIN_READ_SIZE 1000 void TCPStreamSink::processBuffer() { // First, try writing data to our output socket, if we can: if (fOutputSocketIsWritable && numUnwrittenBytes() > 0) { int numBytesWritten = send(fOutputSocketNum, (const char*)&fBuffer[fUnwrittenBytesStart], numUnwrittenBytes(), 0); if (numBytesWritten < (int)numUnwrittenBytes()) { // The output socket is no longer writable. Set a handler to be called when it becomes writable again. fOutputSocketIsWritable = False; if (envir().getErrno() != EPIPE) { // on this error, the socket might still be writable, but no longer usable envir().taskScheduler().setBackgroundHandling(fOutputSocketNum, SOCKET_WRITABLE, socketWritableHandler, this); } } if (numBytesWritten > 0) { // We wrote at least some of our data. 
Update our buffer pointers: fUnwrittenBytesStart += numBytesWritten; if (fUnwrittenBytesStart > fUnwrittenBytesEnd) fUnwrittenBytesStart = fUnwrittenBytesEnd; // sanity check if (fUnwrittenBytesStart == fUnwrittenBytesEnd && (!fInputSourceIsOpen || !fSource->isCurrentlyAwaitingData())) { fUnwrittenBytesStart = fUnwrittenBytesEnd = 0; // reset the buffer to empty } } } // Then, read from our input source, if we can (& we're not already reading from it): if (fInputSourceIsOpen && freeBufferSpace() >= TCP_STREAM_SINK_MIN_READ_SIZE && !fSource->isCurrentlyAwaitingData()) { fSource->getNextFrame(&fBuffer[fUnwrittenBytesEnd], freeBufferSpace(), afterGettingFrame, this, ourOnSourceClosure, this); } if (!fInputSourceIsOpen && numUnwrittenBytes() == 0) { // We're now done: onSourceClosure(); } } void TCPStreamSink::socketWritableHandler(void* clientData, int /*mask*/) { TCPStreamSink* sink = (TCPStreamSink*)clientData; sink->socketWritableHandler1(); } void TCPStreamSink::socketWritableHandler1() { envir().taskScheduler().disableBackgroundHandling(fOutputSocketNum); // disable this handler until the next time it's needed fOutputSocketIsWritable = True; processBuffer(); } void TCPStreamSink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval /*presentationTime*/, unsigned /*durationInMicroseconds*/) { TCPStreamSink* sink = (TCPStreamSink*)clientData; sink->afterGettingFrame(frameSize, numTruncatedBytes); } void TCPStreamSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes) { if (numTruncatedBytes > 0) { envir() << "TCPStreamSink::afterGettingFrame(): The input frame data was too large for our buffer. " << numTruncatedBytes << " bytes of trailing data were dropped! Correct this by increasing the definition of \"TCP_STREAM_SINK_BUFFER_SIZE\" in \"include/TCPStreamSink.hh\".\n"; } fUnwrittenBytesEnd += frameSize; processBuffer(); } void TCPStreamSink::ourOnSourceClosure(void* clientData) { TCPStreamSink* sink = (TCPStreamSink*)clientData; sink->ourOnSourceClosure1(); } void TCPStreamSink::ourOnSourceClosure1() { // The input source has closed: fInputSourceIsOpen = False; processBuffer(); } live/liveMedia/Makefile.tail000444 001751 000000 00000103232 12265042432 016227 0ustar00rsfwheel000000 000000 ##### End of variables to change NAME = libliveMedia LIVEMEDIA_LIB = $(NAME).$(LIB_SUFFIX) ALL = $(LIVEMEDIA_LIB) all: $(ALL) .$(C).$(OBJ): $(C_COMPILER) -c $(C_FLAGS) $< .$(CPP).$(OBJ): $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $< MP3_SOURCE_OBJS = MP3FileSource.$(OBJ) MP3Transcoder.$(OBJ) MP3ADU.$(OBJ) MP3ADUdescriptor.$(OBJ) MP3ADUinterleaving.$(OBJ) MP3ADUTranscoder.$(OBJ) MP3StreamState.$(OBJ) MP3Internals.$(OBJ) MP3InternalsHuffman.$(OBJ) MP3InternalsHuffmanTable.$(OBJ) MP3ADURTPSource.$(OBJ) MPEG_SOURCE_OBJS = MPEG1or2Demux.$(OBJ) MPEG1or2DemuxedElementaryStream.$(OBJ) MPEGVideoStreamFramer.$(OBJ) MPEG1or2VideoStreamFramer.$(OBJ) MPEG1or2VideoStreamDiscreteFramer.$(OBJ) MPEG4VideoStreamFramer.$(OBJ) MPEG4VideoStreamDiscreteFramer.$(OBJ) H264or5VideoStreamFramer.$(OBJ) H264or5VideoStreamDiscreteFramer.$(OBJ) H264VideoStreamFramer.$(OBJ) H264VideoStreamDiscreteFramer.$(OBJ) H265VideoStreamFramer.$(OBJ) H265VideoStreamDiscreteFramer.$(OBJ) MPEGVideoStreamParser.$(OBJ) MPEG1or2AudioStreamFramer.$(OBJ) MPEG1or2AudioRTPSource.$(OBJ) MPEG4LATMAudioRTPSource.$(OBJ) MPEG4ESVideoRTPSource.$(OBJ) MPEG4GenericRTPSource.$(OBJ) $(MP3_SOURCE_OBJS) MPEG1or2VideoRTPSource.$(OBJ) MPEG2TransportStreamMultiplexor.$(OBJ)
MPEG2TransportStreamFromPESSource.$(OBJ) MPEG2TransportStreamFromESSource.$(OBJ) MPEG2TransportStreamFramer.$(OBJ) ADTSAudioFileSource.$(OBJ) H263_SOURCE_OBJS = H263plusVideoRTPSource.$(OBJ) H263plusVideoStreamFramer.$(OBJ) H263plusVideoStreamParser.$(OBJ) AC3_SOURCE_OBJS = AC3AudioStreamFramer.$(OBJ) AC3AudioRTPSource.$(OBJ) DV_SOURCE_OBJS = DVVideoStreamFramer.$(OBJ) DVVideoRTPSource.$(OBJ) MP3_SINK_OBJS = MP3ADURTPSink.$(OBJ) MPEG_SINK_OBJS = MPEG1or2AudioRTPSink.$(OBJ) $(MP3_SINK_OBJS) MPEG1or2VideoRTPSink.$(OBJ) MPEG4LATMAudioRTPSink.$(OBJ) MPEG4GenericRTPSink.$(OBJ) MPEG4ESVideoRTPSink.$(OBJ) H263_SINK_OBJS = H263plusVideoRTPSink.$(OBJ) H264_OR_5_SINK_OBJS = H264or5VideoRTPSink.$(OBJ) H264VideoRTPSink.$(OBJ) H265VideoRTPSink.$(OBJ) DV_SINK_OBJS = DVVideoRTPSink.$(OBJ) AC3_SINK_OBJS = AC3AudioRTPSink.$(OBJ) MISC_SOURCE_OBJS = MediaSource.$(OBJ) FramedSource.$(OBJ) FramedFileSource.$(OBJ) FramedFilter.$(OBJ) ByteStreamFileSource.$(OBJ) ByteStreamMultiFileSource.$(OBJ) ByteStreamMemoryBufferSource.$(OBJ) BasicUDPSource.$(OBJ) DeviceSource.$(OBJ) AudioInputDevice.$(OBJ) WAVAudioFileSource.$(OBJ) $(MPEG_SOURCE_OBJS) $(H263_SOURCE_OBJS) $(AC3_SOURCE_OBJS) $(DV_SOURCE_OBJS) JPEGVideoSource.$(OBJ) AMRAudioSource.$(OBJ) AMRAudioFileSource.$(OBJ) InputFile.$(OBJ) StreamReplicator.$(OBJ) MISC_SINK_OBJS = MediaSink.$(OBJ) FileSink.$(OBJ) BasicUDPSink.$(OBJ) AMRAudioFileSink.$(OBJ) H264VideoFileSink.$(OBJ) $(MPEG_SINK_OBJS) $(H263_SINK_OBJS) $(H264_OR_5_SINK_OBJS) $(DV_SINK_OBJS) $(AC3_SINK_OBJS) VorbisAudioRTPSink.$(OBJ) TheoraVideoRTPSink.$(OBJ) VP8VideoRTPSink.$(OBJ) GSMAudioRTPSink.$(OBJ) JPEGVideoRTPSink.$(OBJ) SimpleRTPSink.$(OBJ) AMRAudioRTPSink.$(OBJ) T140TextRTPSink.$(OBJ) TCPStreamSink.$(OBJ) OutputFile.$(OBJ) MISC_FILTER_OBJS = uLawAudioFilter.$(OBJ) TRANSPORT_STREAM_TRICK_PLAY_OBJS = MPEG2IndexFromTransportStream.$(OBJ) MPEG2TransportStreamIndexFile.$(OBJ) MPEG2TransportStreamTrickModeFilter.$(OBJ) RTP_SOURCE_OBJS = RTPSource.$(OBJ) MultiFramedRTPSource.$(OBJ) SimpleRTPSource.$(OBJ) H261VideoRTPSource.$(OBJ) H264VideoRTPSource.$(OBJ) QCELPAudioRTPSource.$(OBJ) AMRAudioRTPSource.$(OBJ) JPEGVideoRTPSource.$(OBJ) VorbisAudioRTPSource.$(OBJ) VP8VideoRTPSource.$(OBJ) RTP_SINK_OBJS = RTPSink.$(OBJ) MultiFramedRTPSink.$(OBJ) AudioRTPSink.$(OBJ) VideoRTPSink.$(OBJ) TextRTPSink.$(OBJ) RTP_INTERFACE_OBJS = RTPInterface.$(OBJ) RTP_OBJS = $(RTP_SOURCE_OBJS) $(RTP_SINK_OBJS) $(RTP_INTERFACE_OBJS) RTCP_OBJS = RTCP.$(OBJ) rtcp_from_spec.$(OBJ) RTSP_OBJS = RTSPServer.$(OBJ) RTSPClient.$(OBJ) RTSPCommon.$(OBJ) RTSPServerSupportingHTTPStreaming.$(OBJ) RTSPRegisterSender.$(OBJ) SIP_OBJS = SIPClient.$(OBJ) SESSION_OBJS = MediaSession.$(OBJ) ServerMediaSession.$(OBJ) PassiveServerMediaSubsession.$(OBJ) OnDemandServerMediaSubsession.$(OBJ) FileServerMediaSubsession.$(OBJ) MPEG4VideoFileServerMediaSubsession.$(OBJ) H264VideoFileServerMediaSubsession.$(OBJ) H265VideoFileServerMediaSubsession.$(OBJ) H263plusVideoFileServerMediaSubsession.$(OBJ) WAVAudioFileServerMediaSubsession.$(OBJ) AMRAudioFileServerMediaSubsession.$(OBJ) MP3AudioFileServerMediaSubsession.$(OBJ) MPEG1or2VideoFileServerMediaSubsession.$(OBJ) MPEG1or2FileServerDemux.$(OBJ) MPEG1or2DemuxedServerMediaSubsession.$(OBJ) MPEG2TransportFileServerMediaSubsession.$(OBJ) ADTSAudioFileServerMediaSubsession.$(OBJ) DVVideoFileServerMediaSubsession.$(OBJ) AC3AudioFileServerMediaSubsession.$(OBJ) MPEG2TransportUDPServerMediaSubsession.$(OBJ) ProxyServerMediaSession.$(OBJ) QUICKTIME_OBJS = QuickTimeFileSink.$(OBJ) QuickTimeGenericRTPSource.$(OBJ) AVI_OBJS 
= AVIFileSink.$(OBJ) MATROSKA_FILE_OBJS = MatroskaFile.$(OBJ) MatroskaFileParser.$(OBJ) EBMLNumber.$(OBJ) MatroskaDemuxedTrack.$(OBJ) MATROSKA_SERVER_MEDIA_SUBSESSION_VIDEO_OBJS = H264VideoMatroskaFileServerMediaSubsession.$(OBJ) H265VideoMatroskaFileServerMediaSubsession.$(OBJ) VP8VideoMatroskaFileServerMediaSubsession.$(OBJ) MATROSKA_SERVER_MEDIA_SUBSESSION_AUDIO_OBJS = AACAudioMatroskaFileServerMediaSubsession.$(OBJ) AC3AudioMatroskaFileServerMediaSubsession.$(OBJ) MP3AudioMatroskaFileServerMediaSubsession.$(OBJ) VorbisAudioMatroskaFileServerMediaSubsession.$(OBJ) MATROSKA_SERVER_MEDIA_SUBSESSION_TEXT_OBJS = T140TextMatroskaFileServerMediaSubsession.$(OBJ) MATROSKA_SERVER_MEDIA_SUBSESSION_OBJS = $(MATROSKA_SERVER_MEDIA_SUBSESSION_VIDEO_OBJS) $(MATROSKA_SERVER_MEDIA_SUBSESSION_AUDIO_OBJS) $(MATROSKA_SERVER_MEDIA_SUBSESSION_TEXT_OBJS) MATROSKA_RTSP_SERVER_OBJS = MatroskaFileServerDemux.$(OBJ) $(MATROSKA_SERVER_MEDIA_SUBSESSION_OBJS) MATROSKA_OBJS = $(MATROSKA_FILE_OBJS) $(MATROSKA_RTSP_SERVER_OBJS) MISC_OBJS = DarwinInjector.$(OBJ) BitVector.$(OBJ) StreamParser.$(OBJ) DigestAuthentication.$(OBJ) ourMD5.$(OBJ) Base64.$(OBJ) Locale.$(OBJ) LIVEMEDIA_LIB_OBJS = Media.$(OBJ) $(MISC_SOURCE_OBJS) $(MISC_SINK_OBJS) $(MISC_FILTER_OBJS) $(RTP_OBJS) $(RTCP_OBJS) $(RTSP_OBJS) $(SIP_OBJS) $(SESSION_OBJS) $(QUICKTIME_OBJS) $(AVI_OBJS) $(TRANSPORT_STREAM_TRICK_PLAY_OBJS) $(MATROSKA_OBJS) $(MISC_OBJS) $(LIVEMEDIA_LIB): $(LIVEMEDIA_LIB_OBJS) \ $(PLATFORM_SPECIFIC_LIB_OBJS) $(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) \ $(LIVEMEDIA_LIB_OBJS) Media.$(CPP): include/Media.hh include/Media.hh: include/liveMedia_version.hh MediaSource.$(CPP): include/MediaSource.hh include/MediaSource.hh: include/Media.hh FramedSource.$(CPP): include/FramedSource.hh include/FramedSource.hh: include/MediaSource.hh FramedFileSource.$(CPP): include/FramedFileSource.hh include/FramedFileSource.hh: include/FramedSource.hh FramedFilter.$(CPP): include/FramedFilter.hh include/FramedFilter.hh: include/FramedSource.hh RTPSource.$(CPP): include/RTPSource.hh include/RTPSource.hh: include/FramedSource.hh include/RTPInterface.hh include/RTPInterface.hh: include/Media.hh MultiFramedRTPSource.$(CPP): include/MultiFramedRTPSource.hh include/MultiFramedRTPSource.hh: include/RTPSource.hh SimpleRTPSource.$(CPP): include/SimpleRTPSource.hh include/SimpleRTPSource.hh: include/MultiFramedRTPSource.hh H261VideoRTPSource.$(CPP): include/H261VideoRTPSource.hh include/H261VideoRTPSource.hh: include/MultiFramedRTPSource.hh H264VideoRTPSource.$(CPP): include/H264VideoRTPSource.hh include/Base64.hh include/H264VideoRTPSource.hh: include/MultiFramedRTPSource.hh QCELPAudioRTPSource.$(CPP): include/QCELPAudioRTPSource.hh include/MultiFramedRTPSource.hh include/FramedFilter.hh include/QCELPAudioRTPSource.hh: include/RTPSource.hh AMRAudioRTPSource.$(CPP): include/AMRAudioRTPSource.hh include/MultiFramedRTPSource.hh include/AMRAudioRTPSource.hh: include/RTPSource.hh include/AMRAudioSource.hh JPEGVideoRTPSource.$(CPP): include/JPEGVideoRTPSource.hh include/JPEGVideoRTPSource.hh: include/MultiFramedRTPSource.hh VorbisAudioRTPSource.$(CPP): include/VorbisAudioRTPSource.hh include/VorbisAudioRTPSource.hh: include/MultiFramedRTPSource.hh VP8VideoRTPSource.$(CPP): include/VP8VideoRTPSource.hh include/VP8VideoRTPSource.hh: include/MultiFramedRTPSource.hh ByteStreamFileSource.$(CPP): include/ByteStreamFileSource.hh include/InputFile.hh include/ByteStreamFileSource.hh: include/FramedFileSource.hh ByteStreamMultiFileSource.$(CPP): include/ByteStreamMultiFileSource.hh 
include/ByteStreamMultiFileSource.hh: include/ByteStreamFileSource.hh ByteStreamMemoryBufferSource.$(CPP): include/ByteStreamMemoryBufferSource.hh include/ByteStreamMemoryBufferSource.hh: include/FramedSource.hh BasicUDPSource.$(CPP): include/BasicUDPSource.hh include/BasicUDPSource.hh: include/FramedSource.hh DeviceSource.$(CPP): include/DeviceSource.hh include/DeviceSource.hh: include/FramedSource.hh AudioInputDevice.$(CPP): include/AudioInputDevice.hh include/AudioInputDevice.hh: include/FramedSource.hh WAVAudioFileSource.$(CPP): include/WAVAudioFileSource.hh include/InputFile.hh include/WAVAudioFileSource.hh: include/AudioInputDevice.hh MPEG1or2Demux.$(CPP): include/MPEG1or2Demux.hh include/MPEG1or2DemuxedElementaryStream.hh StreamParser.hh include/MPEG1or2Demux.hh: include/FramedSource.hh include/MPEG1or2DemuxedElementaryStream.hh: include/MPEG1or2Demux.hh StreamParser.hh: include/FramedSource.hh MPEG1or2DemuxedElementaryStream.$(CPP): include/MPEG1or2DemuxedElementaryStream.hh MPEGVideoStreamFramer.$(CPP): MPEGVideoStreamParser.hh MPEGVideoStreamParser.hh: StreamParser.hh include/MPEGVideoStreamFramer.hh include/MPEGVideoStreamFramer.hh: include/FramedFilter.hh MPEG1or2VideoStreamFramer.$(CPP): include/MPEG1or2VideoStreamFramer.hh MPEGVideoStreamParser.hh include/MPEG1or2VideoStreamFramer.hh: include/MPEGVideoStreamFramer.hh MPEG1or2VideoStreamDiscreteFramer.$(CPP): include/MPEG1or2VideoStreamDiscreteFramer.hh include/MPEG1or2VideoStreamDiscreteFramer.hh: include/MPEG1or2VideoStreamFramer.hh MPEG4VideoStreamFramer.$(CPP): include/MPEG4VideoStreamFramer.hh MPEGVideoStreamParser.hh include/MPEG4LATMAudioRTPSource.hh include/MPEG4VideoStreamFramer.hh: include/MPEGVideoStreamFramer.hh MPEG4VideoStreamDiscreteFramer.$(CPP): include/MPEG4VideoStreamDiscreteFramer.hh include/MPEG4VideoStreamDiscreteFramer.hh: include/MPEG4VideoStreamFramer.hh H264or5VideoStreamFramer.$(CPP): include/H264or5VideoStreamFramer.hh MPEGVideoStreamParser.hh include/BitVector.hh include/H264or5VideoStreamFramer.hh: include/MPEGVideoStreamFramer.hh H264or5VideoStreamDiscreteFramer.$(CPP): include/H264or5VideoStreamDiscreteFramer.hh include/H264or5VideoStreamDiscreteFramer.hh: include/H264or5VideoStreamFramer.hh H264VideoStreamFramer.$(CPP): include/H264VideoStreamFramer.hh include/H264VideoStreamFramer.hh: include/H264or5VideoStreamFramer.hh H264VideoStreamDiscreteFramer.$(CPP): include/H264VideoStreamDiscreteFramer.hh include/H264VideoStreamDiscreteFramer.hh: include/H264VideoStreamFramer.hh H265VideoStreamFramer.$(CPP): include/H265VideoStreamFramer.hh include/H265VideoStreamFramer.hh: include/H264or5VideoStreamFramer.hh H265VideoStreamDiscreteFramer.$(CPP): include/H265VideoStreamDiscreteFramer.hh include/H265VideoStreamDiscreteFramer.hh: include/H265VideoStreamFramer.hh MPEGVideoStreamParser.$(CPP): MPEGVideoStreamParser.hh MPEG1or2AudioStreamFramer.$(CPP): include/MPEG1or2AudioStreamFramer.hh StreamParser.hh MP3Internals.hh include/MPEG1or2AudioStreamFramer.hh: include/FramedFilter.hh MPEG1or2AudioRTPSource.$(CPP): include/MPEG1or2AudioRTPSource.hh include/MPEG1or2AudioRTPSource.hh: include/MultiFramedRTPSource.hh MPEG4LATMAudioRTPSource.$(CPP): include/MPEG4LATMAudioRTPSource.hh include/MPEG4LATMAudioRTPSource.hh: include/MultiFramedRTPSource.hh MPEG4ESVideoRTPSource.$(CPP): include/MPEG4ESVideoRTPSource.hh include/MPEG4ESVideoRTPSource.hh: include/MultiFramedRTPSource.hh MPEG4GenericRTPSource.$(CPP): include/MPEG4GenericRTPSource.hh include/BitVector.hh include/MPEG4LATMAudioRTPSource.hh 
include/MPEG4GenericRTPSource.hh: include/MultiFramedRTPSource.hh MP3FileSource.$(CPP): include/MP3FileSource.hh MP3StreamState.hh include/InputFile.hh include/MP3FileSource.hh: include/FramedFileSource.hh MP3StreamState.hh: MP3Internals.hh MP3Internals.hh: include/BitVector.hh MP3Transcoder.$(CPP): include/MP3ADU.hh include/MP3Transcoder.hh include/MP3ADU.hh: include/FramedFilter.hh include/MP3Transcoder.hh: include/MP3ADU.hh include/MP3ADUTranscoder.hh include/MP3ADUTranscoder.hh: include/FramedFilter.hh MP3ADU.$(CPP): include/MP3ADU.hh MP3ADUdescriptor.hh MP3Internals.hh MP3ADUdescriptor.$(CPP): MP3ADUdescriptor.hh MP3ADUinterleaving.$(CPP): include/MP3ADUinterleaving.hh MP3ADUdescriptor.hh include/MP3ADUinterleaving.hh: include/FramedFilter.hh MP3ADUTranscoder.$(CPP): include/MP3ADUTranscoder.hh MP3Internals.hh MP3StreamState.$(CPP): MP3StreamState.hh include/InputFile.hh MP3Internals.$(CPP): MP3InternalsHuffman.hh MP3InternalsHuffman.hh: MP3Internals.hh MP3InternalsHuffman.$(CPP): MP3InternalsHuffman.hh MP3InternalsHuffmanTable.$(CPP): MP3InternalsHuffman.hh MP3ADURTPSource.$(CPP): include/MP3ADURTPSource.hh MP3ADUdescriptor.hh include/MP3ADURTPSource.hh: include/MultiFramedRTPSource.hh MPEG1or2VideoRTPSource.$(CPP): include/MPEG1or2VideoRTPSource.hh include/MPEG1or2VideoRTPSource.hh: include/MultiFramedRTPSource.hh MPEG2TransportStreamMultiplexor.$(CPP): include/MPEG2TransportStreamMultiplexor.hh include/MPEG2TransportStreamMultiplexor.hh: include/FramedSource.hh include/MPEG1or2Demux.hh MPEG2TransportStreamFromPESSource.$(CPP): include/MPEG2TransportStreamFromPESSource.hh include/MPEG2TransportStreamFromPESSource.hh: include/MPEG2TransportStreamMultiplexor.hh include/MPEG1or2DemuxedElementaryStream.hh MPEG2TransportStreamFromESSource.$(CPP): include/MPEG2TransportStreamFromESSource.hh include/MPEG2TransportStreamFromESSource.hh: include/MPEG2TransportStreamMultiplexor.hh MPEG2TransportStreamFramer.$(CPP): include/MPEG2TransportStreamFramer.hh include/MPEG2TransportStreamFramer.hh: include/FramedFilter.hh include/MPEG2TransportStreamIndexFile.hh ADTSAudioFileSource.$(CPP): include/ADTSAudioFileSource.hh include/InputFile.hh include/ADTSAudioFileSource.hh: include/FramedFileSource.hh H263plusVideoRTPSource.$(CPP): include/H263plusVideoRTPSource.hh include/H263plusVideoRTPSource.hh: include/MultiFramedRTPSource.hh H263plusVideoStreamFramer.$(CPP): include/H263plusVideoStreamFramer.hh H263plusVideoStreamParser.hh include/H263plusVideoStreamFramer.hh: include/FramedFilter.hh H263plusVideoStreamParser.hh: StreamParser.hh H263plusVideoStreamParser.$(CPP): H263plusVideoStreamParser.hh include/H263plusVideoStreamFramer.hh AC3AudioStreamFramer.$(CPP): include/AC3AudioStreamFramer.hh StreamParser.hh include/AC3AudioStreamFramer.hh: include/FramedFilter.hh AC3AudioRTPSource.$(CPP): include/AC3AudioRTPSource.hh include/AC3AudioRTPSource.hh: include/MultiFramedRTPSource.hh DVVideoRTPSource.$(CPP): include/DVVideoRTPSource.hh include/DVVideoRTPSource.hh: include/MultiFramedRTPSource.hh JPEGVideoSource.$(CPP): include/JPEGVideoSource.hh include/JPEGVideoSource.hh: include/FramedSource.hh AMRAudioSource.$(CPP): include/AMRAudioSource.hh include/AMRAudioSource.hh: include/FramedSource.hh AMRAudioFileSource.$(CPP): include/AMRAudioFileSource.hh include/InputFile.hh include/AMRAudioFileSource.hh: include/AMRAudioSource.hh InputFile.$(CPP): include/InputFile.hh StreamReplicator.$(CPP): include/StreamReplicator.hh include/StreamReplicator.hh: include/FramedSource.hh MediaSink.$(CPP): include/MediaSink.hh 
include/MediaSink.hh: include/FramedSource.hh FileSink.$(CPP): include/FileSink.hh include/OutputFile.hh include/FileSink.hh: include/MediaSink.hh BasicUDPSink.$(CPP): include/BasicUDPSink.hh include/BasicUDPSink.hh: include/MediaSink.hh AMRAudioFileSink.$(CPP): include/AMRAudioFileSink.hh include/AMRAudioSource.hh include/OutputFile.hh include/AMRAudioFileSink.hh: include/FileSink.hh H264VideoFileSink.$(CPP): include/H264VideoFileSink.hh include/OutputFile.hh include/H264VideoRTPSource.hh include/H264VideoFileSink.hh: include/FileSink.hh RTPSink.$(CPP): include/RTPSink.hh include/RTPSink.hh: include/MediaSink.hh include/RTPInterface.hh MultiFramedRTPSink.$(CPP): include/MultiFramedRTPSink.hh include/MultiFramedRTPSink.hh: include/RTPSink.hh AudioRTPSink.$(CPP): include/AudioRTPSink.hh include/AudioRTPSink.hh: include/MultiFramedRTPSink.hh VideoRTPSink.$(CPP): include/VideoRTPSink.hh include/VideoRTPSink.hh: include/MultiFramedRTPSink.hh TextRTPSink.$(CPP): include/TextRTPSink.hh include/TextRTPSink.hh: include/MultiFramedRTPSink.hh RTPInterface.$(CPP): include/RTPInterface.hh MPEG1or2AudioRTPSink.$(CPP): include/MPEG1or2AudioRTPSink.hh include/MPEG1or2AudioRTPSink.hh: include/AudioRTPSink.hh MP3ADURTPSink.$(CPP): include/MP3ADURTPSink.hh include/MP3ADURTPSink.hh: include/AudioRTPSink.hh MPEG1or2VideoRTPSink.$(CPP): include/MPEG1or2VideoRTPSink.hh include/MPEG1or2VideoStreamFramer.hh include/MPEG1or2VideoRTPSink.hh: include/VideoRTPSink.hh MPEG4LATMAudioRTPSink.$(CPP): include/MPEG4LATMAudioRTPSink.hh include/MPEG4LATMAudioRTPSink.hh: include/AudioRTPSink.hh MPEG4GenericRTPSink.$(CPP): include/MPEG4GenericRTPSink.hh include/Locale.hh include/MPEG4GenericRTPSink.hh: include/MultiFramedRTPSink.hh MPEG4ESVideoRTPSink.$(CPP): include/MPEG4ESVideoRTPSink.hh include/MPEG4VideoStreamFramer.hh include/MPEG4LATMAudioRTPSource.hh include/MPEG4ESVideoRTPSink.hh: include/VideoRTPSink.hh H263plusVideoRTPSink.$(CPP): include/H263plusVideoRTPSink.hh include/H263plusVideoRTPSink.hh: include/VideoRTPSink.hh H264or5VideoRTPSink.$(CPP): include/H264or5VideoRTPSink.hh include/H264or5VideoStreamFramer.hh include/H264or5VideoRTPSink.hh: include/VideoRTPSink.hh include/FramedFilter.hh H264VideoRTPSink.$(CPP): include/H264VideoRTPSink.hh include/H264VideoStreamFramer.hh include/Base64.hh include/H264VideoRTPSource.hh include/H264VideoRTPSink.hh: include/H264or5VideoRTPSink.hh H265VideoRTPSink.$(CPP): include/H265VideoRTPSink.hh include/H265VideoStreamFramer.hh include/Base64.hh include/BitVector.hh include/H264VideoRTPSource.hh include/H265VideoRTPSink.hh: include/H264or5VideoRTPSink.hh DVVideoRTPSink.$(CPP): include/DVVideoRTPSink.hh include/DVVideoRTPSink.hh: include/VideoRTPSink.hh include/DVVideoStreamFramer.hh include/DVVideoStreamFramer.hh: include/FramedFilter.hh AC3AudioRTPSink.$(CPP): include/AC3AudioRTPSink.hh include/AC3AudioRTPSink.hh: include/AudioRTPSink.hh VorbisAudioRTPSink.$(CPP): include/VorbisAudioRTPSink.hh include/Base64.hh include/VorbisAudioRTPSink.hh: include/AudioRTPSink.hh TheoraVideoRTPSink.$(CPP): include/TheoraVideoRTPSink.hh include/Base64.hh include/TheoraVideoRTPSink.hh: include/VideoRTPSink.hh VP8VideoRTPSink.$(CPP): include/VP8VideoRTPSink.hh include/VP8VideoRTPSink.hh: include/VideoRTPSink.hh GSMAudioRTPSink.$(CPP): include/GSMAudioRTPSink.hh include/GSMAudioRTPSink.hh: include/AudioRTPSink.hh JPEGVideoRTPSink.$(CPP): include/JPEGVideoRTPSink.hh include/JPEGVideoSource.hh include/JPEGVideoRTPSink.hh: include/VideoRTPSink.hh SimpleRTPSink.$(CPP): include/SimpleRTPSink.hh 
include/SimpleRTPSink.hh: include/MultiFramedRTPSink.hh AMRAudioRTPSink.$(CPP): include/AMRAudioRTPSink.hh include/AMRAudioSource.hh include/AMRAudioRTPSink.hh: include/AudioRTPSink.hh T140TextRTPSink.$(CPP): include/T140TextRTPSink.hh include/T140TextRTPSink.hh: include/TextRTPSink.hh include/FramedFilter.hh TCPStreamSink.$(CPP): include/TCPStreamSink.hh include/TCPStreamSink.hh: include/MediaSink.hh OutputFile.$(CPP): include/OutputFile.hh uLawAudioFilter.$(CPP): include/uLawAudioFilter.hh include/uLawAudioFilter.hh: include/FramedFilter.hh MPEG2IndexFromTransportStream.$(CPP): include/MPEG2IndexFromTransportStream.hh include/MPEG2IndexFromTransportStream.hh: include/FramedFilter.hh MPEG2TransportStreamIndexFile.$(CPP): include/MPEG2TransportStreamIndexFile.hh include/InputFile.hh include/MPEG2TransportStreamIndexFile.hh: include/Media.hh MPEG2TransportStreamTrickModeFilter.$(CPP): include/MPEG2TransportStreamTrickModeFilter.hh include/ByteStreamFileSource.hh include/MPEG2TransportStreamTrickModeFilter.hh: include/FramedFilter.hh include/MPEG2TransportStreamIndexFile.hh RTCP.$(CPP): include/RTCP.hh rtcp_from_spec.h include/RTCP.hh: include/RTPSink.hh include/RTPSource.hh rtcp_from_spec.$(C): rtcp_from_spec.h RTSPServer.$(CPP): include/RTSPServer.hh include/RTSPCommon.hh include/RTSPRegisterSender.hh include/ProxyServerMediaSession.hh include/Base64.hh include/RTSPServer.hh: include/ServerMediaSession.hh include/DigestAuthentication.hh include/RTSPCommon.hh include/ServerMediaSession.hh: include/Media.hh include/FramedSource.hh include/RTPInterface.hh RTSPClient.$(CPP): include/RTSPClient.hh include/RTSPCommon.hh include/Base64.hh include/Locale.hh ourMD5.hh include/RTSPClient.hh: include/MediaSession.hh include/DigestAuthentication.hh RTSPCommon.$(CPP): include/RTSPCommon.hh include/Locale.hh RTSPServerSupportingHTTPStreaming.$(CPP): include/RTSPServerSupportingHTTPStreaming.hh include/RTSPCommon.hh include/RTSPServerSupportingHTTPStreaming.hh: include/RTSPServer.hh include/ByteStreamMemoryBufferSource.hh include/TCPStreamSink.hh RTSPRegisterSender.$(CPP): include/RTSPRegisterSender.hh include/RTSPRegisterSender.hh: include/RTSPClient.hh SIPClient.$(CPP): include/SIPClient.hh include/SIPClient.hh: include/MediaSession.hh include/DigestAuthentication.hh MediaSession.$(CPP): include/liveMedia.hh include/Locale.hh include/MediaSession.hh: include/RTCP.hh include/FramedFilter.hh ServerMediaSession.$(CPP): include/ServerMediaSession.hh PassiveServerMediaSubsession.$(CPP): include/PassiveServerMediaSubsession.hh include/PassiveServerMediaSubsession.hh: include/ServerMediaSession.hh include/RTPSink.hh include/RTCP.hh OnDemandServerMediaSubsession.$(CPP): include/OnDemandServerMediaSubsession.hh include/OnDemandServerMediaSubsession.hh: include/ServerMediaSession.hh include/RTPSink.hh include/BasicUDPSink.hh include/RTCP.hh FileServerMediaSubsession.$(CPP): include/FileServerMediaSubsession.hh include/FileServerMediaSubsession.hh: include/OnDemandServerMediaSubsession.hh MPEG4VideoFileServerMediaSubsession.$(CPP): include/MPEG4VideoFileServerMediaSubsession.hh include/MPEG4ESVideoRTPSink.hh include/ByteStreamFileSource.hh include/MPEG4VideoStreamFramer.hh include/MPEG4VideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh H264VideoFileServerMediaSubsession.$(CPP): include/H264VideoFileServerMediaSubsession.hh include/H264VideoRTPSink.hh include/ByteStreamFileSource.hh include/H264VideoStreamFramer.hh include/H264VideoFileServerMediaSubsession.hh: 
include/FileServerMediaSubsession.hh H265VideoFileServerMediaSubsession.$(CPP): include/H265VideoFileServerMediaSubsession.hh include/H265VideoRTPSink.hh include/ByteStreamFileSource.hh include/H265VideoStreamFramer.hh include/H265VideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh H263plusVideoFileServerMediaSubsession.$(CPP): include/H263plusVideoFileServerMediaSubsession.hh include/H263plusVideoRTPSink.hh include/ByteStreamFileSource.hh include/H263plusVideoStreamFramer.hh include/H263plusVideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh WAVAudioFileServerMediaSubsession.$(CPP): include/WAVAudioFileServerMediaSubsession.hh include/WAVAudioFileSource.hh include/uLawAudioFilter.hh include/SimpleRTPSink.hh include/WAVAudioFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh AMRAudioFileServerMediaSubsession.$(CPP): include/AMRAudioFileServerMediaSubsession.hh include/AMRAudioRTPSink.hh include/AMRAudioFileSource.hh include/AMRAudioFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh MP3AudioFileServerMediaSubsession.$(CPP): include/MP3AudioFileServerMediaSubsession.hh include/MPEG1or2AudioRTPSink.hh include/MP3ADURTPSink.hh include/MP3FileSource.hh include/MP3ADU.hh include/MP3AudioFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/MP3ADUinterleaving.hh MPEG1or2VideoFileServerMediaSubsession.$(CPP): include/MPEG1or2VideoFileServerMediaSubsession.hh include/MPEG1or2VideoRTPSink.hh include/ByteStreamFileSource.hh include/MPEG1or2VideoStreamFramer.hh include/MPEG1or2VideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh MPEG1or2FileServerDemux.$(CPP): include/MPEG1or2FileServerDemux.hh include/MPEG1or2DemuxedServerMediaSubsession.hh include/ByteStreamFileSource.hh include/MPEG1or2FileServerDemux.hh: include/ServerMediaSession.hh include/MPEG1or2DemuxedElementaryStream.hh MPEG1or2DemuxedServerMediaSubsession.$(CPP): include/MPEG1or2DemuxedServerMediaSubsession.hh include/MPEG1or2AudioStreamFramer.hh include/MPEG1or2AudioRTPSink.hh include/MPEG1or2VideoStreamFramer.hh include/MPEG1or2VideoRTPSink.hh include/AC3AudioStreamFramer.hh include/AC3AudioRTPSink.hh include/ByteStreamFileSource.hh include/MPEG1or2DemuxedServerMediaSubsession.hh: include/OnDemandServerMediaSubsession.hh include/MPEG1or2FileServerDemux.hh MPEG2TransportFileServerMediaSubsession.$(CPP): include/MPEG2TransportFileServerMediaSubsession.hh include/SimpleRTPSink.hh include/MPEG2TransportFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/MPEG2TransportStreamFramer.hh include/ByteStreamFileSource.hh include/MPEG2TransportStreamTrickModeFilter.hh include/MPEG2TransportStreamFromESSource.hh ADTSAudioFileServerMediaSubsession.$(CPP): include/ADTSAudioFileServerMediaSubsession.hh include/ADTSAudioFileSource.hh include/MPEG4GenericRTPSink.hh include/ADTSAudioFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh DVVideoFileServerMediaSubsession.$(CPP): include/DVVideoFileServerMediaSubsession.hh include/DVVideoRTPSink.hh include/ByteStreamFileSource.hh include/DVVideoStreamFramer.hh include/DVVideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh AC3AudioFileServerMediaSubsession.$(CPP): include/AC3AudioFileServerMediaSubsession.hh include/AC3AudioRTPSink.hh include/ByteStreamFileSource.hh include/AC3AudioStreamFramer.hh include/AC3AudioFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh MPEG2TransportUDPServerMediaSubsession.$(CPP): 
include/MPEG2TransportUDPServerMediaSubsession.hh include/BasicUDPSource.hh include/SimpleRTPSource.hh include/MPEG2TransportStreamFramer.hh include/SimpleRTPSink.hh include/MPEG2TransportUDPServerMediaSubsession.hh: include/OnDemandServerMediaSubsession.hh ProxyServerMediaSession.$(CPP): include/liveMedia.hh include/RTSPCommon.hh include/ProxyServerMediaSession.hh: include/ServerMediaSession.hh include/MediaSession.hh include/RTSPClient.hh QuickTimeFileSink.$(CPP): include/QuickTimeFileSink.hh include/InputFile.hh include/OutputFile.hh include/QuickTimeGenericRTPSource.hh include/H263plusVideoRTPSource.hh include/MPEG4GenericRTPSource.hh include/MPEG4LATMAudioRTPSource.hh include/QuickTimeFileSink.hh: include/MediaSession.hh QuickTimeGenericRTPSource.$(CPP): include/QuickTimeGenericRTPSource.hh include/QuickTimeGenericRTPSource.hh: include/MultiFramedRTPSource.hh AVIFileSink.$(CPP): include/AVIFileSink.hh include/InputFile.hh include/OutputFile.hh include/AVIFileSink.hh: include/MediaSession.hh MatroskaFile.$(CPP): MatroskaFileParser.hh MatroskaDemuxedTrack.hh include/ByteStreamFileSource.hh MatroskaFileParser.hh: StreamParser.hh include/MatroskaFile.hh EBMLNumber.hh include/MatroskaFile.hh: include/Media.hh MatroskaDemuxedTrack.hh: include/FramedSource.hh MatroskaFileParser.$(CPP): MatroskaFileParser.hh MatroskaDemuxedTrack.hh include/ByteStreamFileSource.hh EBMLNumber.$(CPP): EBMLNumber.hh MatroskaDemuxedTrack.$(CPP): MatroskaDemuxedTrack.hh include/MatroskaFile.hh H264VideoMatroskaFileServerMediaSubsession.$(CPP): H264VideoMatroskaFileServerMediaSubsession.hh include/H264VideoStreamDiscreteFramer.hh H264VideoMatroskaFileServerMediaSubsession.hh: include/H264VideoFileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh MatroskaDemuxedTrack.hh H265VideoMatroskaFileServerMediaSubsession.$(CPP): H265VideoMatroskaFileServerMediaSubsession.hh include/H265VideoStreamDiscreteFramer.hh H265VideoMatroskaFileServerMediaSubsession.hh: include/H265VideoFileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh MatroskaDemuxedTrack.hh VP8VideoMatroskaFileServerMediaSubsession.$(CPP): VP8VideoMatroskaFileServerMediaSubsession.hh include/VP8VideoRTPSink.hh VP8VideoMatroskaFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh MatroskaDemuxedTrack.hh AACAudioMatroskaFileServerMediaSubsession.$(CPP): AACAudioMatroskaFileServerMediaSubsession.hh include/MPEG4GenericRTPSink.hh AACAudioMatroskaFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh MatroskaDemuxedTrack.hh AC3AudioMatroskaFileServerMediaSubsession.$(CPP): AC3AudioMatroskaFileServerMediaSubsession.hh include/AC3AudioRTPSink.hh AC3AudioMatroskaFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh MatroskaDemuxedTrack.hh VorbisAudioMatroskaFileServerMediaSubsession.$(CPP): VorbisAudioMatroskaFileServerMediaSubsession.hh include/VorbisAudioRTPSink.hh VorbisAudioMatroskaFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh MatroskaDemuxedTrack.hh MP3AudioMatroskaFileServerMediaSubsession.$(CPP): MP3AudioMatroskaFileServerMediaSubsession.hh MP3AudioMatroskaFileServerMediaSubsession.hh: include/MP3AudioFileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh MatroskaDemuxedTrack.hh T140TextMatroskaFileServerMediaSubsession.$(CPP): T140TextMatroskaFileServerMediaSubsession.hh include/T140TextRTPSink.hh 
T140TextMatroskaFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh MatroskaDemuxedTrack.hh MatroskaFileServerDemux.$(CPP): include/MatroskaFileServerDemux.hh H264VideoMatroskaFileServerMediaSubsession.hh H265VideoMatroskaFileServerMediaSubsession.hh VP8VideoMatroskaFileServerMediaSubsession.hh AACAudioMatroskaFileServerMediaSubsession.hh AC3AudioMatroskaFileServerMediaSubsession.hh VorbisAudioMatroskaFileServerMediaSubsession.hh MP3AudioMatroskaFileServerMediaSubsession.hh T140TextMatroskaFileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh: include/ServerMediaSession.hh include/MatroskaFile.hh DarwinInjector.$(CPP): include/DarwinInjector.hh include/DarwinInjector.hh: include/RTSPClient.hh include/RTCP.hh BitVector.$(CPP): include/BitVector.hh StreamParser.$(CPP): StreamParser.hh DigestAuthentication.$(CPP): include/DigestAuthentication.hh ourMD5.hh ourMD5.$(CPP): ourMD5.hh Base64.$(CPP): include/Base64.hh Locale.$(CPP): include/Locale.hh include/liveMedia.hh:: include/MPEG1or2AudioRTPSink.hh include/MP3ADURTPSink.hh include/MPEG1or2VideoRTPSink.hh include/MPEG4ESVideoRTPSink.hh include/BasicUDPSink.hh include/AMRAudioFileSink.hh include/H264VideoFileSink.hh include/GSMAudioRTPSink.hh include/H263plusVideoRTPSink.hh include/H264VideoRTPSink.hh include/H265VideoRTPSink.hh include/DVVideoRTPSource.hh include/DVVideoRTPSink.hh include/DVVideoStreamFramer.hh include/H264VideoStreamFramer.hh include/H265VideoStreamFramer.hh include/H264VideoStreamDiscreteFramer.hh include/H265VideoStreamDiscreteFramer.hh include/JPEGVideoRTPSink.hh include/SimpleRTPSink.hh include/uLawAudioFilter.hh include/MPEG2IndexFromTransportStream.hh include/MPEG2TransportStreamTrickModeFilter.hh include/ByteStreamMultiFileSource.hh include/ByteStreamMemoryBufferSource.hh include/BasicUDPSource.hh include/SimpleRTPSource.hh include/MPEG1or2AudioRTPSource.hh include/MPEG4LATMAudioRTPSource.hh include/MPEG4LATMAudioRTPSink.hh include/MPEG4ESVideoRTPSource.hh include/MPEG4GenericRTPSource.hh include/MP3ADURTPSource.hh include/QCELPAudioRTPSource.hh include/AMRAudioRTPSource.hh include/JPEGVideoRTPSource.hh include/JPEGVideoSource.hh include/MPEG1or2VideoRTPSource.hh include/VorbisAudioRTPSource.hh include/VP8VideoRTPSource.hh include/liveMedia.hh:: include/MPEG2TransportStreamFromPESSource.hh include/MPEG2TransportStreamFromESSource.hh include/MPEG2TransportStreamFramer.hh include/ADTSAudioFileSource.hh include/H261VideoRTPSource.hh include/H263plusVideoRTPSource.hh include/H264VideoRTPSource.hh include/MP3FileSource.hh include/MP3ADU.hh include/MP3ADUinterleaving.hh include/MP3Transcoder.hh include/MPEG1or2DemuxedElementaryStream.hh include/MPEG1or2AudioStreamFramer.hh include/MPEG1or2VideoStreamDiscreteFramer.hh include/MPEG4VideoStreamDiscreteFramer.hh include/H263plusVideoStreamFramer.hh include/AC3AudioStreamFramer.hh include/AC3AudioRTPSource.hh include/AC3AudioRTPSink.hh include/VorbisAudioRTPSink.hh include/TheoraVideoRTPSink.hh include/VP8VideoRTPSink.hh include/MPEG4GenericRTPSink.hh include/DeviceSource.hh include/AudioInputDevice.hh include/WAVAudioFileSource.hh include/StreamReplicator.hh include/RTSPRegisterSender.hh include/liveMedia.hh:: include/RTSPServerSupportingHTTPStreaming.hh include/RTSPClient.hh include/SIPClient.hh include/QuickTimeFileSink.hh include/QuickTimeGenericRTPSource.hh include/AVIFileSink.hh include/PassiveServerMediaSubsession.hh include/MPEG4VideoFileServerMediaSubsession.hh include/H264VideoFileServerMediaSubsession.hh 
include/H265VideoFileServerMediaSubsession.hh include/WAVAudioFileServerMediaSubsession.hh include/AMRAudioFileServerMediaSubsession.hh include/AMRAudioFileSource.hh include/AMRAudioRTPSink.hh include/T140TextRTPSink.hh include/TCPStreamSink.hh include/MP3AudioFileServerMediaSubsession.hh include/MPEG1or2VideoFileServerMediaSubsession.hh include/MPEG1or2FileServerDemux.hh include/MPEG2TransportFileServerMediaSubsession.hh include/H263plusVideoFileServerMediaSubsession.hh include/ADTSAudioFileServerMediaSubsession.hh include/DVVideoFileServerMediaSubsession.hh include/AC3AudioFileServerMediaSubsession.hh include/MPEG2TransportUDPServerMediaSubsession.hh include/MatroskaFileServerDemux.hh include/ProxyServerMediaSession.hh include/DarwinInjector.hh clean: -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~ install: install1 $(INSTALL2) install1: $(LIVEMEDIA_LIB) install -d $(DESTDIR)$(PREFIX)/include/liveMedia $(DESTDIR)$(LIBDIR) install -m 644 include/*.hh $(DESTDIR)$(PREFIX)/include/liveMedia install -m 644 $(LIVEMEDIA_LIB) $(DESTDIR)$(LIBDIR) install_shared_libraries: $(LIVEMEDIA_LIB) ln -s $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).$(SHORT_LIB_SUFFIX) ln -s $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).so ##### Any additional, platform-specific rules come here: live/liveMedia/TextRTPSink.cpp000444 001751 000000 00000002506 12265042432 016504 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A generic RTP sink for text codecs (abstract base class) // Implementation #include "TextRTPSink.hh" TextRTPSink::TextRTPSink(UsageEnvironment& env, Groupsock* rtpgs, unsigned char rtpPayloadType, unsigned rtpTimestampFrequency, char const* rtpPayloadFormatName) : MultiFramedRTPSink(env, rtpgs, rtpPayloadType, rtpTimestampFrequency, rtpPayloadFormatName) { } TextRTPSink::~TextRTPSink() { } char const* TextRTPSink::sdpMediaType() const { return "text"; } live/liveMedia/AC3AudioMatroskaFileServerMediaSubsession.hh000444 001751 000000 00000004270 12265042432 024221 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an AC3 audio track within a Matroska file. // C++ header #ifndef _AC3_AUDIO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #define _AC3_AUDIO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif #ifndef _MATROSKA_FILE_SERVER_DEMUX_HH #include "MatroskaFileServerDemux.hh" #endif class AC3AudioMatroskaFileServerMediaSubsession: public FileServerMediaSubsession { public: static AC3AudioMatroskaFileServerMediaSubsession* createNew(MatroskaFileServerDemux& demux, unsigned trackNumber); private: AC3AudioMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber); // called only by createNew(); virtual ~AC3AudioMatroskaFileServerMediaSubsession(); private: // redefined virtual functions virtual float duration() const; virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); private: MatroskaFileServerDemux& fOurDemux; unsigned fTrackNumber; }; #endif live/liveMedia/MatroskaDemuxedTrack.cpp000444 001751 000000 00000003376 12265042432 020435 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A media track, demultiplexed from a Matroska file // Implementation #include "MatroskaDemuxedTrack.hh" #include "MatroskaFile.hh" void MatroskaDemuxedTrack::seekToTime(double& seekNPT) { fOurSourceDemux.seekToTime(seekNPT); } MatroskaDemuxedTrack::MatroskaDemuxedTrack(UsageEnvironment& env, unsigned trackNumber, MatroskaDemux& sourceDemux) : FramedSource(env), fOurTrackNumber(trackNumber), fOurSourceDemux(sourceDemux), fDurationImbalance(0) { fPrevPresentationTime.tv_sec = 0; fPrevPresentationTime.tv_usec = 0; } MatroskaDemuxedTrack::~MatroskaDemuxedTrack() { fOurSourceDemux.removeTrack(fOurTrackNumber); } void MatroskaDemuxedTrack::doGetNextFrame() { fOurSourceDemux.continueReading(); } char const* MatroskaDemuxedTrack::MIMEtype() const { MatroskaTrack* track = fOurSourceDemux.fOurFile.lookup(fOurTrackNumber); if (track == NULL) return NULL; // shouldn't happen return track->mimeType; } live/liveMedia/H264VideoMatroskaFileServerMediaSubsession.cpp000444 001751 000000 00000011437 12265042432 024471 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an H264 video track within a Matroska file. // Implementation #include "H264VideoMatroskaFileServerMediaSubsession.hh" #include "H264VideoStreamDiscreteFramer.hh" #include "MatroskaDemuxedTrack.hh" H264VideoMatroskaFileServerMediaSubsession* H264VideoMatroskaFileServerMediaSubsession ::createNew(MatroskaFileServerDemux& demux, unsigned trackNumber) { return new H264VideoMatroskaFileServerMediaSubsession(demux, trackNumber); } #define CHECK_PTR if (ptr >= limit) return #define NUM_BYTES_REMAINING (unsigned)(limit - ptr) H264VideoMatroskaFileServerMediaSubsession ::H264VideoMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber) : H264VideoFileServerMediaSubsession(demux.envir(), demux.fileName(), False), fOurDemux(demux), fTrackNumber(trackNumber), fSPSSize(0), fSPS(NULL), fPPSSize(0), fPPS(NULL) { // Use our track's 'Codec Private' data: Bytes 5 and beyond contain SPS and PPSs: unsigned numSPSandPPSBytes; u_int8_t* SPSandPPSBytes; MatroskaTrack* track = fOurDemux.lookup(fTrackNumber); if (track->codecPrivateSize >= 6) { numSPSandPPSBytes = track->codecPrivateSize - 5; SPSandPPSBytes = &track->codecPrivate[5]; } else { numSPSandPPSBytes = 0; SPSandPPSBytes = NULL; } // Extract, from "SPSandPPSBytes", one SPS NAL unit, and one PPS NAL unit. // (I hope one is all we need of each.) 
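// (Background for the parsing below - a sketch of the 'Codec Private' layout that we assume here, i.e., an ISO-style 'AVCDecoderConfigurationRecord':
//   bytes 0..4:  configuration version, profile, compatibility flags, level, and 'lengthSizeMinusOne'
//   byte 5:      3 reserved bits, then a 5-bit count of SPS NAL units
//   for each SPS:  a 2-byte (big-endian) size, followed by the SPS NAL unit itself
//   next byte:   a count of PPS NAL units
//   for each PPS:  a 2-byte (big-endian) size, followed by the PPS NAL unit itself
// That is why we started at byte 5, mask the counts with 0x1F, and check each NAL unit's "nal_unit_type" (7 for a SPS; 8 for a PPS) as a sanity check.)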
if (numSPSandPPSBytes == 0 || SPSandPPSBytes == NULL) return; // sanity check unsigned i; u_int8_t* ptr = SPSandPPSBytes; u_int8_t* limit = &SPSandPPSBytes[numSPSandPPSBytes]; unsigned numSPSs = (*ptr++)&0x1F; CHECK_PTR; for (i = 0; i < numSPSs; ++i) { unsigned spsSize = (*ptr++)<<8; CHECK_PTR; spsSize |= *ptr++; CHECK_PTR; if (spsSize > NUM_BYTES_REMAINING) return; u_int8_t nal_unit_type = ptr[0]&0x1F; if (fSPS == NULL && nal_unit_type == 7/*sanity check*/) { // save the first one fSPSSize = spsSize; fSPS = new u_int8_t[spsSize]; memmove(fSPS, ptr, spsSize); } ptr += spsSize; } unsigned numPPSs = (*ptr++)&0x1F; CHECK_PTR; for (i = 0; i < numPPSs; ++i) { unsigned ppsSize = (*ptr++)<<8; CHECK_PTR; ppsSize |= *ptr++; CHECK_PTR; if (ppsSize > NUM_BYTES_REMAINING) return; u_int8_t nal_unit_type = ptr[0]&0x1F; if (fPPS == NULL && nal_unit_type == 8/*sanity check*/) { // save the first one fPPSSize = ppsSize; fPPS = new u_int8_t[ppsSize]; memmove(fPPS, ptr, ppsSize); } ptr += ppsSize; } } H264VideoMatroskaFileServerMediaSubsession ::~H264VideoMatroskaFileServerMediaSubsession() { delete[] fSPS; delete[] fPPS; } float H264VideoMatroskaFileServerMediaSubsession::duration() const { return fOurDemux.fileDuration(); } void H264VideoMatroskaFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) { // "inputSource" is a framer. *Its* source is the demuxed track that we seek on: H264VideoStreamDiscreteFramer* framer = (H264VideoStreamDiscreteFramer*)inputSource; MatroskaDemuxedTrack* demuxedTrack = (MatroskaDemuxedTrack*)(framer->inputSource()); demuxedTrack->seekToTime(seekNPT); } FramedSource* H264VideoMatroskaFileServerMediaSubsession ::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { // Allow for the possibility of very large NAL units being fed to our "RTPSink" objects: OutPacketBuffer::maxSize = 300000; // bytes estBitrate = 500; // kbps, estimate // Create the video source: FramedSource* baseH264VideoSource = fOurDemux.newDemuxedTrack(clientSessionId, fTrackNumber); if (baseH264VideoSource == NULL) return NULL; // Create a framer for the Video stream: H264VideoStreamDiscreteFramer* framer = H264VideoStreamDiscreteFramer::createNew(envir(), baseH264VideoSource); framer->setVPSandSPSandPPS(NULL, 0, fSPS, fSPSSize, fPPS, fPPSSize); return framer; } live/liveMedia/EBMLNumber.cpp000444 001751 000000 00000014411 12265042432 016233 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
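// (Background on the encoding itself, for the implementation that follows: an EBML number is a variable-length integer, at most EBML_NUMBER_MAX_LEN bytes long; the position of the first 1 bit in its first byte tells you the total length in bytes, much as in UTF-8.  For element ids that leading 1 bit is kept as part of the value, but for data sizes it is stripped - which is what the "stripLeading1" constructor parameter below selects.)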
// EBML numbers (ids and sizes) // Implementation #include "EBMLNumber.hh" EBMLNumber::EBMLNumber(Boolean stripLeading1) : stripLeading1(stripLeading1), len(0) { } EBMLNumber::~EBMLNumber() { } char* EBMLNumber::hexString() const { static char printBuf[2*EBML_NUMBER_MAX_LEN + 1]; char* to = printBuf; for (unsigned i = 0; i < len; ++i) { sprintf(to, "%02X", data[i]); to += 2; } return printBuf; } u_int64_t EBMLNumber::val() const { u_int64_t result = 0; for (unsigned i = 0; i < len; ++i) { result = result*256 + data[i]; } return result; } EBMLId::EBMLId() : EBMLNumber(False) { } EBMLId::~EBMLId() { } char const* EBMLId::stringName() const { switch (val()) { case MATROSKA_ID_EBML: { return "EBML"; } case MATROSKA_ID_VOID: { return "Void"; } case MATROSKA_ID_CRC_32: { return "CRC-32"; } case MATROSKA_ID_SEGMENT: { return "Segment"; } case MATROSKA_ID_SEEK_HEAD: { return "Seek Head"; } case MATROSKA_ID_SEEK: { return "Seek"; } case MATROSKA_ID_SEEK_ID: { return "Seek ID"; } case MATROSKA_ID_SEEK_POSITION: { return "Seek Position"; } case MATROSKA_ID_INFO: { return "Segment Info"; } case MATROSKA_ID_SEGMENT_UID: { return "Segment UID"; } case MATROSKA_ID_DURATION: { return "Segment Duration"; } case MATROSKA_ID_TIMECODE_SCALE: { return "Timecode Scale"; } case MATROSKA_ID_DATE_UTC: { return "Date (UTC)"; } case MATROSKA_ID_TITLE: { return "Title"; } case MATROSKA_ID_MUXING_APP: { return "Muxing App"; } case MATROSKA_ID_WRITING_APP: { return "Writing App"; } case MATROSKA_ID_CLUSTER: { return "Cluster"; } case MATROSKA_ID_TIMECODE: { return "TimeCode"; } case MATROSKA_ID_POSITION: { return "Position"; } case MATROSKA_ID_PREV_SIZE: { return "Prev. Size"; } case MATROSKA_ID_SIMPLEBLOCK: { return "SimpleBlock"; } case MATROSKA_ID_BLOCK_GROUP: { return "Block Group"; } case MATROSKA_ID_BLOCK: { return "Block"; } case MATROSKA_ID_BLOCK_DURATION: { return "Block Duration"; } case MATROSKA_ID_REFERENCE_BLOCK: { return "Reference Block"; } case MATROSKA_ID_TRACKS: { return "Tracks"; } case MATROSKA_ID_TRACK_ENTRY: { return "Track Entry"; } case MATROSKA_ID_TRACK_NUMBER: { return "Track Number"; } case MATROSKA_ID_TRACK_UID: { return "Track UID"; } case MATROSKA_ID_TRACK_TYPE: { return "Track Type"; } case MATROSKA_ID_FLAG_ENABLED: { return "Flag Enabled"; } case MATROSKA_ID_FLAG_DEFAULT: { return "Flag Default"; } case MATROSKA_ID_FLAG_FORCED: { return "Flag Forced"; } case MATROSKA_ID_FLAG_LACING: { return "Flag Lacing"; } case MATROSKA_ID_MIN_CACHE: { return "Min Cache"; } case MATROSKA_ID_DEFAULT_DURATION: { return "Default Duration"; } case MATROSKA_ID_TRACK_TIMECODE_SCALE: { return "Track Timecode Scale"; } case MATROSKA_ID_MAX_BLOCK_ADDITION_ID: { return "Max Block Addition ID"; } case MATROSKA_ID_NAME: { return "Name"; } case MATROSKA_ID_LANGUAGE: { return "Language"; } case MATROSKA_ID_CODEC: { return "Codec ID"; } case MATROSKA_ID_CODEC_PRIVATE: { return "Codec Private"; } case MATROSKA_ID_CODEC_NAME: { return "Codec Name"; } case MATROSKA_ID_CODEC_DECODE_ALL: { return "Codec Decode All"; } case MATROSKA_ID_VIDEO: { return "Video Settings"; } case MATROSKA_ID_FLAG_INTERLACED: { return "Flag Interlaced"; } case MATROSKA_ID_PIXEL_WIDTH: { return "Pixel Width"; } case MATROSKA_ID_PIXEL_HEIGHT: { return "Pixel Height"; } case MATROSKA_ID_DISPLAY_WIDTH: { return "Display Width"; } case MATROSKA_ID_DISPLAY_HEIGHT: { return "Display Height"; } case MATROSKA_ID_DISPLAY_UNIT: { return "Display Unit"; } case MATROSKA_ID_AUDIO: { return "Audio Settings"; } case MATROSKA_ID_SAMPLING_FREQUENCY: { return 
"Sampling Frequency"; } case MATROSKA_ID_OUTPUT_SAMPLING_FREQUENCY: { return "Output Sampling Frequency"; } case MATROSKA_ID_CHANNELS: { return "Channels"; } case MATROSKA_ID_BIT_DEPTH: { return "Bit Depth"; } case MATROSKA_ID_CONTENT_ENCODINGS: { return "Content Encodings"; } case MATROSKA_ID_CONTENT_ENCODING: { return "Content Encoding"; } case MATROSKA_ID_CONTENT_COMPRESSION: { return "Content Compression"; } case MATROSKA_ID_CONTENT_COMP_ALGO: { return "Content Compression Algorithm"; } case MATROSKA_ID_CONTENT_COMP_SETTINGS: { return "Content Compression Settings"; } case MATROSKA_ID_CONTENT_ENCRYPTION: { return "Content Encryption"; } case MATROSKA_ID_ATTACHMENTS: { return "Attachments"; } case MATROSKA_ID_ATTACHED_FILE: { return "Attached File"; } case MATROSKA_ID_FILE_DESCRIPTION: { return "File Description"; } case MATROSKA_ID_FILE_NAME: { return "File Name"; } case MATROSKA_ID_FILE_MIME_TYPE: { return "File MIME Type"; } case MATROSKA_ID_FILE_DATA: { return "File Data"; } case MATROSKA_ID_FILE_UID: { return "File UID"; } case MATROSKA_ID_CUES: { return "Cues"; } case MATROSKA_ID_CUE_POINT: { return "Cue Point"; } case MATROSKA_ID_CUE_TIME: { return "Cue Time"; } case MATROSKA_ID_CUE_TRACK_POSITIONS: { return "Cue Track Positions"; } case MATROSKA_ID_CUE_TRACK: { return "Cue Track"; } case MATROSKA_ID_CUE_CLUSTER_POSITION: { return "Cue Cluster Position"; } case MATROSKA_ID_CUE_BLOCK_NUMBER: { return "Cue Block Number"; } case MATROSKA_ID_TAGS: { return "Tags"; } default: { return "*****unknown*****"; } } } EBMLDataSize::EBMLDataSize() : EBMLNumber(True) { } EBMLDataSize::~EBMLDataSize() { } live/liveMedia/H264VideoMatroskaFileServerMediaSubsession.hh000444 001751 000000 00000004422 12265042432 024302 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an H264 video track within a Matroska file. 
// C++ header #ifndef _H264_VIDEO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #define _H264_VIDEO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _H264_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH #include "H264VideoFileServerMediaSubsession.hh" #endif #ifndef _MATROSKA_FILE_SERVER_DEMUX_HH #include "MatroskaFileServerDemux.hh" #endif class H264VideoMatroskaFileServerMediaSubsession: public H264VideoFileServerMediaSubsession { public: static H264VideoMatroskaFileServerMediaSubsession* createNew(MatroskaFileServerDemux& demux, unsigned trackNumber); private: H264VideoMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber); // called only by createNew(); virtual ~H264VideoMatroskaFileServerMediaSubsession(); private: // redefined virtual functions virtual float duration() const; virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); private: MatroskaFileServerDemux& fOurDemux; unsigned fTrackNumber; // We store one SPS, and one PPS, for use in our input 'framer's: unsigned fSPSSize; u_int8_t* fSPS; unsigned fPPSSize; u_int8_t* fPPS; }; #endif live/liveMedia/MP3AudioMatroskaFileServerMediaSubsession.hh000444 001751 000000 00000004565 12265042432 024261 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an MP3 audio track within a Matroska file. // (Actually, MPEG-1 or MPEG-2 audio should also work.) 
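// (If "generateADUs" is True, each MP3 frame is first converted to an 'ADU' - a rearrangement of the MP3 data into self-contained units that survive packet loss better - and the optional "interleaving" then reorders those ADUs across packets; see "MP3ADU.hh" and "MP3ADUinterleaving.hh".)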
// C++ header #ifndef _MP3_AUDIO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #define _MP3_AUDIO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _MP3_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH #include "MP3AudioFileServerMediaSubsession.hh" #endif #ifndef _MATROSKA_FILE_SERVER_DEMUX_HH #include "MatroskaFileServerDemux.hh" #endif class MP3AudioMatroskaFileServerMediaSubsession: public MP3AudioFileServerMediaSubsession { public: static MP3AudioMatroskaFileServerMediaSubsession* createNew(MatroskaFileServerDemux& demux, unsigned trackNumber, Boolean generateADUs, Interleaving* interleaving); // Note: "interleaving" is used only if "generateADUs" is True, // (and a value of NULL means 'no interleaving') private: MP3AudioMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber, Boolean generateADUs, Interleaving* interleaving); // called only by createNew(); virtual ~MP3AudioMatroskaFileServerMediaSubsession(); private: // redefined virtual functions virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); private: MatroskaFileServerDemux& fOurDemux; unsigned fTrackNumber; }; #endif live/liveMedia/RTSPServerSupportingHTTPStreaming.cpp000444 001751 000000 00000026032 12265042432 022771 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A server that supports both RTSP, and HTTP streaming (using Apple's "HTTP Live Streaming" protocol) // Implementation #include "RTSPServerSupportingHTTPStreaming.hh" #include "RTSPCommon.hh" #ifndef _WIN32_WCE #include <sys/stat.h> #endif #include <time.h> RTSPServerSupportingHTTPStreaming* RTSPServerSupportingHTTPStreaming::createNew(UsageEnvironment& env, Port rtspPort, UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds) { int ourSocket = setUpOurSocket(env, rtspPort); if (ourSocket == -1) return NULL; return new RTSPServerSupportingHTTPStreaming(env, ourSocket, rtspPort, authDatabase, reclamationTestSeconds); } RTSPServerSupportingHTTPStreaming ::RTSPServerSupportingHTTPStreaming(UsageEnvironment& env, int ourSocket, Port rtspPort, UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds) : RTSPServer(env, ourSocket, rtspPort, authDatabase, reclamationTestSeconds) { } RTSPServerSupportingHTTPStreaming::~RTSPServerSupportingHTTPStreaming() { } RTSPServer::RTSPClientConnection* RTSPServerSupportingHTTPStreaming::createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr) { return new RTSPClientConnectionSupportingHTTPStreaming(*this, clientSocket, clientAddr); } RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming ::RTSPClientConnectionSupportingHTTPStreaming(RTSPServer& ourServer, int clientSocket, struct sockaddr_in clientAddr) : RTSPClientConnection(ourServer, clientSocket, clientAddr), fClientSessionId(0), fPlaylistSource(NULL), fTCPSink(NULL) { } RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming::~RTSPClientConnectionSupportingHTTPStreaming() { Medium::close(fPlaylistSource); Medium::close(fTCPSink); } static char const* lastModifiedHeader(char const* fileName) { static char buf[200]; buf[0] = '\0'; // by default, return an empty string #ifndef _WIN32_WCE struct stat sb; int statResult = stat(fileName, &sb); if (statResult == 0) { strftime(buf, sizeof buf, "Last-Modified: %a, %b %d %Y %H:%M:%S GMT\r\n", gmtime((const time_t*)&sb.st_mtime)); } #endif return buf; } void RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming ::handleHTTPCmd_StreamingGET(char const* urlSuffix, char const* /*fullRequestStr*/) { // If "urlSuffix" ends with "?segment=<offset-in-seconds>,<duration-in-seconds>", then strip this off, and send the // specified segment. Otherwise, construct and send a playlist that consists of segments from the specified file. do { char const* questionMarkPos = strrchr(urlSuffix, '?'); if (questionMarkPos == NULL) break; unsigned offsetInSeconds, durationInSeconds; if (sscanf(questionMarkPos, "?segment=%u,%u", &offsetInSeconds, &durationInSeconds) != 2) break; char* streamName = strDup(urlSuffix); streamName[questionMarkPos-urlSuffix] = '\0'; do { ServerMediaSession* session = fOurServer.lookupServerMediaSession(streamName); if (session == NULL) { handleHTTPCmd_notFound(); break; } // We can't send multi-subsession streams over HTTP (because there's no defined way to multiplex more than one subsession). // Therefore, use the first (and presumed only) substream: ServerMediaSubsessionIterator iter(*session); ServerMediaSubsession* subsession = iter.next(); if (subsession == NULL) { // Treat an 'empty' ServerMediaSession the same as one that doesn't exist at all: handleHTTPCmd_notFound(); break; } // Call "getStreamParameters()" to create the stream's source. (Because we're not actually streaming via RTP/RTCP, most // of the parameters to the call are dummy.) 
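// (To illustrate the flow assumed here: a HTTP GET for "test.ts?segment=30,10" - "test.ts" being a hypothetical stream name - reaches this point, and we respond with the roughly 10-second piece of the stream that starts 30 seconds in.  We call "getStreamParameters()" with dummy RTP/RTCP arguments only because it is the existing hook for creating the stream's state and its 'streamToken'.)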
++fClientSessionId; Port clientRTPPort(0), clientRTCPPort(0), serverRTPPort(0), serverRTCPPort(0); netAddressBits destinationAddress = 0; u_int8_t destinationTTL = 0; Boolean isMulticast = False; void* streamToken; subsession->getStreamParameters(fClientSessionId, 0, clientRTPPort,clientRTCPPort, 0,0,0, destinationAddress,destinationTTL, isMulticast, serverRTPPort,serverRTCPPort, streamToken); // Seek the stream source to the desired place, with the desired duration, and (as a side effect) get the number of bytes: double dOffsetInSeconds = (double)offsetInSeconds; u_int64_t numBytes; subsession->seekStream(fClientSessionId, streamToken, dOffsetInSeconds, (double)durationInSeconds, numBytes); unsigned numTSBytesToStream = (unsigned)numBytes; if (numTSBytesToStream == 0) { // For some reason, we do not know the size of the requested range. We can't handle this request: handleHTTPCmd_notSupported(); break; } // Construct our response: snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "HTTP/1.1 200 OK\r\n" "%s" "Server: LIVE555 Streaming Media v%s\r\n" "%s" "Content-Length: %d\r\n" "Content-Type: text/plain; charset=ISO-8859-1\r\n" "\r\n", dateHeader(), LIVEMEDIA_LIBRARY_VERSION_STRING, lastModifiedHeader(streamName), numTSBytesToStream); // Send the response now, because we're about to add more data (from the source): send(fClientOutputSocket, (char const*)fResponseBuffer, strlen((char*)fResponseBuffer), 0); fResponseBuffer[0] = '\0'; // We've already sent the response. This tells the calling code not to send it again. // Ask the media source to deliver - to the TCP sink - the desired data: FramedSource* mediaSource = subsession->getStreamSource(streamToken); if (mediaSource != NULL) { if (fTCPSink == NULL) fTCPSink = TCPStreamSink::createNew(envir(), fClientOutputSocket); fTCPSink->startPlaying(*mediaSource, afterStreaming, this); } } while(0); delete[] streamName; return; } while (0); // "urlSuffix" does not end with "?segment=<offset-in-seconds>,<duration-in-seconds>". // Construct and send a playlist that describes segments from the specified file. // First, make sure that the named file exists, and is streamable: ServerMediaSession* session = fOurServer.lookupServerMediaSession(urlSuffix); if (session == NULL) { handleHTTPCmd_notFound(); return; } // To be able to construct a playlist for the requested file, we need to know its duration: float duration = session->duration(); if (duration <= 0.0) { // We can't handle this request: handleHTTPCmd_notSupported(); return; } // Now, construct the playlist. It will consist of a prefix, one or more media file specifications, and a suffix: unsigned const maxIntLen = 10; // >= the maximum possible strlen() of an integer in the playlist char const* const playlistPrefixFmt = "#EXTM3U\r\n" "#EXT-X-ALLOW-CACHE:YES\r\n" "#EXT-X-MEDIA-SEQUENCE:0\r\n" "#EXT-X-TARGETDURATION:%d\r\n"; unsigned const playlistPrefixFmt_maxLen = strlen(playlistPrefixFmt) + maxIntLen; char const* const playlistMediaFileSpecFmt = "#EXTINF:%d,\r\n" "%s?segment=%d,%d\r\n"; unsigned const playlistMediaFileSpecFmt_maxLen = strlen(playlistMediaFileSpecFmt) + maxIntLen + strlen(urlSuffix) + 2*maxIntLen; char const* const playlistSuffixFmt = "#EXT-X-ENDLIST\r\n"; unsigned const playlistSuffixFmt_maxLen = strlen(playlistSuffixFmt); // Figure out the 'target duration' that will produce a playlist that will fit in our response buffer. (But make it at least 10s.) 
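// (A rough worked example of the computation below: the 10000-byte playlist cap, less the prefix and suffix allowances, divided by the worst-case size of one "#EXTINF" entry, yields "maxNumMediaFileSpecs".  If a one-hour file (duration 3600s) may emit at most - say - 100 entries, the target duration becomes 3600/100 + 1 = 37 seconds, so each playlist entry covers ~37s of media; short files instead hit the 10-second floor.)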
unsigned const playlistMaxSize = 10000; unsigned const mediaFileSpecsMaxSize = playlistMaxSize - (playlistPrefixFmt_maxLen + playlistSuffixFmt_maxLen); unsigned const maxNumMediaFileSpecs = mediaFileSpecsMaxSize/playlistMediaFileSpecFmt_maxLen; unsigned targetDuration = (unsigned)(duration/maxNumMediaFileSpecs + 1); if (targetDuration < 10) targetDuration = 10; char* playlist = new char[playlistMaxSize]; char* s = playlist; sprintf(s, playlistPrefixFmt, targetDuration); s += strlen(s); unsigned durSoFar = 0; while (1) { unsigned dur = targetDuration < duration ? targetDuration : (unsigned)duration; duration -= dur; sprintf(s, playlistMediaFileSpecFmt, dur, urlSuffix, durSoFar, dur); s += strlen(s); if (duration < 1.0) break; durSoFar += dur; } sprintf(s, playlistSuffixFmt); s += strlen(s); unsigned playlistLen = s - playlist; // Construct our response: snprintf((char*)fResponseBuffer, sizeof fResponseBuffer, "HTTP/1.1 200 OK\r\n" "%s" "Server: LIVE555 Streaming Media v%s\r\n" "%s" "Content-Length: %d\r\n" "Content-Type: application/vnd.apple.mpegurl\r\n" "\r\n", dateHeader(), LIVEMEDIA_LIBRARY_VERSION_STRING, lastModifiedHeader(urlSuffix), playlistLen); // Send the response header now, because we're about to add more data (the playlist): send(fClientOutputSocket, (char const*)fResponseBuffer, strlen((char*)fResponseBuffer), 0); fResponseBuffer[0] = '\0'; // We've already sent the response. This tells the calling code not to send it again. // Then, send the playlist. Because it's large, we don't do so using "send()", because that might not send it all at once. // Instead, we stream the playlist over the TCP socket: if (fPlaylistSource != NULL) { // sanity check if (fTCPSink != NULL) fTCPSink->stopPlaying(); Medium::close(fPlaylistSource); } fPlaylistSource = ByteStreamMemoryBufferSource::createNew(envir(), (u_int8_t*)playlist, playlistLen); if (fTCPSink == NULL) fTCPSink = TCPStreamSink::createNew(envir(), fClientOutputSocket); fTCPSink->startPlaying(*fPlaylistSource, afterStreaming, this); } void RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming::afterStreaming(void* clientData) { RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming* clientConnection = (RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming*)clientData; // Arrange to delete the 'client connection' object: if (clientConnection->fRecursionCount > 0) { // We're still in the midst of handling a request clientConnection->fIsActive = False; // will cause the object to get deleted at the end of handling the request } else { // We're no longer handling a request; delete the object now: delete clientConnection; } } live/liveMedia/SIPClient.cpp000444 001751 000000 00000074764 12265042432 016156 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A generic SIP client // Implementation #include "SIPClient.hh" #include "GroupsockHelper.hh" #if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4) #define _strncasecmp _strnicmp #else #define _strncasecmp strncasecmp #endif ////////// SIPClient ////////// SIPClient* SIPClient ::createNew(UsageEnvironment& env, unsigned char desiredAudioRTPPayloadFormat, char const* mimeSubtype, int verbosityLevel, char const* applicationName) { return new SIPClient(env, desiredAudioRTPPayloadFormat, mimeSubtype, verbosityLevel, applicationName); } void SIPClient::setUserAgentString(char const* userAgentName) { if (userAgentName == NULL) return; // Change the existing user agent header string: char const* const formatStr = "User-Agent: %s\r\n"; unsigned const headerSize = strlen(formatStr) + strlen(userAgentName); delete[] fUserAgentHeaderStr; fUserAgentHeaderStr = new char[headerSize]; sprintf(fUserAgentHeaderStr, formatStr, userAgentName); fUserAgentHeaderStrLen = strlen(fUserAgentHeaderStr); } SIPClient::SIPClient(UsageEnvironment& env, unsigned char desiredAudioRTPPayloadFormat, char const* mimeSubtype, int verbosityLevel, char const* applicationName) : Medium(env), fT1(500000 /* 500 ms */), fDesiredAudioRTPPayloadFormat(desiredAudioRTPPayloadFormat), fVerbosityLevel(verbosityLevel), fCSeq(0), fUserAgentHeaderStr(NULL), fUserAgentHeaderStrLen(0), fURL(NULL), fURLSize(0), fToTagStr(NULL), fToTagStrSize(0), fUserName(NULL), fUserNameSize(0), fInviteSDPDescription(NULL), fInviteSDPDescriptionReturned(NULL), fInviteCmd(NULL), fInviteCmdSize(0) { if (mimeSubtype == NULL) mimeSubtype = ""; fMIMESubtype = strDup(mimeSubtype); fMIMESubtypeSize = strlen(fMIMESubtype); if (applicationName == NULL) applicationName = ""; fApplicationName = strDup(applicationName); fApplicationNameSize = strlen(fApplicationName); struct in_addr ourAddress; ourAddress.s_addr = ourIPAddress(env); // hack fOurAddressStr = strDup(AddressString(ourAddress).val()); fOurAddressStrSize = strlen(fOurAddressStr); fOurSocket = new Groupsock(env, ourAddress, 0, 255); if (fOurSocket == NULL) { env << "ERROR: Failed to create socket for addr " << fOurAddressStr << ": " << env.getResultMsg() << "\n"; } // Now, find out our source port number. Hack: Do this by first trying to // send a 0-length packet, so that the "getSourcePort()" call will work. fOurSocket->output(envir(), 255, (unsigned char*)"", 0); Port srcPort(0); getSourcePort(env, fOurSocket->socketNum(), srcPort); if (srcPort.num() != 0) { fOurPortNum = ntohs(srcPort.num()); } else { // No luck. 
Try again using a default port number: fOurPortNum = 5060; delete fOurSocket; fOurSocket = new Groupsock(env, ourAddress, fOurPortNum, 255); if (fOurSocket == NULL) { env << "ERROR: Failed to create socket for addr " << fOurAddressStr << ", port " << fOurPortNum << ": " << env.getResultMsg() << "\n"; } } // Set the "User-Agent:" header to use in each request: char const* const libName = "LIVE555 Streaming Media v"; char const* const libVersionStr = LIVEMEDIA_LIBRARY_VERSION_STRING; char const* libPrefix; char const* libSuffix; if (applicationName == NULL || applicationName[0] == '\0') { applicationName = libPrefix = libSuffix = ""; } else { libPrefix = " ("; libSuffix = ")"; } unsigned userAgentNameSize = fApplicationNameSize + strlen(libPrefix) + strlen(libName) + strlen(libVersionStr) + strlen(libSuffix) + 1; char* userAgentName = new char[userAgentNameSize]; sprintf(userAgentName, "%s%s%s%s%s", applicationName, libPrefix, libName, libVersionStr, libSuffix); setUserAgentString(userAgentName); delete[] userAgentName; reset(); } SIPClient::~SIPClient() { reset(); delete[] fUserAgentHeaderStr; delete fOurSocket; delete[] (char*)fOurAddressStr; delete[] (char*)fApplicationName; delete[] (char*)fMIMESubtype; } void SIPClient::reset() { fWorkingAuthenticator = NULL; delete[] fInviteCmd; fInviteCmd = NULL; fInviteCmdSize = 0; delete[] fInviteSDPDescription; fInviteSDPDescription = NULL; delete[] (char*)fUserName; fUserName = strDup(fApplicationName); fUserNameSize = strlen(fUserName); fValidAuthenticator.reset(); delete[] (char*)fToTagStr; fToTagStr = NULL; fToTagStrSize = 0; fServerPortNum = 0; fServerAddress.s_addr = 0; delete[] (char*)fURL; fURL = NULL; fURLSize = 0; } void SIPClient::setProxyServer(unsigned proxyServerAddress, portNumBits proxyServerPortNum) { fServerAddress.s_addr = proxyServerAddress; fServerPortNum = proxyServerPortNum; if (fOurSocket != NULL) { fOurSocket->changeDestinationParameters(fServerAddress, fServerPortNum, 255); } } static char* getLine(char* startOfLine) { // returns the start of the next line, or NULL if none for (char* ptr = startOfLine; *ptr != '\0'; ++ptr) { if (*ptr == '\r' || *ptr == '\n') { // We found the end of the line *ptr++ = '\0'; if (*ptr == '\n') ++ptr; return ptr; } } return NULL; } char* SIPClient::invite(char const* url, Authenticator* authenticator) { // First, check whether "url" contains a username:password to be used: char* username; char* password; if (authenticator == NULL && parseSIPURLUsernamePassword(url, username, password)) { char* result = inviteWithPassword(url, username, password); delete[] username; delete[] password; // they were dynamically allocated return result; } if (!processURL(url)) return NULL; delete[] (char*)fURL; fURL = strDup(url); fURLSize = strlen(fURL); fCallId = our_random32(); fFromTag = our_random32(); return invite1(authenticator); } char* SIPClient::invite1(Authenticator* authenticator) { do { // Send the INVITE command: // First, construct an authenticator string: fValidAuthenticator.reset(); fWorkingAuthenticator = authenticator; char* authenticatorStr = createAuthenticatorString(fWorkingAuthenticator, "INVITE", fURL); // Then, construct the SDP description to be sent in the INVITE: char* rtpmapLine; unsigned rtpmapLineSize; if (fMIMESubtypeSize > 0) { char const* const rtpmapFmt = "a=rtpmap:%u %s/8000\r\n"; unsigned rtpmapFmtSize = strlen(rtpmapFmt) + 3 /* max char len */ + fMIMESubtypeSize; rtpmapLine = new char[rtpmapFmtSize]; sprintf(rtpmapLine, rtpmapFmt, fDesiredAudioRTPPayloadFormat, fMIMESubtype); 
rtpmapLineSize = strlen(rtpmapLine); } else { // Static payload type => no "a=rtpmap:" line rtpmapLine = strDup(""); rtpmapLineSize = 0; } char const* const inviteSDPFmt = "v=0\r\n" "o=- %u %u IN IP4 %s\r\n" "s=%s session\r\n" "c=IN IP4 %s\r\n" "t=0 0\r\n" "m=audio %u RTP/AVP %u\r\n" "%s"; unsigned inviteSDPFmtSize = strlen(inviteSDPFmt) + 20 /* max int len */ + 20 + fOurAddressStrSize + fApplicationNameSize + fOurAddressStrSize + 5 /* max short len */ + 3 /* max char len */ + rtpmapLineSize; delete[] fInviteSDPDescription; fInviteSDPDescription = new char[inviteSDPFmtSize]; sprintf(fInviteSDPDescription, inviteSDPFmt, fCallId, fCSeq, fOurAddressStr, fApplicationName, fOurAddressStr, fClientStartPortNum, fDesiredAudioRTPPayloadFormat, rtpmapLine); unsigned inviteSDPSize = strlen(fInviteSDPDescription); delete[] rtpmapLine; char const* const cmdFmt = "INVITE %s SIP/2.0\r\n" "From: %s <sip:%s@%s>;tag=%u\r\n" "Via: SIP/2.0/UDP %s:%u\r\n" "Max-Forwards: 70\r\n" "To: %s\r\n" "Contact: sip:%s@%s:%u\r\n" "Call-ID: %u@%s\r\n" "CSeq: %d INVITE\r\n" "Content-Type: application/sdp\r\n" "%s" /* Proxy-Authorization: line (if any) */ "%s" /* User-Agent: line */ "Content-Length: %d\r\n\r\n" "%s"; unsigned inviteCmdSize = strlen(cmdFmt) + fURLSize + 2*fUserNameSize + fOurAddressStrSize + 20 /* max int len */ + fOurAddressStrSize + 5 /* max port len */ + fURLSize + fUserNameSize + fOurAddressStrSize + 5 + 20 + fOurAddressStrSize + 20 + strlen(authenticatorStr) + fUserAgentHeaderStrLen + 20 + inviteSDPSize; delete[] fInviteCmd; fInviteCmd = new char[inviteCmdSize]; sprintf(fInviteCmd, cmdFmt, fURL, fUserName, fUserName, fOurAddressStr, fFromTag, fOurAddressStr, fOurPortNum, fURL, fUserName, fOurAddressStr, fOurPortNum, fCallId, fOurAddressStr, ++fCSeq, authenticatorStr, fUserAgentHeaderStr, inviteSDPSize, fInviteSDPDescription); fInviteCmdSize = strlen(fInviteCmd); delete[] authenticatorStr; // Before sending the "INVITE", arrange to handle any response packets, // and set up timers: fInviteClientState = Calling; fEventLoopStopFlag = 0; TaskScheduler& sched = envir().taskScheduler(); // abbrev. sched.turnOnBackgroundReadHandling(fOurSocket->socketNum(), &inviteResponseHandler, this); fTimerALen = 1*fT1; // initially fTimerACount = 0; // initially fTimerA = sched.scheduleDelayedTask(fTimerALen, timerAHandler, this); fTimerB = sched.scheduleDelayedTask(64*fT1, timerBHandler, this); fTimerD = NULL; // for now if (!sendINVITE()) break; // Enter the event loop, to handle response packets, and timeouts: envir().taskScheduler().doEventLoop(&fEventLoopStopFlag); // We're finished with this "INVITE". // Turn off response handling and timers: sched.turnOffBackgroundReadHandling(fOurSocket->socketNum()); sched.unscheduleDelayedTask(fTimerA); sched.unscheduleDelayedTask(fTimerB); sched.unscheduleDelayedTask(fTimerD); // NOTE: We return the SDP description that we used in the "INVITE", // not the one that we got from the server. 
// ##### Later: match the codecs in the response (offer, answer) ##### if (fInviteSDPDescription != NULL) { return strDup(fInviteSDPDescription); } } while (0); return NULL; } void SIPClient::inviteResponseHandler(void* clientData, int /*mask*/) { SIPClient* client = (SIPClient*)clientData; unsigned responseCode = client->getResponseCode(); client->doInviteStateMachine(responseCode); } // Special 'response codes' that represent timers expiring: unsigned const timerAFires = 0xAAAAAAAA; unsigned const timerBFires = 0xBBBBBBBB; unsigned const timerDFires = 0xDDDDDDDD; void SIPClient::timerAHandler(void* clientData) { SIPClient* client = (SIPClient*)clientData; if (client->fVerbosityLevel >= 1) { client->envir() << "RETRANSMISSION " << ++client->fTimerACount << ", after " << client->fTimerALen/1000000.0 << " additional seconds\n"; } client->doInviteStateMachine(timerAFires); } void SIPClient::timerBHandler(void* clientData) { SIPClient* client = (SIPClient*)clientData; if (client->fVerbosityLevel >= 1) { client->envir() << "RETRANSMISSION TIMEOUT, after " << 64*client->fT1/1000000.0 << " seconds\n"; fflush(stderr); } client->doInviteStateMachine(timerBFires); } void SIPClient::timerDHandler(void* clientData) { SIPClient* client = (SIPClient*)clientData; if (client->fVerbosityLevel >= 1) { client->envir() << "TIMER D EXPIRED\n"; } client->doInviteStateMachine(timerDFires); } void SIPClient::doInviteStateMachine(unsigned responseCode) { // Implement the state transition diagram (RFC 3261, Figure 5) TaskScheduler& sched = envir().taskScheduler(); // abbrev. switch (fInviteClientState) { case Calling: { if (responseCode == timerAFires) { // Restart timer A (with double the timeout interval): fTimerALen *= 2; fTimerA = sched.scheduleDelayedTask(fTimerALen, timerAHandler, this); fInviteClientState = Calling; if (!sendINVITE()) doInviteStateTerminated(0); } else { // Turn off timers A & B before moving to a new state: sched.unscheduleDelayedTask(fTimerA); sched.unscheduleDelayedTask(fTimerB); if (responseCode == timerBFires) { envir().setResultMsg("No response from server"); doInviteStateTerminated(0); } else if (responseCode >= 100 && responseCode <= 199) { fInviteClientState = Proceeding; } else if (responseCode >= 200 && responseCode <= 299) { doInviteStateTerminated(responseCode); } else if (responseCode >= 400 && responseCode <= 499) { doInviteStateTerminated(responseCode); // this isn't what the spec says, but it seems right... } else if (responseCode >= 300 && responseCode <= 699) { fInviteClientState = Completed; fTimerD = sched.scheduleDelayedTask(32000000, timerDHandler, this); if (!sendACK()) doInviteStateTerminated(0); } } break; } case Proceeding: { if (responseCode >= 100 && responseCode <= 199) { fInviteClientState = Proceeding; } else if (responseCode >= 200 && responseCode <= 299) { doInviteStateTerminated(responseCode); } else if (responseCode >= 400 && responseCode <= 499) { doInviteStateTerminated(responseCode); // this isn't what the spec says, but it seems right... 
} else if (responseCode >= 300 && responseCode <= 699) { fInviteClientState = Completed; fTimerD = sched.scheduleDelayedTask(32000000, timerDHandler, this); if (!sendACK()) doInviteStateTerminated(0); } break; } case Completed: { if (responseCode == timerDFires) { envir().setResultMsg("Transaction terminated"); doInviteStateTerminated(0); } else if (responseCode >= 300 && responseCode <= 699) { fInviteClientState = Completed; if (!sendACK()) doInviteStateTerminated(0); } break; } case Terminated: { doInviteStateTerminated(responseCode); break; } } } void SIPClient::doInviteStateTerminated(unsigned responseCode) { fInviteClientState = Terminated; // FWIW... if (responseCode < 200 || responseCode > 299) { // We failed, so return NULL; delete[] fInviteSDPDescription; fInviteSDPDescription = NULL; delete[] fInviteSDPDescriptionReturned; fInviteSDPDescriptionReturned = NULL; } // Unblock the event loop: fEventLoopStopFlag = ~0; } Boolean SIPClient::sendINVITE() { if (!sendRequest(fInviteCmd, fInviteCmdSize)) { envir().setResultErrMsg("INVITE send() failed: "); return False; } return True; } unsigned SIPClient::getResponseCode() { unsigned responseCode = 0; do { // Get the response from the server: unsigned const readBufSize = 10000; char readBuffer[readBufSize+1]; char* readBuf = readBuffer; char* firstLine = NULL; char* nextLineStart = NULL; unsigned bytesRead = getResponse(readBuf, readBufSize); if (bytesRead == 0) break; if (fVerbosityLevel >= 1) { envir() << "Received INVITE response: " << readBuf << "\n"; } // Inspect the first line to get the response code: firstLine = readBuf; nextLineStart = getLine(firstLine); if (!parseResponseCode(firstLine, responseCode)) break; if (responseCode != 200) { if (responseCode >= 400 && responseCode <= 499 && fWorkingAuthenticator != NULL) { // We have an authentication failure, so fill in // "*fWorkingAuthenticator" using the contents of a following // "Proxy-Authenticate:" line. (Once we compute a 'response' for // "fWorkingAuthenticator", it can be used in a subsequent request // - that will hopefully succeed.) char* lineStart; while (1) { lineStart = nextLineStart; if (lineStart == NULL) break; nextLineStart = getLine(lineStart); if (lineStart[0] == '\0') break; // this is a blank line char* realm = strDupSize(lineStart); char* nonce = strDupSize(lineStart); // ##### Check for the format of "Proxy-Authenticate:" lines from // ##### known server types. // ##### This is a crock! We should make the parsing more general Boolean foundAuthenticateHeader = False; if ( // Asterisk ##### sscanf(lineStart, "Proxy-Authenticate: Digest realm=\"%[^\"]\", nonce=\"%[^\"]\"", realm, nonce) == 2 || // Cisco ATA ##### sscanf(lineStart, "Proxy-Authenticate: Digest algorithm=MD5,domain=\"%*[^\"]\",nonce=\"%[^\"]\", realm=\"%[^\"]\"", nonce, realm) == 2) { fWorkingAuthenticator->setRealmAndNonce(realm, nonce); foundAuthenticateHeader = True; } delete[] realm; delete[] nonce; if (foundAuthenticateHeader) break; } } envir().setResultMsg("cannot handle INVITE response: ", firstLine); break; } // Skip every subsequent header line, until we see a blank line. // While doing so, check for "To:" and "Content-Length:" lines. // The remaining data is assumed to be the SDP descriptor that we want. // We should really do some more checking on the headers here - e.g., to // check for "Content-type: application/sdp", "CSeq", etc. 
##### int contentLength = -1; char* lineStart; while (1) { lineStart = nextLineStart; if (lineStart == NULL) break; nextLineStart = getLine(lineStart); if (lineStart[0] == '\0') break; // this is a blank line char* toTagStr = strDupSize(lineStart); if (sscanf(lineStart, "To:%*[^;]; tag=%s", toTagStr) == 1) { delete[] (char*)fToTagStr; fToTagStr = strDup(toTagStr); fToTagStrSize = strlen(fToTagStr); } delete[] toTagStr; if (sscanf(lineStart, "Content-Length: %d", &contentLength) == 1 || sscanf(lineStart, "Content-length: %d", &contentLength) == 1) { if (contentLength < 0) { envir().setResultMsg("Bad \"Content-Length:\" header: \"", lineStart, "\""); break; } } } // We're now at the end of the response header lines if (lineStart == NULL) { envir().setResultMsg("no content following header lines: ", readBuf); break; } // Use the remaining data as the SDP descr, but first, check // the "Content-Length:" header (if any) that we saw. We may need to // read more data, or we may have extraneous data in the buffer. char* bodyStart = nextLineStart; if (bodyStart != NULL && contentLength >= 0) { // We saw a "Content-Length:" header unsigned numBodyBytes = &readBuf[bytesRead] - bodyStart; if (contentLength > (int)numBodyBytes) { // We need to read more data. First, make sure we have enough // space for it: unsigned numExtraBytesNeeded = contentLength - numBodyBytes; #ifdef USING_TCP // THIS CODE WORKS ONLY FOR TCP: ##### unsigned remainingBufferSize = readBufSize - (bytesRead + (readBuf - readBuffer)); if (numExtraBytesNeeded > remainingBufferSize) { char tmpBuf[200]; sprintf(tmpBuf, "Read buffer size (%d) is too small for \"Content-Length:\" %d (need a buffer size of >= %d bytes\n", readBufSize, contentLength, readBufSize + numExtraBytesNeeded - remainingBufferSize); envir().setResultMsg(tmpBuf); break; } // Keep reading more data until we have enough: if (fVerbosityLevel >= 1) { envir() << "Need to read " << numExtraBytesNeeded << " extra bytes\n"; } while (numExtraBytesNeeded > 0) { char* ptr = &readBuf[bytesRead]; unsigned bytesRead2; struct sockaddr_in fromAddr; Boolean readSuccess = fOurSocket->handleRead((unsigned char*)ptr, numExtraBytesNeeded, bytesRead2, fromAddr); if (!readSuccess) break; ptr[bytesRead2] = '\0'; if (fVerbosityLevel >= 1) { envir() << "Read " << bytesRead2 << " extra bytes: " << ptr << "\n"; } bytesRead += bytesRead2; numExtraBytesNeeded -= bytesRead2; } #endif if (numExtraBytesNeeded > 0) break; // one of the reads failed } bodyStart[contentLength] = '\0'; // trims any extra data delete[] fInviteSDPDescriptionReturned; fInviteSDPDescriptionReturned = strDup(bodyStart); } } while (0); return responseCode; } char* SIPClient::inviteWithPassword(char const* url, char const* username, char const* password) { delete[] (char*)fUserName; fUserName = strDup(username); fUserNameSize = strlen(fUserName); Authenticator authenticator(username, password); char* inviteResult = invite(url, &authenticator); if (inviteResult != NULL) { // We are already authorized return inviteResult; } // The "realm" and "nonce" fields should have been filled in: if (authenticator.realm() == NULL || authenticator.nonce() == NULL) { // We haven't been given enough information to try again, so fail: return NULL; } // Try again (but with the same CallId): inviteResult = invite1(&authenticator); if (inviteResult != NULL) { // The authenticator worked, so use it in future requests: fValidAuthenticator = authenticator; } return inviteResult; } Boolean SIPClient::sendACK() { char* cmd = NULL; do { char const* 
const cmdFmt = "ACK %s SIP/2.0\r\n" "From: %s ;tag=%u\r\n" "Via: SIP/2.0/UDP %s:%u\r\n" "Max-Forwards: 70\r\n" "To: %s;tag=%s\r\n" "Call-ID: %u@%s\r\n" "CSeq: %d ACK\r\n" "Content-Length: 0\r\n\r\n"; unsigned cmdSize = strlen(cmdFmt) + fURLSize + 2*fUserNameSize + fOurAddressStrSize + 20 /* max int len */ + fOurAddressStrSize + 5 /* max port len */ + fURLSize + fToTagStrSize + 20 + fOurAddressStrSize + 20; cmd = new char[cmdSize]; sprintf(cmd, cmdFmt, fURL, fUserName, fUserName, fOurAddressStr, fFromTag, fOurAddressStr, fOurPortNum, fURL, fToTagStr, fCallId, fOurAddressStr, fCSeq /* note: it's the same as before; not incremented */); if (!sendRequest(cmd, strlen(cmd))) { envir().setResultErrMsg("ACK send() failed: "); break; } delete[] cmd; return True; } while (0); delete[] cmd; return False; } Boolean SIPClient::sendBYE() { // NOTE: This should really be retransmitted, for reliability ##### char* cmd = NULL; do { char const* const cmdFmt = "BYE %s SIP/2.0\r\n" "From: %s ;tag=%u\r\n" "Via: SIP/2.0/UDP %s:%u\r\n" "Max-Forwards: 70\r\n" "To: %s;tag=%s\r\n" "Call-ID: %u@%s\r\n" "CSeq: %d BYE\r\n" "Content-Length: 0\r\n\r\n"; unsigned cmdSize = strlen(cmdFmt) + fURLSize + 2*fUserNameSize + fOurAddressStrSize + 20 /* max int len */ + fOurAddressStrSize + 5 /* max port len */ + fURLSize + fToTagStrSize + 20 + fOurAddressStrSize + 20; cmd = new char[cmdSize]; sprintf(cmd, cmdFmt, fURL, fUserName, fUserName, fOurAddressStr, fFromTag, fOurAddressStr, fOurPortNum, fURL, fToTagStr, fCallId, fOurAddressStr, ++fCSeq); if (!sendRequest(cmd, strlen(cmd))) { envir().setResultErrMsg("BYE send() failed: "); break; } delete[] cmd; return True; } while (0); delete[] cmd; return False; } Boolean SIPClient::processURL(char const* url) { do { // If we don't already have a server address/port, then // get these by parsing the URL: if (fServerAddress.s_addr == 0) { NetAddress destAddress; if (!parseSIPURL(envir(), url, destAddress, fServerPortNum)) break; fServerAddress.s_addr = *(unsigned*)(destAddress.data()); if (fOurSocket != NULL) { fOurSocket->changeDestinationParameters(fServerAddress, fServerPortNum, 255); } } return True; } while (0); return False; } Boolean SIPClient::parseSIPURL(UsageEnvironment& env, char const* url, NetAddress& address, portNumBits& portNum) { do { // Parse the URL as "sip:@
:/" // (with ":" and "/" optional) // Also, skip over any "[:]@" preceding
char const* prefix = "sip:"; unsigned const prefixLength = 4; if (_strncasecmp(url, prefix, prefixLength) != 0) { env.setResultMsg("URL is not of the form \"", prefix, "\""); break; } unsigned const parseBufferSize = 100; char parseBuffer[parseBufferSize]; unsigned addressStartIndex = prefixLength; while (url[addressStartIndex] != '\0' && url[addressStartIndex++] != '@') {} char const* from = &url[addressStartIndex]; // Skip over any "[:]@" char const* from1 = from; while (*from1 != '\0' && *from1 != '/') { if (*from1 == '@') { from = ++from1; break; } ++from1; } char* to = &parseBuffer[0]; unsigned i; for (i = 0; i < parseBufferSize; ++i) { if (*from == '\0' || *from == ':' || *from == '/') { // We've completed parsing the address *to = '\0'; break; } *to++ = *from++; } if (i == parseBufferSize) { env.setResultMsg("URL is too long"); break; } NetAddressList addresses(parseBuffer); if (addresses.numAddresses() == 0) { env.setResultMsg("Failed to find network address for \"", parseBuffer, "\""); break; } address = *(addresses.firstAddress()); portNum = 5060; // default value char nextChar = *from; if (nextChar == ':') { int portNumInt; if (sscanf(++from, "%d", &portNumInt) != 1) { env.setResultMsg("No port number follows ':'"); break; } if (portNumInt < 1 || portNumInt > 65535) { env.setResultMsg("Bad port number"); break; } portNum = (portNumBits)portNumInt; } return True; } while (0); return False; } Boolean SIPClient::parseSIPURLUsernamePassword(char const* url, char*& username, char*& password) { username = password = NULL; // by default do { // Parse the URL as "sip:[:]@" char const* prefix = "sip:"; unsigned const prefixLength = 4; if (_strncasecmp(url, prefix, prefixLength) != 0) break; // Look for the ':' and '@': unsigned usernameIndex = prefixLength; unsigned colonIndex = 0, atIndex = 0; for (unsigned i = usernameIndex; url[i] != '\0' && url[i] != '/'; ++i) { if (url[i] == ':' && colonIndex == 0) { colonIndex = i; } else if (url[i] == '@') { atIndex = i; break; // we're done } } if (atIndex == 0) break; // no '@' found char* urlCopy = strDup(url); urlCopy[atIndex] = '\0'; if (colonIndex > 0) { urlCopy[colonIndex] = '\0'; password = strDup(&urlCopy[colonIndex+1]); } else { password = strDup(""); } username = strDup(&urlCopy[usernameIndex]); delete[] urlCopy; return True; } while (0); return False; } char* SIPClient::createAuthenticatorString(Authenticator const* authenticator, char const* cmd, char const* url) { if (authenticator != NULL && authenticator->realm() != NULL && authenticator->nonce() != NULL && authenticator->username() != NULL && authenticator->password() != NULL) { // We've been provided a filled-in authenticator, so use it: char const* const authFmt = "Proxy-Authorization: Digest username=\"%s\", realm=\"%s\", nonce=\"%s\", response=\"%s\", uri=\"%s\"\r\n"; char const* response = authenticator->computeDigestResponse(cmd, url); unsigned authBufSize = strlen(authFmt) + strlen(authenticator->username()) + strlen(authenticator->realm()) + strlen(authenticator->nonce()) + strlen(url) + strlen(response); char* authenticatorStr = new char[authBufSize]; sprintf(authenticatorStr, authFmt, authenticator->username(), authenticator->realm(), authenticator->nonce(), response, url); authenticator->reclaimDigestResponse(response); return authenticatorStr; } return strDup(""); } Boolean SIPClient::sendRequest(char const* requestString, unsigned requestLength) { if (fVerbosityLevel >= 1) { envir() << "Sending request: " << requestString << "\n"; } // NOTE: We should really check 
that "requestLength" is not ##### // too large for UDP (see RFC 3261, section 18.1.1) ##### return fOurSocket->output(envir(), 255, (unsigned char*)requestString, requestLength); } unsigned SIPClient::getResponse(char*& responseBuffer, unsigned responseBufferSize) { if (responseBufferSize == 0) return 0; // just in case... responseBuffer[0] = '\0'; // ditto // Keep reading data from the socket until we see "\r\n\r\n" (except // at the start), or until we fill up our buffer. // Don't read any more than this. char* p = responseBuffer; Boolean haveSeenNonCRLF = False; int bytesRead = 0; while (bytesRead < (int)responseBufferSize) { unsigned bytesReadNow; struct sockaddr_in fromAddr; unsigned char* toPosn = (unsigned char*)(responseBuffer+bytesRead); Boolean readSuccess = fOurSocket->handleRead(toPosn, responseBufferSize-bytesRead, bytesReadNow, fromAddr); if (!readSuccess || bytesReadNow == 0) { envir().setResultMsg("SIP response was truncated"); break; } bytesRead += bytesReadNow; // Check whether we have "\r\n\r\n": char* lastToCheck = responseBuffer+bytesRead-4; if (lastToCheck < responseBuffer) continue; for (; p <= lastToCheck; ++p) { if (haveSeenNonCRLF) { if (*p == '\r' && *(p+1) == '\n' && *(p+2) == '\r' && *(p+3) == '\n') { responseBuffer[bytesRead] = '\0'; // Before returning, trim any \r or \n from the start: while (*responseBuffer == '\r' || *responseBuffer == '\n') { ++responseBuffer; --bytesRead; } return bytesRead; } } else { if (*p != '\r' && *p != '\n') { haveSeenNonCRLF = True; } } } } return 0; } Boolean SIPClient::parseResponseCode(char const* line, unsigned& responseCode) { if (sscanf(line, "%*s%u", &responseCode) != 1) { envir().setResultMsg("no response code in line: \"", line, "\""); return False; } return True; } live/liveMedia/Makefile.head000440 001751 000000 00000000246 12265042432 016174 0ustar00rsfwheel000000 000000 INCLUDES = -Iinclude -I../UsageEnvironment/include -I../groupsock/include PREFIX = /usr/local LIBDIR = $(PREFIX)/lib ##### Change the following for your environment: live/liveMedia/AACAudioMatroskaFileServerMediaSubsession.cpp000444 001751 000000 00000006342 12265042432 024424 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an AAC audio track within a Matroska file. 
// Implementation #include "AACAudioMatroskaFileServerMediaSubsession.hh" #include "MPEG4GenericRTPSink.hh" #include "MatroskaDemuxedTrack.hh" AACAudioMatroskaFileServerMediaSubsession* AACAudioMatroskaFileServerMediaSubsession ::createNew(MatroskaFileServerDemux& demux, unsigned trackNumber) { return new AACAudioMatroskaFileServerMediaSubsession(demux, trackNumber); } AACAudioMatroskaFileServerMediaSubsession ::AACAudioMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber) : FileServerMediaSubsession(demux.envir(), demux.fileName(), False), fOurDemux(demux), fTrackNumber(trackNumber) { // The Matroska file's 'Codec Private' data is assumed to be the AAC configuration information. // Use this to generate a 'config string': MatroskaTrack* track = fOurDemux.lookup(fTrackNumber); fConfigStr = new char[2*track->codecPrivateSize + 1]; // 2 hex digits per byte, plus the trailing '\0' for (unsigned i = 0; i < track->codecPrivateSize; ++i) sprintf(&fConfigStr[2*i], "%02X", track->codecPrivate[i]); } AACAudioMatroskaFileServerMediaSubsession ::~AACAudioMatroskaFileServerMediaSubsession() { delete[] fConfigStr; } float AACAudioMatroskaFileServerMediaSubsession::duration() const { return fOurDemux.fileDuration(); } void AACAudioMatroskaFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) { ((MatroskaDemuxedTrack*)inputSource)->seekToTime(seekNPT); } FramedSource* AACAudioMatroskaFileServerMediaSubsession ::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { estBitrate = 96; // kbps, estimate return fOurDemux.newDemuxedTrack(clientSessionId, fTrackNumber); } RTPSink* AACAudioMatroskaFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { MatroskaTrack* track = fOurDemux.lookup(fTrackNumber); return MPEG4GenericRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, track->samplingFrequency, "audio", "AAC-hbr", fConfigStr, track->numChannels); } live/liveMedia/MP3AudioMatroskaFileServerMediaSubsession.cpp000444 001751 000000 00000005237 12265042432 024441 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an MP3 audio track within a Matroska file. // (Actually, MPEG-1 or MPEG-2 audio file should also work.) 
// Implementation #include "MP3AudioMatroskaFileServerMediaSubsession.hh" #include "MatroskaDemuxedTrack.hh" MP3AudioMatroskaFileServerMediaSubsession* MP3AudioMatroskaFileServerMediaSubsession ::createNew(MatroskaFileServerDemux& demux, unsigned trackNumber, Boolean generateADUs, Interleaving* interleaving) { return new MP3AudioMatroskaFileServerMediaSubsession(demux, trackNumber, generateADUs, interleaving); } MP3AudioMatroskaFileServerMediaSubsession ::MP3AudioMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber, Boolean generateADUs, Interleaving* interleaving) : MP3AudioFileServerMediaSubsession(demux.envir(), demux.fileName(), False, generateADUs, interleaving), fOurDemux(demux), fTrackNumber(trackNumber) { fFileDuration = fOurDemux.fileDuration(); } MP3AudioMatroskaFileServerMediaSubsession::~MP3AudioMatroskaFileServerMediaSubsession() { } void MP3AudioMatroskaFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) { FramedSource* sourceMP3Stream; ADUFromMP3Source* aduStream; getBaseStreams(inputSource, sourceMP3Stream, aduStream); if (aduStream != NULL) aduStream->resetInput(); // because we're about to seek within its source ((MatroskaDemuxedTrack*)sourceMP3Stream)->seekToTime(seekNPT); } FramedSource* MP3AudioMatroskaFileServerMediaSubsession ::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { FramedSource* baseMP3Source = fOurDemux.newDemuxedTrack(clientSessionId, fTrackNumber); return createNewStreamSourceCommon(baseMP3Source, 0, estBitrate); } live/liveMedia/T140TextMatroskaFileServerMediaSubsession.cpp000444 001751 000000 00000004775 12265042432 024363 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an text (subtitle) track within a Matroska file. 
// Implementation #include "T140TextMatroskaFileServerMediaSubsession.hh" #include "T140TextRTPSink.hh" #include "MatroskaDemuxedTrack.hh" T140TextMatroskaFileServerMediaSubsession* T140TextMatroskaFileServerMediaSubsession ::createNew(MatroskaFileServerDemux& demux, unsigned trackNumber) { return new T140TextMatroskaFileServerMediaSubsession(demux, trackNumber); } T140TextMatroskaFileServerMediaSubsession ::T140TextMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber) : FileServerMediaSubsession(demux.envir(), demux.fileName(), False), fOurDemux(demux), fTrackNumber(trackNumber) { } T140TextMatroskaFileServerMediaSubsession ::~T140TextMatroskaFileServerMediaSubsession() { } float T140TextMatroskaFileServerMediaSubsession::duration() const { return fOurDemux.fileDuration(); } void T140TextMatroskaFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) { ((MatroskaDemuxedTrack*)inputSource)->seekToTime(seekNPT); } FramedSource* T140TextMatroskaFileServerMediaSubsession ::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { estBitrate = 48; // kbps, estimate return fOurDemux.newDemuxedTrack(clientSessionId, fTrackNumber); } RTPSink* T140TextMatroskaFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { return T140TextRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } live/liveMedia/MatroskaDemuxedTrack.hh000444 001751 000000 00000004331 12265042432 020242 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A media track, demultiplexed from a Matroska file // C++ header #ifndef _MATROSKA_DEMUXED_TRACK_HH #define _MATROSKA_DEMUXED_TRACK_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif class MatroskaDemux; // forward class MatroskaDemuxedTrack: public FramedSource { public: void seekToTime(double& seekNPT); private: // We are created only by a MatroskaDemux (a friend) friend class MatroskaDemux; MatroskaDemuxedTrack(UsageEnvironment& env, unsigned trackNumber, MatroskaDemux& sourceDemux); virtual ~MatroskaDemuxedTrack(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual char const* MIMEtype() const; private: // We are accessed only by MatroskaDemux and by MatroskaFileParser (a friend) friend class MatroskaFileParser; unsigned char* to() { return fTo; } unsigned maxSize() { return fMaxSize; } unsigned& frameSize() { return fFrameSize; } unsigned& numTruncatedBytes() { return fNumTruncatedBytes; } struct timeval& presentationTime() { return fPresentationTime; } unsigned& durationInMicroseconds() { return fDurationInMicroseconds; } struct timeval& prevPresentationTime() { return fPrevPresentationTime; } int& durationImbalance() { return fDurationImbalance; } private: unsigned fOurTrackNumber; MatroskaDemux& fOurSourceDemux; struct timeval fPrevPresentationTime; int fDurationImbalance; }; #endif live/liveMedia/H263plusVideoFileServerMediaSubsession.cpp000444 001751 000000 00000005112 12265042432 023663 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a H263 video file. // Implementation // Author: Bernhard Feiten. // Based on MPEG4VideoFileServerMediaSubsession // Updated by Ross FInlayson (December 2007) #include "H263plusVideoFileServerMediaSubsession.hh" #include "H263plusVideoRTPSink.hh" #include "ByteStreamFileSource.hh" #include "H263plusVideoStreamFramer.hh" H263plusVideoFileServerMediaSubsession* H263plusVideoFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) { return new H263plusVideoFileServerMediaSubsession(env, fileName, reuseFirstSource); } H263plusVideoFileServerMediaSubsession ::H263plusVideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) : FileServerMediaSubsession(env, fileName, reuseFirstSource) { } H263plusVideoFileServerMediaSubsession::~H263plusVideoFileServerMediaSubsession() { } FramedSource* H263plusVideoFileServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { estBitrate = 500; // kbps, estimate ?? 
// Create the video source: ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; fFileSize = fileSource->fileSize(); // Create a framer for the Video Elementary Stream: return H263plusVideoStreamFramer::createNew(envir(), fileSource); } RTPSink* H263plusVideoFileServerMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { return H263plusVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } live/liveMedia/MatroskaFileParser.hh000444 001751 000000 00000010636 12265042432 017723 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A parser for a Matroska file. // C++ header #ifndef _MATROSKA_FILE_PARSER_HH #define _MATROSKA_FILE_PARSER_HH #ifndef _STREAM_PARSER_HH #include "StreamParser.hh" #endif #ifndef _MATROSKA_FILE_HH #include "MatroskaFile.hh" #endif #ifndef _EBML_NUMBER_HH #include "EBMLNumber.hh" #endif // An enum representing the current state of the parser: enum MatroskaParseState { PARSING_START_OF_FILE, LOOKING_FOR_TRACKS, PARSING_TRACK, PARSING_CUES, LOOKING_FOR_CLUSTER, LOOKING_FOR_BLOCK, PARSING_BLOCK, DELIVERING_FRAME_WITHIN_BLOCK, DELIVERING_FRAME_BYTES }; class MatroskaFileParser: public StreamParser { public: MatroskaFileParser(MatroskaFile& ourFile, FramedSource* inputSource, FramedSource::onCloseFunc* onEndFunc, void* onEndClientData, MatroskaDemux* ourDemux = NULL); virtual ~MatroskaFileParser(); void seekToTime(double& seekNPT); // StreamParser 'client continue' function: static void continueParsing(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime); void continueParsing(); private: // Parsing functions: Boolean parse(); // returns True iff we have finished parsing to the end of all 'Track' headers (on initialization) Boolean parseStartOfFile(); void lookForNextTrack(); Boolean parseTrack(); Boolean parseCues(); void lookForNextBlock(); void parseBlock(); Boolean deliverFrameWithinBlock(); void deliverFrameBytes(); void getCommonFrameBytes(MatroskaTrack* track, u_int8_t* to, unsigned numBytesToGet, unsigned numBytesToSkip); Boolean parseEBMLNumber(EBMLNumber& num); Boolean parseEBMLIdAndSize(EBMLId& id, EBMLDataSize& size); Boolean parseEBMLVal_unsigned64(EBMLDataSize& size, u_int64_t& result); Boolean parseEBMLVal_unsigned(EBMLDataSize& size, unsigned& result); Boolean parseEBMLVal_float(EBMLDataSize& size, float& result); Boolean parseEBMLVal_string(EBMLDataSize& size, char*& result); // Note: "result" is dynamically allocated; the caller must delete[] it later Boolean parseEBMLVal_binary(EBMLDataSize& size, u_int8_t*& result); // Note: "result" is dynamically allocated; the caller must delete[] it later void
skipHeader(EBMLDataSize const& size); void skipRemainingHeaderBytes(Boolean isContinuation); void setParseState(); void seekToFilePosition(u_int64_t offsetInFile); void seekToEndOfFile(); void resetStateAfterSeeking(); // common code, called by both of the above private: // redefined virtual functions virtual void restoreSavedParserState(); private: // General state for parsing: MatroskaFile& fOurFile; FramedSource* fInputSource; FramedSource::onCloseFunc* fOnEndFunc; void* fOnEndClientData; MatroskaDemux* fOurDemux; MatroskaParseState fCurrentParseState; u_int64_t fCurOffsetInFile, fSavedCurOffsetInFile, fLimitOffsetInFile; // For skipping over (possibly large) headers: u_int64_t fNumHeaderBytesToSkip; // For parsing 'Seek ID's: EBMLId fLastSeekId; // Parameters of the most recently-parsed 'Cluster': unsigned fClusterTimecode; // Parameters of the most recently-parsed 'Block': unsigned fBlockSize; unsigned fBlockTrackNumber; short fBlockTimecode; unsigned fNumFramesInBlock; unsigned* fFrameSizesWithinBlock; // Parameters of the most recently-parsed frame within a 'Block': double fPresentationTimeOffset; unsigned fNextFrameNumberToDeliver; unsigned fCurOffsetWithinFrame, fSavedCurOffsetWithinFrame; // used if track->haveSubframes() // Parameters of the (sub)frame that's currently being delivered: u_int8_t* fCurFrameTo; unsigned fCurFrameNumBytesToGet; unsigned fCurFrameNumBytesToSkip; }; #endif live/liveMedia/DVVideoFileServerMediaSubsession.cpp000444 001751 000000 00000007351 12265042432 022655 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a DV video file. 
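// A hypothetical worked example for the duration and bitrate estimates computed in
// "createNewStreamSource()" below: a PAL DV stream has frameSize = 144000 bytes and
// frameDuration = 40000 (us), so a 1440000000-byte file would give
//   fileDuration = (1440000000*40000)/(144000*1000000.0) = 400 seconds, and
//   estBitrate = (8000.0*144000)/40000 = 28800 (kbps, i.e., ~28.8 Mbps).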
// Implementation #include "DVVideoFileServerMediaSubsession.hh" #include "DVVideoRTPSink.hh" #include "ByteStreamFileSource.hh" #include "DVVideoStreamFramer.hh" DVVideoFileServerMediaSubsession* DVVideoFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) { return new DVVideoFileServerMediaSubsession(env, fileName, reuseFirstSource); } DVVideoFileServerMediaSubsession ::DVVideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) : FileServerMediaSubsession(env, fileName, reuseFirstSource), fFileDuration(0.0) { } DVVideoFileServerMediaSubsession::~DVVideoFileServerMediaSubsession() { } FramedSource* DVVideoFileServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { // Create the video source: ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; fFileSize = fileSource->fileSize(); // Create a framer for the Video Elementary Stream: DVVideoStreamFramer* framer = DVVideoStreamFramer::createNew(envir(), fileSource, True/*the file source is seekable*/); // Use the framer to figure out the file's duration: unsigned frameSize; double frameDuration; if (framer->getFrameParameters(frameSize, frameDuration)) { fFileDuration = (float)(((int64_t)fFileSize*frameDuration)/(frameSize*1000000.0)); estBitrate = (unsigned)((8000.0*frameSize)/frameDuration); // in kbps } else { estBitrate = 50000; // kbps, estimate } return framer; } RTPSink* DVVideoFileServerMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { return DVVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } char const* DVVideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) { return ((DVVideoRTPSink*)rtpSink)->auxSDPLineFromFramer((DVVideoStreamFramer*)inputSource); } float DVVideoFileServerMediaSubsession::duration() const { return fFileDuration; } void DVVideoFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes) { // First, get the file source from "inputSource" (a framer): DVVideoStreamFramer* framer = (DVVideoStreamFramer*)inputSource; ByteStreamFileSource* fileSource = (ByteStreamFileSource*)(framer->inputSource()); // Then figure out where to seek to within the file: if (fFileDuration > 0.0) { u_int64_t seekByteNumber = (u_int64_t)(((int64_t)fFileSize*seekNPT)/fFileDuration); numBytes = (u_int64_t)(((int64_t)fFileSize*streamDuration)/fFileDuration); fileSource->seekToByteAbsolute(seekByteNumber, numBytes); } } live/liveMedia/MultiFramedRTPSource.cpp000444 001751 000000 00000051304 12265042432 020325 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP source for a common kind of payload format: Those that pack multiple, // complete codec frames (as many as possible) into each RTP packet. // Implementation #include "MultiFramedRTPSource.hh" #include "GroupsockHelper.hh" #include ////////// ReorderingPacketBuffer definition ////////// class ReorderingPacketBuffer { public: ReorderingPacketBuffer(BufferedPacketFactory* packetFactory); virtual ~ReorderingPacketBuffer(); void reset(); BufferedPacket* getFreePacket(MultiFramedRTPSource* ourSource); Boolean storePacket(BufferedPacket* bPacket); BufferedPacket* getNextCompletedPacket(Boolean& packetLossPreceded); void releaseUsedPacket(BufferedPacket* packet); void freePacket(BufferedPacket* packet) { if (packet != fSavedPacket) { delete packet; } else { fSavedPacketFree = True; } } Boolean isEmpty() const { return fHeadPacket == NULL; } void setThresholdTime(unsigned uSeconds) { fThresholdTime = uSeconds; } void resetHaveSeenFirstPacket() { fHaveSeenFirstPacket = False; } private: BufferedPacketFactory* fPacketFactory; unsigned fThresholdTime; // uSeconds Boolean fHaveSeenFirstPacket; // used to set initial "fNextExpectedSeqNo" unsigned short fNextExpectedSeqNo; BufferedPacket* fHeadPacket; BufferedPacket* fTailPacket; BufferedPacket* fSavedPacket; // to avoid calling new/free in the common case Boolean fSavedPacketFree; }; ////////// MultiFramedRTPSource implementation ////////// MultiFramedRTPSource ::MultiFramedRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, BufferedPacketFactory* packetFactory) : RTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency) { reset(); fReorderingBuffer = new ReorderingPacketBuffer(packetFactory); // Try to use a big receive buffer for RTP: increaseReceiveBufferTo(env, RTPgs->socketNum(), 50*1024); } void MultiFramedRTPSource::reset() { fCurrentPacketBeginsFrame = True; // by default fCurrentPacketCompletesFrame = True; // by default fAreDoingNetworkReads = False; fPacketReadInProgress = NULL; fNeedDelivery = False; fPacketLossInFragmentedFrame = False; } MultiFramedRTPSource::~MultiFramedRTPSource() { delete fReorderingBuffer; } Boolean MultiFramedRTPSource ::processSpecialHeader(BufferedPacket* /*packet*/, unsigned& resultSpecialHeaderSize) { // Default implementation: Assume no special header: resultSpecialHeaderSize = 0; return True; } Boolean MultiFramedRTPSource ::packetIsUsableInJitterCalculation(unsigned char* /*packet*/, unsigned /*packetSize*/) { // Default implementation: return True; } void MultiFramedRTPSource::doStopGettingFrames() { envir().taskScheduler().unscheduleDelayedTask(nextTask()); fRTPInterface.stopNetworkReading(); fReorderingBuffer->reset(); reset(); } void MultiFramedRTPSource::doGetNextFrame() { if (!fAreDoingNetworkReads) { // Turn on background read handling of incoming packets: fAreDoingNetworkReads = True; TaskScheduler::BackgroundHandlerProc* handler = (TaskScheduler::BackgroundHandlerProc*)&networkReadHandler; fRTPInterface.startNetworkReading(handler); } fSavedTo = fTo; fSavedMaxSize = fMaxSize; fFrameSize = 0; // for now fNeedDelivery = True; doGetNextFrame1(); } void MultiFramedRTPSource::doGetNextFrame1() { while (fNeedDelivery) { // If we 
already have packet data available, then deliver it now. Boolean packetLossPrecededThis; BufferedPacket* nextPacket = fReorderingBuffer->getNextCompletedPacket(packetLossPrecededThis); if (nextPacket == NULL) break; fNeedDelivery = False; if (nextPacket->useCount() == 0) { // Before using the packet, check whether it has a special header // that needs to be processed: unsigned specialHeaderSize; if (!processSpecialHeader(nextPacket, specialHeaderSize)) { // Something's wrong with the header; reject the packet: fReorderingBuffer->releaseUsedPacket(nextPacket); fNeedDelivery = True; break; } nextPacket->skip(specialHeaderSize); } // Check whether we're part of a multi-packet frame, and whether // there was packet loss that would render this packet unusable: if (fCurrentPacketBeginsFrame) { if (packetLossPrecededThis || fPacketLossInFragmentedFrame) { // We didn't get all of the previous frame. // Forget any data that we used from it: fTo = fSavedTo; fMaxSize = fSavedMaxSize; fFrameSize = 0; } fPacketLossInFragmentedFrame = False; } else if (packetLossPrecededThis) { // We're in a multi-packet frame, with preceding packet loss fPacketLossInFragmentedFrame = True; } if (fPacketLossInFragmentedFrame) { // This packet is unusable; reject it: fReorderingBuffer->releaseUsedPacket(nextPacket); fNeedDelivery = True; break; } // The packet is usable. Deliver all or part of it to our caller: unsigned frameSize; nextPacket->use(fTo, fMaxSize, frameSize, fNumTruncatedBytes, fCurPacketRTPSeqNum, fCurPacketRTPTimestamp, fPresentationTime, fCurPacketHasBeenSynchronizedUsingRTCP, fCurPacketMarkerBit); fFrameSize += frameSize; if (!nextPacket->hasUsableData()) { // We're completely done with this packet now fReorderingBuffer->releaseUsedPacket(nextPacket); } if (fCurrentPacketCompletesFrame) { // We have all the data that the client wants. if (fNumTruncatedBytes > 0) { envir() << "MultiFramedRTPSource::doGetNextFrame1(): The total received frame size exceeds the client's buffer size (" << fSavedMaxSize << "). " << fNumTruncatedBytes << " bytes of trailing data will be dropped!\n"; } // Call our own 'after getting' function, so that the downstream object can consume the data: if (fReorderingBuffer->isEmpty()) { // Common case optimization: There are no more queued incoming packets, so this code will not get // executed again without having first returned to the event loop. Call our 'after getting' function // directly, because there's no risk of a long chain of recursion (and thus stack overflow): afterGetting(this); } else { // Special case: Call our 'after getting' function via the event loop. nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this); } } else { // This packet contained fragmented data, and does not complete // the data that the client wants. 
Keep getting data: fTo += frameSize; fMaxSize -= frameSize; fNeedDelivery = True; } } } void MultiFramedRTPSource ::setPacketReorderingThresholdTime(unsigned uSeconds) { fReorderingBuffer->setThresholdTime(uSeconds); } #define ADVANCE(n) do { bPacket->skip(n); } while (0) void MultiFramedRTPSource::networkReadHandler(MultiFramedRTPSource* source, int /*mask*/) { source->networkReadHandler1(); } void MultiFramedRTPSource::networkReadHandler1() { BufferedPacket* bPacket = fPacketReadInProgress; if (bPacket == NULL) { // Normal case: Get a free BufferedPacket descriptor to hold the new network packet: bPacket = fReorderingBuffer->getFreePacket(this); } // Read the network packet, and perform sanity checks on the RTP header: Boolean readSuccess = False; do { Boolean packetReadWasIncomplete = fPacketReadInProgress != NULL; if (!bPacket->fillInData(fRTPInterface, packetReadWasIncomplete)) { if (bPacket->bytesAvailable() == 0) { envir() << "MultiFramedRTPSource error: Hit limit when reading incoming packet over TCP. Increase \"MAX_PACKET_SIZE\"\n"; } fPacketReadInProgress = NULL; break; } if (packetReadWasIncomplete) { // We need additional read(s) before we can process the incoming packet: fPacketReadInProgress = bPacket; return; } else { fPacketReadInProgress = NULL; } #ifdef TEST_LOSS setPacketReorderingThresholdTime(0); // don't wait for 'lost' packets to arrive out-of-order later if ((our_random()%10) == 0) break; // simulate 10% packet loss #endif // Check for the 12-byte RTP header: if (bPacket->dataSize() < 12) break; unsigned rtpHdr = ntohl(*(u_int32_t*)(bPacket->data())); ADVANCE(4); Boolean rtpMarkerBit = (rtpHdr&0x00800000) != 0; unsigned rtpTimestamp = ntohl(*(u_int32_t*)(bPacket->data()));ADVANCE(4); unsigned rtpSSRC = ntohl(*(u_int32_t*)(bPacket->data())); ADVANCE(4); // Check the RTP version number (it should be 2): if ((rtpHdr&0xC0000000) != 0x80000000) break; // Skip over any CSRC identifiers in the header: unsigned cc = (rtpHdr>>24)&0xF; if (bPacket->dataSize() < cc) break; ADVANCE(cc*4); // Check for (& ignore) any RTP header extension if (rtpHdr&0x10000000) { if (bPacket->dataSize() < 4) break; unsigned extHdr = ntohl(*(u_int32_t*)(bPacket->data())); ADVANCE(4); unsigned remExtSize = 4*(extHdr&0xFFFF); if (bPacket->dataSize() < remExtSize) break; ADVANCE(remExtSize); } // Discard any padding bytes: if (rtpHdr&0x20000000) { if (bPacket->dataSize() == 0) break; unsigned numPaddingBytes = (unsigned)(bPacket->data())[bPacket->dataSize()-1]; if (bPacket->dataSize() < numPaddingBytes) break; bPacket->removePadding(numPaddingBytes); } // Check the Payload Type. if ((unsigned char)((rtpHdr&0x007F0000)>>16) != rtpPayloadFormat()) { break; } // The rest of the packet is the usable data. Record and save it: if (rtpSSRC != fLastReceivedSSRC) { // The SSRC of incoming packets has changed. 
Unfortunately we don't yet handle streams that contain multiple SSRCs, // but we can handle a single-SSRC stream where the SSRC changes occasionally: fLastReceivedSSRC = rtpSSRC; fReorderingBuffer->resetHaveSeenFirstPacket(); } unsigned short rtpSeqNo = (unsigned short)(rtpHdr&0xFFFF); Boolean usableInJitterCalculation = packetIsUsableInJitterCalculation((bPacket->data()), bPacket->dataSize()); struct timeval presentationTime; // computed by: Boolean hasBeenSyncedUsingRTCP; // computed by: receptionStatsDB() .noteIncomingPacket(rtpSSRC, rtpSeqNo, rtpTimestamp, timestampFrequency(), usableInJitterCalculation, presentationTime, hasBeenSyncedUsingRTCP, bPacket->dataSize()); // Fill in the rest of the packet descriptor, and store it: struct timeval timeNow; gettimeofday(&timeNow, NULL); bPacket->assignMiscParams(rtpSeqNo, rtpTimestamp, presentationTime, hasBeenSyncedUsingRTCP, rtpMarkerBit, timeNow); if (!fReorderingBuffer->storePacket(bPacket)) break; readSuccess = True; } while (0); if (!readSuccess) fReorderingBuffer->freePacket(bPacket); doGetNextFrame1(); // If we didn't get proper data this time, we'll get another chance } ////////// BufferedPacket and BufferedPacketFactory implementation ///// #define MAX_PACKET_SIZE 20000 BufferedPacket::BufferedPacket() : fPacketSize(MAX_PACKET_SIZE), fBuf(new unsigned char[MAX_PACKET_SIZE]), fNextPacket(NULL) { } BufferedPacket::~BufferedPacket() { delete fNextPacket; delete[] fBuf; } void BufferedPacket::reset() { fHead = fTail = 0; fUseCount = 0; fIsFirstPacket = False; // by default } // The following function has been deprecated: unsigned BufferedPacket ::nextEnclosedFrameSize(unsigned char*& /*framePtr*/, unsigned dataSize) { // By default, use the entire buffered data, even though it may consist // of more than one frame, on the assumption that the client doesn't // care. (This is more efficient than delivering a frame at a time) return dataSize; } void BufferedPacket ::getNextEnclosedFrameParameters(unsigned char*& framePtr, unsigned dataSize, unsigned& frameSize, unsigned& frameDurationInMicroseconds) { // By default, use the entire buffered data, even though it may consist // of more than one frame, on the assumption that the client doesn't // care. (This is more efficient than delivering a frame at a time) // For backwards-compatibility with existing uses of (the now deprecated) // "nextEnclosedFrameSize()", call that function to implement this one: frameSize = nextEnclosedFrameSize(framePtr, dataSize); frameDurationInMicroseconds = 0; // by default. Subclasses should correct this. 
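// (Payload-format subclasses whose packets can contain more than one frame are expected
// to redefine this, returning the size and duration of just the next enclosed frame.)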
} Boolean BufferedPacket::fillInData(RTPInterface& rtpInterface, Boolean& packetReadWasIncomplete) { if (!packetReadWasIncomplete) reset(); unsigned numBytesRead; struct sockaddr_in fromAddress; unsigned const maxBytesToRead = bytesAvailable(); if (maxBytesToRead == 0) return False; // exceeded buffer size when reading over TCP if (!rtpInterface.handleRead(&fBuf[fTail], maxBytesToRead, numBytesRead, fromAddress, packetReadWasIncomplete)) { return False; } fTail += numBytesRead; return True; } void BufferedPacket ::assignMiscParams(unsigned short rtpSeqNo, unsigned rtpTimestamp, struct timeval presentationTime, Boolean hasBeenSyncedUsingRTCP, Boolean rtpMarkerBit, struct timeval timeReceived) { fRTPSeqNo = rtpSeqNo; fRTPTimestamp = rtpTimestamp; fPresentationTime = presentationTime; fHasBeenSyncedUsingRTCP = hasBeenSyncedUsingRTCP; fRTPMarkerBit = rtpMarkerBit; fTimeReceived = timeReceived; } void BufferedPacket::skip(unsigned numBytes) { fHead += numBytes; if (fHead > fTail) fHead = fTail; } void BufferedPacket::removePadding(unsigned numBytes) { if (numBytes > fTail-fHead) numBytes = fTail-fHead; fTail -= numBytes; } void BufferedPacket::appendData(unsigned char* newData, unsigned numBytes) { if (numBytes > fPacketSize-fTail) numBytes = fPacketSize - fTail; memmove(&fBuf[fTail], newData, numBytes); fTail += numBytes; } void BufferedPacket::use(unsigned char* to, unsigned toSize, unsigned& bytesUsed, unsigned& bytesTruncated, unsigned short& rtpSeqNo, unsigned& rtpTimestamp, struct timeval& presentationTime, Boolean& hasBeenSyncedUsingRTCP, Boolean& rtpMarkerBit) { unsigned char* origFramePtr = &fBuf[fHead]; unsigned char* newFramePtr = origFramePtr; // may change in the call below unsigned frameSize, frameDurationInMicroseconds; getNextEnclosedFrameParameters(newFramePtr, fTail - fHead, frameSize, frameDurationInMicroseconds); if (frameSize > toSize) { bytesTruncated += frameSize - toSize; bytesUsed = toSize; } else { bytesTruncated = 0; bytesUsed = frameSize; } memmove(to, newFramePtr, bytesUsed); fHead += (newFramePtr - origFramePtr) + frameSize; ++fUseCount; rtpSeqNo = fRTPSeqNo; rtpTimestamp = fRTPTimestamp; presentationTime = fPresentationTime; hasBeenSyncedUsingRTCP = fHasBeenSyncedUsingRTCP; rtpMarkerBit = fRTPMarkerBit; // Update "fPresentationTime" for the next enclosed frame (if any): fPresentationTime.tv_usec += frameDurationInMicroseconds; if (fPresentationTime.tv_usec >= 1000000) { fPresentationTime.tv_sec += fPresentationTime.tv_usec/1000000; fPresentationTime.tv_usec = fPresentationTime.tv_usec%1000000; } } BufferedPacketFactory::BufferedPacketFactory() { } BufferedPacketFactory::~BufferedPacketFactory() { } BufferedPacket* BufferedPacketFactory ::createNewPacket(MultiFramedRTPSource* /*ourSource*/) { return new BufferedPacket; } ////////// ReorderingPacketBuffer implementation ////////// ReorderingPacketBuffer ::ReorderingPacketBuffer(BufferedPacketFactory* packetFactory) : fThresholdTime(100000) /* default reordering threshold: 100 ms */, fHaveSeenFirstPacket(False), fHeadPacket(NULL), fTailPacket(NULL), fSavedPacket(NULL), fSavedPacketFree(True) { fPacketFactory = (packetFactory == NULL) ? 
(new BufferedPacketFactory) : packetFactory; } ReorderingPacketBuffer::~ReorderingPacketBuffer() { reset(); delete fPacketFactory; } void ReorderingPacketBuffer::reset() { if (fSavedPacketFree) delete fSavedPacket; // because fSavedPacket is not in the list delete fHeadPacket; // will also delete fSavedPacket if it's in the list resetHaveSeenFirstPacket(); fHeadPacket = fTailPacket = fSavedPacket = NULL; } BufferedPacket* ReorderingPacketBuffer::getFreePacket(MultiFramedRTPSource* ourSource) { if (fSavedPacket == NULL) { // we're being called for the first time fSavedPacket = fPacketFactory->createNewPacket(ourSource); fSavedPacketFree = True; } if (fSavedPacketFree == True) { fSavedPacketFree = False; return fSavedPacket; } else { return fPacketFactory->createNewPacket(ourSource); } } Boolean ReorderingPacketBuffer::storePacket(BufferedPacket* bPacket) { unsigned short rtpSeqNo = bPacket->rtpSeqNo(); if (!fHaveSeenFirstPacket) { fNextExpectedSeqNo = rtpSeqNo; // initialization bPacket->isFirstPacket() = True; fHaveSeenFirstPacket = True; } // Ignore this packet if its sequence number is less than the one // that we're looking for (in this case, it's been excessively delayed). if (seqNumLT(rtpSeqNo, fNextExpectedSeqNo)) return False; if (fTailPacket == NULL) { // Common case: There are no packets in the queue; this will be the first one: bPacket->nextPacket() = NULL; fHeadPacket = fTailPacket = bPacket; return True; } if (seqNumLT(fTailPacket->rtpSeqNo(), rtpSeqNo)) { // The next-most common case: There are packets already in the queue; this packet arrived in order => put it at the tail: bPacket->nextPacket() = NULL; fTailPacket->nextPacket() = bPacket; fTailPacket = bPacket; return True; } if (rtpSeqNo == fTailPacket->rtpSeqNo()) { // This is a duplicate packet - ignore it return False; } // Rare case: This packet is out-of-order. Run through the list (from the head), to figure out where it belongs: BufferedPacket* beforePtr = NULL; BufferedPacket* afterPtr = fHeadPacket; while (afterPtr != NULL) { if (seqNumLT(rtpSeqNo, afterPtr->rtpSeqNo())) break; // it comes here if (rtpSeqNo == afterPtr->rtpSeqNo()) { // This is a duplicate packet - ignore it return False; } beforePtr = afterPtr; afterPtr = afterPtr->nextPacket(); } // Link our new packet between "beforePtr" and "afterPtr": bPacket->nextPacket() = afterPtr; if (beforePtr == NULL) { fHeadPacket = bPacket; } else { beforePtr->nextPacket() = bPacket; } return True; } void ReorderingPacketBuffer::releaseUsedPacket(BufferedPacket* packet) { // ASSERT: packet == fHeadPacket // ASSERT: fNextExpectedSeqNo == packet->rtpSeqNo() ++fNextExpectedSeqNo; // because we're finished with this packet now fHeadPacket = fHeadPacket->nextPacket(); if (!fHeadPacket) { fTailPacket = NULL; } packet->nextPacket() = NULL; freePacket(packet); } BufferedPacket* ReorderingPacketBuffer ::getNextCompletedPacket(Boolean& packetLossPreceded) { if (fHeadPacket == NULL) return NULL; // Check whether the next packet we want is already at the head // of the queue: // ASSERT: fHeadPacket->rtpSeqNo() >= fNextExpectedSeqNo if (fHeadPacket->rtpSeqNo() == fNextExpectedSeqNo) { packetLossPreceded = fHeadPacket->isFirstPacket(); // (The very first packet is treated as if there was packet loss beforehand.) return fHeadPacket; } // We're still waiting for our desired packet to arrive. 
However, if // our time threshold has been exceeded, then forget it, and return // the head packet instead: Boolean timeThresholdHasBeenExceeded; if (fThresholdTime == 0) { timeThresholdHasBeenExceeded = True; // optimization } else { struct timeval timeNow; gettimeofday(&timeNow, NULL); unsigned uSecondsSinceReceived = (timeNow.tv_sec - fHeadPacket->timeReceived().tv_sec)*1000000 + (timeNow.tv_usec - fHeadPacket->timeReceived().tv_usec); timeThresholdHasBeenExceeded = uSecondsSinceReceived > fThresholdTime; } if (timeThresholdHasBeenExceeded) { fNextExpectedSeqNo = fHeadPacket->rtpSeqNo(); // we've given up on earlier packets now packetLossPreceded = True; return fHeadPacket; } // Otherwise, keep waiting for our desired packet to arrive: return NULL; } live/liveMedia/T140TextMatroskaFileServerMediaSubsession.hh000444 001751 000000 00000004276 12265042432 024174 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an text (subtitle) track within a Matroska file. // C++ header #ifndef _T140_TEXT_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #define _T140_TEXT_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif #ifndef _MATROSKA_FILE_SERVER_DEMUX_HH #include "MatroskaFileServerDemux.hh" #endif class T140TextMatroskaFileServerMediaSubsession: public FileServerMediaSubsession { public: static T140TextMatroskaFileServerMediaSubsession* createNew(MatroskaFileServerDemux& demux, unsigned trackNumber); private: T140TextMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber); // called only by createNew(); virtual ~T140TextMatroskaFileServerMediaSubsession(); private: // redefined virtual functions virtual float duration() const; virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); private: MatroskaFileServerDemux& fOurDemux; unsigned fTrackNumber; }; #endif live/liveMedia/StreamReplicator.cpp000444 001751 000000 00000034115 12265042432 017626 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class that can be used to create (possibly multiple) 'replicas' of an incoming stream. // Implementation. #include "StreamReplicator.hh" ////////// Definition of "StreamReplica": The class that implements each stream replica ////////// class StreamReplica: public FramedSource { protected: friend class StreamReplicator; StreamReplica(StreamReplicator& ourReplicator); // called only by "StreamReplicator::createStreamReplica()" virtual ~StreamReplica(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void doStopGettingFrames(); private: static void copyReceivedFrame(StreamReplica* toReplica, StreamReplica* fromReplica); private: StreamReplicator& fOurReplicator; int fFrameIndex; // 0 or 1, depending upon which frame we're currently requesting; could also be -1 if we've stopped playing Boolean fDeliveryInProgress; // Replicas that are currently awaiting data are kept in a (singly-linked) list: StreamReplica* fNext; }; ////////// StreamReplicator implementation ////////// StreamReplicator* StreamReplicator::createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean deleteWhenLastReplicaDies) { return new StreamReplicator(env, inputSource, deleteWhenLastReplicaDies); } StreamReplicator::StreamReplicator(UsageEnvironment& env, FramedSource* inputSource, Boolean deleteWhenLastReplicaDies) : Medium(env), fInputSource(inputSource), fDeleteWhenLastReplicaDies(deleteWhenLastReplicaDies), fInputSourceHasClosed(False), fNumReplicas(0), fNumActiveReplicas(0), fNumDeliveriesMadeSoFar(0), fFrameIndex(0), fMasterReplica(NULL), fReplicasAwaitingCurrentFrame(NULL), fReplicasAwaitingNextFrame(NULL) { } StreamReplicator::~StreamReplicator() { Medium::close(fInputSource); } FramedSource* StreamReplicator::createStreamReplica() { ++fNumReplicas; return new StreamReplica(*this); } void StreamReplicator::getNextFrame(StreamReplica* replica) { if (fInputSourceHasClosed) { // handle closure instead FramedSource::handleClosure(replica); return; } if (replica->fFrameIndex == -1) { // This replica had stopped playing (or had just been created), but is now actively reading. Note this: replica->fFrameIndex = fFrameIndex; ++fNumActiveReplicas; } if (fMasterReplica == NULL) { // This is the first replica to request the next unread frame. Make it the 'master' replica - meaning that we read the frame // into its buffer, and then copy from this into the other replicas' buffers. fMasterReplica = replica; // Arrange to read the next frame into this replica's buffer: if (fInputSource != NULL) fInputSource->getNextFrame(fMasterReplica->fTo, fMasterReplica->fMaxSize, afterGettingFrame, this, onSourceClosure, this); } else if (replica->fFrameIndex != fFrameIndex) { // This replica is already asking for the next frame (because it has already received the current frame). Enqueue it: replica->fNext = fReplicasAwaitingNextFrame; fReplicasAwaitingNextFrame = replica; } else { // This replica is asking for the current frame.
Enqueue it: replica->fNext = fReplicasAwaitingCurrentFrame; fReplicasAwaitingCurrentFrame = replica; if (fInputSource != NULL && !fInputSource->isCurrentlyAwaitingData()) { // The current frame has already arrived, so deliver it to this replica now: deliverReceivedFrame(); } } } void StreamReplicator::deactivateStreamReplica(StreamReplica* replicaBeingDeactivated) { // Assert: fNumActiveReplicas > 0 if (fNumReplicas == 0) fprintf(stderr, "StreamReplicator::deactivateStreamReplica() Internal Error!\n"); // should not happen --fNumActiveReplicas; if (replicaBeingDeactivated->fDeliveryInProgress) --fNumDeliveriesMadeSoFar; // hack in case we're called while in the middle of a frame delivery to us // Check whether the replica being deactivated is the 'master' replica, or is enqueued awaiting a frame: if (replicaBeingDeactivated == fMasterReplica) { // We need to replace the 'master replica', if we can: if (fReplicasAwaitingCurrentFrame == NULL) { // There's currently no replacement 'master replica' fMasterReplica = NULL; } else { // There's another replica that we can use as a replacement 'master replica': fMasterReplica = fReplicasAwaitingCurrentFrame; fReplicasAwaitingCurrentFrame = fReplicasAwaitingCurrentFrame->fNext; fMasterReplica->fNext = NULL; } // Check whether the read into the old master replica's buffer is still pending, or has completed: if (fInputSource != NULL) { if (fInputSource->isCurrentlyAwaitingData()) { // We have a pending read into the old master replica's buffer. // We need to stop it, and retry the read with a new master (if available) fInputSource->stopGettingFrames(); if (fMasterReplica != NULL) { fInputSource->getNextFrame(fMasterReplica->fTo, fMasterReplica->fMaxSize, afterGettingFrame, this, onSourceClosure, this); } } else { // The read into the old master replica's buffer has already completed. Copy the data to the new master replica (if any): if (fMasterReplica != NULL) { StreamReplica::copyReceivedFrame(fMasterReplica, replicaBeingDeactivated); } else { // We don't have a new master replica, so we can't copy the received frame to any new replica that might ask for it. // Fortunately this should be a very rare occurrence. 
} } } } else { // The replica that's being removed was not our 'master replica', but make sure it's not on either of our queues: if (fReplicasAwaitingCurrentFrame != NULL) { if (replicaBeingDeactivated == fReplicasAwaitingCurrentFrame) { fReplicasAwaitingCurrentFrame = replicaBeingDeactivated->fNext; replicaBeingDeactivated->fNext = NULL; } else { for (StreamReplica* r1 = fReplicasAwaitingCurrentFrame; r1->fNext != NULL; r1 = r1->fNext) { if (r1->fNext == replicaBeingDeactivated) { r1->fNext = replicaBeingDeactivated->fNext; replicaBeingDeactivated->fNext = NULL; break; } } } } if (fReplicasAwaitingNextFrame != NULL) { if (replicaBeingDeactivated == fReplicasAwaitingNextFrame) { fReplicasAwaitingNextFrame = replicaBeingDeactivated->fNext; replicaBeingDeactivated->fNext = NULL; } else { for (StreamReplica* r2 = fReplicasAwaitingNextFrame; r2->fNext != NULL; r2 = r2->fNext) { if (r2->fNext == replicaBeingDeactivated) { r2->fNext = replicaBeingDeactivated->fNext; replicaBeingDeactivated->fNext = NULL; break; } } } } } if (fNumActiveReplicas == 0 && fInputSource != NULL) fInputSource->stopGettingFrames(); // tell our source to stop too } void StreamReplicator::removeStreamReplica(StreamReplica* replicaBeingRemoved) { // Assert: fNumReplicas > 0 if (fNumReplicas == 0) fprintf(stderr, "StreamReplicator::removeStreamReplica() Internal Error!\n"); // should not happen --fNumReplicas; // If this was the last replica, then delete ourselves (if we were set up to do so): if (fNumReplicas == 0 && fDeleteWhenLastReplicaDies) { delete this; return; } // Now handle the replica that's being removed the same way that we would if it were merely being deactivated: if (replicaBeingRemoved->fFrameIndex != -1) { // i.e., we haven't already done this deactivateStreamReplica(replicaBeingRemoved); } } void StreamReplicator::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { ((StreamReplicator*)clientData)->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void StreamReplicator::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // The frame was read into our master replica's buffer. Update the master replica's state, but don't complete delivery to it // just yet. We do that later, after we're sure that we've delivered it to all other replicas. 
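// (The copy to the other waiting replicas, and eventually the completion of delivery
// to the master replica itself, happens in "deliverReceivedFrame()", called below.)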
fMasterReplica->fFrameSize = frameSize; fMasterReplica->fNumTruncatedBytes = numTruncatedBytes; fMasterReplica->fPresentationTime = presentationTime; fMasterReplica->fDurationInMicroseconds = durationInMicroseconds; deliverReceivedFrame(); } void StreamReplicator::onSourceClosure(void* clientData) { ((StreamReplicator*)clientData)->onSourceClosure(); } void StreamReplicator::onSourceClosure() { fInputSourceHasClosed = True; // Signal the closure to each replica that is currently awaiting a frame: StreamReplica* replica; while ((replica = fReplicasAwaitingCurrentFrame) != NULL) { fReplicasAwaitingCurrentFrame = replica->fNext; replica->fNext = NULL; FramedSource::handleClosure(replica); } while ((replica = fReplicasAwaitingNextFrame) != NULL) { fReplicasAwaitingNextFrame = replica->fNext; replica->fNext = NULL; FramedSource::handleClosure(replica); } if ((replica = fMasterReplica) != NULL) { fMasterReplica = NULL; FramedSource::handleClosure(replica); } } void StreamReplicator::deliverReceivedFrame() { // The 'master replica' has received its copy of the current frame. // Copy it (and complete delivery) to any other replica that has requested this frame. // Then, if no more requests for this frame are expected, complete delivery to the 'master replica' itself. StreamReplica* replica; while ((replica = fReplicasAwaitingCurrentFrame) != NULL) { fReplicasAwaitingCurrentFrame = replica->fNext; replica->fNext = NULL; replica->fDeliveryInProgress = True; // Assert: fMasterReplica != NULL if (fMasterReplica == NULL) fprintf(stderr, "StreamReplicator::deliverReceivedFrame() Internal Error 1!\n"); // shouldn't happen StreamReplica::copyReceivedFrame(replica, fMasterReplica); replica->fFrameIndex = 1 - replica->fFrameIndex; // toggle it (0<->1), because this replica no longer awaits the current frame ++fNumDeliveriesMadeSoFar; // Assert: fNumDeliveriesMadeSoFar < fNumActiveReplicas; // because we still have the 'master replica' to deliver to if (!(fNumDeliveriesMadeSoFar < fNumActiveReplicas)) fprintf(stderr, "StreamReplicator::deliverReceivedFrame() Internal Error 2(%d,%d)!\n", fNumDeliveriesMadeSoFar, fNumActiveReplicas); // should not happen // Complete delivery to this replica: FramedSource::afterGetting(replica); replica->fDeliveryInProgress = False; } if (fNumDeliveriesMadeSoFar == fNumActiveReplicas - 1 && fMasterReplica != NULL) { // No more requests for this frame are expected, so complete delivery to the 'master replica': replica = fMasterReplica; fMasterReplica = NULL; replica->fFrameIndex = 1 - replica->fFrameIndex; // toggle it (0<->1), because this replica no longer awaits the current frame fFrameIndex = 1 - fFrameIndex; // toggle it (0<->1) for the next frame fNumDeliveriesMadeSoFar = 0; // reset for the next frame if (fReplicasAwaitingNextFrame != NULL) { // One of the other replicas has already requested the next frame, so make it the next 'master replica': fMasterReplica = fReplicasAwaitingNextFrame; fReplicasAwaitingNextFrame = fReplicasAwaitingNextFrame->fNext; fMasterReplica->fNext = NULL; // Arrange to read the next frame into this replica's buffer: if (fInputSource != NULL) fInputSource->getNextFrame(fMasterReplica->fTo, fMasterReplica->fMaxSize, afterGettingFrame, this, onSourceClosure, this); } // Move any other replicas that had already requested the next frame to the 'requesting current frame' list: // Assert: fReplicasAwaitingCurrentFrame == NULL; if (!(fReplicasAwaitingCurrentFrame == NULL)) fprintf(stderr, "StreamReplicator::deliverReceivedFrame() Internal Error 3!\n"); 
// should not happen fReplicasAwaitingCurrentFrame = fReplicasAwaitingNextFrame; fReplicasAwaitingNextFrame = NULL; FramedSource::afterGetting(replica); } } ////////// StreamReplica implementation ////////// StreamReplica::StreamReplica(StreamReplicator& ourReplicator) : FramedSource(ourReplicator.envir()), fOurReplicator(ourReplicator), fFrameIndex(-1/*we haven't started playing yet*/), fDeliveryInProgress(False), fNext(NULL) { } StreamReplica::~StreamReplica() { fOurReplicator.removeStreamReplica(this); } void StreamReplica::doGetNextFrame() { fOurReplicator.getNextFrame(this); } void StreamReplica::doStopGettingFrames() { if (fFrameIndex != -1) { // we had been activated fFrameIndex = -1; // When we start reading again, this will tell the replicator that we were previously inactive. fOurReplicator.deactivateStreamReplica(this); } } void StreamReplica::copyReceivedFrame(StreamReplica* toReplica, StreamReplica* fromReplica) { // First, figure out how much data to copy. ("toReplica" might have a smaller buffer than "fromReplica".) unsigned numNewBytesToTruncate = toReplica->fMaxSize < fromReplica->fFrameSize ? fromReplica->fFrameSize - toReplica->fMaxSize : 0; toReplica->fFrameSize = fromReplica->fFrameSize - numNewBytesToTruncate; toReplica->fNumTruncatedBytes = fromReplica->fNumTruncatedBytes + numNewBytesToTruncate; memmove(toReplica->fTo, fromReplica->fTo, toReplica->fFrameSize); toReplica->fPresentationTime = fromReplica->fPresentationTime; toReplica->fDurationInMicroseconds = fromReplica->fDurationInMicroseconds; } live/liveMedia/MatroskaFileServerDemux.cpp000444 001751 000000 00000015306 12265042432 021122 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A server demultiplexor for a Matroska file // Implementation #include "MatroskaFileServerDemux.hh" #include "MP3AudioMatroskaFileServerMediaSubsession.hh" #include "AACAudioMatroskaFileServerMediaSubsession.hh" #include "AC3AudioMatroskaFileServerMediaSubsession.hh" #include "VorbisAudioMatroskaFileServerMediaSubsession.hh" #include "H264VideoMatroskaFileServerMediaSubsession.hh" #include "H265VideoMatroskaFileServerMediaSubsession.hh" #include "VP8VideoMatroskaFileServerMediaSubsession.hh" #include "T140TextMatroskaFileServerMediaSubsession.hh" void MatroskaFileServerDemux ::createNew(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData, char const* preferredLanguage) { (void)new MatroskaFileServerDemux(env, fileName, onCreation, onCreationClientData, preferredLanguage); } ServerMediaSubsession* MatroskaFileServerDemux::newServerMediaSubsession() { unsigned dummyResultTrackNumber; return newServerMediaSubsession(dummyResultTrackNumber); } ServerMediaSubsession* MatroskaFileServerDemux ::newServerMediaSubsession(unsigned& resultTrackNumber) { ServerMediaSubsession* result; resultTrackNumber = 0; for (result = NULL; result == NULL && fNextTrackTypeToCheck != MATROSKA_TRACK_TYPE_OTHER; fNextTrackTypeToCheck <<= 1) { if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_VIDEO) resultTrackNumber = fOurMatroskaFile->chosenVideoTrackNumber(); else if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_AUDIO) resultTrackNumber = fOurMatroskaFile->chosenAudioTrackNumber(); else if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_SUBTITLE) resultTrackNumber = fOurMatroskaFile->chosenSubtitleTrackNumber(); result = newServerMediaSubsessionByTrackNumber(resultTrackNumber); } return result; } ServerMediaSubsession* MatroskaFileServerDemux ::newServerMediaSubsessionByTrackNumber(unsigned trackNumber) { MatroskaTrack* track = fOurMatroskaFile->lookup(trackNumber); if (track == NULL) return NULL; // Use the track's "codecID" string to figure out which "ServerMediaSubsession" subclass to use: ServerMediaSubsession* result = NULL; if (strncmp(track->codecID, "A_MPEG", 6) == 0) { track->mimeType = "audio/MPEG"; result = MP3AudioMatroskaFileServerMediaSubsession::createNew(*this, track->trackNumber, False, NULL); } else if (strncmp(track->codecID, "A_AAC", 5) == 0) { track->mimeType = "audio/AAC"; result = AACAudioMatroskaFileServerMediaSubsession::createNew(*this, track->trackNumber); } else if (strncmp(track->codecID, "A_AC3", 5) == 0) { track->mimeType = "audio/AC3"; result = AC3AudioMatroskaFileServerMediaSubsession::createNew(*this, track->trackNumber); } else if (strncmp(track->codecID, "A_VORBIS", 8) == 0) { track->mimeType = "audio/VORBIS"; result = VorbisAudioMatroskaFileServerMediaSubsession::createNew(*this, track->trackNumber); } else if (strcmp(track->codecID, "V_MPEG4/ISO/AVC") == 0) { track->mimeType = "video/H264"; result = H264VideoMatroskaFileServerMediaSubsession::createNew(*this, track->trackNumber); } else if (strcmp(track->codecID, "V_MPEGH/ISO/HEVC") == 0) { track->mimeType = "video/H265"; result = H265VideoMatroskaFileServerMediaSubsession::createNew(*this, track->trackNumber); } else if (strncmp(track->codecID, "V_VP8", 5) == 0) { track->mimeType = "video/VP8"; result = VP8VideoMatroskaFileServerMediaSubsession::createNew(*this, track->trackNumber); } else if (strncmp(track->codecID, "S_TEXT", 6) == 0) { track->mimeType = "text/T140"; result = T140TextMatroskaFileServerMediaSubsession::createNew(*this, track->trackNumber); } if (result != NULL) { 
#ifdef DEBUG fprintf(stderr, "Created 'ServerMediaSubsession' object for track #%d: %s (%s)\n", track->trackNumber, track->codecID, track->mimeType); #endif } return result; } FramedSource* MatroskaFileServerDemux::newDemuxedTrack(unsigned clientSessionId, unsigned trackNumber) { MatroskaDemux* demuxToUse = NULL; if (clientSessionId != 0 && clientSessionId == fLastClientSessionId) { demuxToUse = fLastCreatedDemux; // use the same demultiplexor as before // Note: This code relies upon the fact that the creation of streams for different // client sessions does not overlap - so all demuxed tracks are created for one "MatroskaDemux" at a time. // Also, the "clientSessionId != 0" test is a hack, because 'session 0' is special; its audio and video streams // are created and destroyed one-at-a-time, rather than both streams being // created, and then (later) both streams being destroyed (as is the case // for other ('real') session ids). Because of this, a separate demultiplexor is used for each 'session 0' track. } if (demuxToUse == NULL) demuxToUse = fOurMatroskaFile->newDemux(); fLastClientSessionId = clientSessionId; fLastCreatedDemux = demuxToUse; return demuxToUse->newDemuxedTrackByTrackNumber(trackNumber); } MatroskaFileServerDemux ::MatroskaFileServerDemux(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData, char const* preferredLanguage) : Medium(env), fFileName(fileName), fOnCreation(onCreation), fOnCreationClientData(onCreationClientData), fNextTrackTypeToCheck(0x1), fLastClientSessionId(0), fLastCreatedDemux(NULL) { MatroskaFile::createNew(env, fileName, onMatroskaFileCreation, this, preferredLanguage); } MatroskaFileServerDemux::~MatroskaFileServerDemux() { Medium::close(fOurMatroskaFile); } void MatroskaFileServerDemux::onMatroskaFileCreation(MatroskaFile* newFile, void* clientData) { ((MatroskaFileServerDemux*)clientData)->onMatroskaFileCreation(newFile); } void MatroskaFileServerDemux::onMatroskaFileCreation(MatroskaFile* newFile) { fOurMatroskaFile = newFile; // Now, call our own creation notification function: if (fOnCreation != NULL) (*fOnCreation)(this, fOnCreationClientData); } live/liveMedia/H264or5VideoRTPSink.cpp000444 001751 000000 00000026331 12265042432 017622 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for H.264 or H.265 video // Implementation #include "H264or5VideoRTPSink.hh" #include "H264or5VideoStreamFramer.hh" ////////// H264or5Fragmenter definition ////////// // Because of the idiosyncrasies of the H.264 RTP payload format, we implement // "H264or5VideoRTPSink" using a separate "H264or5Fragmenter" class that delivers, // to the "H264or5VideoRTPSink", only fragments that will fit within an outgoing // RTP packet.
I.e., we implement fragmentation in this separate "H264or5Fragmenter" // class, rather than in "H264or5VideoRTPSink". // (Note: This class should be used only by "H264or5VideoRTPSink", or a subclass.) class H264or5Fragmenter: public FramedFilter { public: H264or5Fragmenter(int hNumber, UsageEnvironment& env, FramedSource* inputSource, unsigned inputBufferMax, unsigned maxOutputPacketSize); virtual ~H264or5Fragmenter(); Boolean lastFragmentCompletedNALUnit() const { return fLastFragmentCompletedNALUnit; } private: // redefined virtual functions: virtual void doGetNextFrame(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); private: int fHNumber; unsigned fInputBufferSize; unsigned fMaxOutputPacketSize; unsigned char* fInputBuffer; unsigned fNumValidDataBytes; unsigned fCurDataOffset; unsigned fSaveNumTruncatedBytes; Boolean fLastFragmentCompletedNALUnit; }; ////////// H264or5VideoRTPSink implementation ////////// H264or5VideoRTPSink ::H264or5VideoRTPSink(int hNumber, UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int8_t const* vps, unsigned vpsSize, u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize) : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, hNumber == 264 ? "H264" : "H265"), fHNumber(hNumber), fOurFragmenter(NULL), fFmtpSDPLine(NULL) { if (vps != NULL) { fVPSSize = vpsSize; fVPS = new u_int8_t[fVPSSize]; memmove(fVPS, vps, fVPSSize); } else { fVPSSize = 0; fVPS = NULL; } if (sps != NULL) { fSPSSize = spsSize; fSPS = new u_int8_t[fSPSSize]; memmove(fSPS, sps, fSPSSize); } else { fSPSSize = 0; fSPS = NULL; } if (pps != NULL) { fPPSSize = ppsSize; fPPS = new u_int8_t[fPPSSize]; memmove(fPPS, pps, fPPSSize); } else { fPPSSize = 0; fPPS = NULL; } } H264or5VideoRTPSink::~H264or5VideoRTPSink() { fSource = fOurFragmenter; // hack: in case "fSource" had gotten set to NULL before we were called delete[] fFmtpSDPLine; delete[] fVPS; delete[] fSPS; delete[] fPPS; stopPlaying(); // call this now, because we won't have our 'fragmenter' when the base class destructor calls it later. // Close our 'fragmenter' as well: Medium::close(fOurFragmenter); fSource = NULL; // for the base class destructor, which gets called next } Boolean H264or5VideoRTPSink::continuePlaying() { // First, check whether we have a 'fragmenter' class set up yet. // If not, create it now: if (fOurFragmenter == NULL) { fOurFragmenter = new H264or5Fragmenter(fHNumber, envir(), fSource, OutPacketBuffer::maxSize, ourMaxPacketSize() - 12/*RTP hdr size*/); } else { fOurFragmenter->reassignInputSource(fSource); } fSource = fOurFragmenter; // Then call the parent class's implementation: return MultiFramedRTPSink::continuePlaying(); } void H264or5VideoRTPSink::doSpecialFrameHandling(unsigned /*fragmentationOffset*/, unsigned char* /*frameStart*/, unsigned /*numBytesInFrame*/, struct timeval framePresentationTime, unsigned /*numRemainingBytes*/) { // Set the RTP 'M' (marker) bit iff // 1/ The most recently delivered fragment was the end of (or the only fragment of) an NAL unit, and // 2/ This NAL unit was the last NAL unit of an 'access unit' (i.e. video frame). 
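// (Note: both conditions matter here. A fragmented NAL unit may set the marker bit only on its final fragment, // and - in the H.264/H.265 RTP payload formats - the marker bit marks the last packet of an access unit, // giving a receiver an early hint that a complete video frame can now be decoded.)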
if (fOurFragmenter != NULL) { H264or5VideoStreamFramer* framerSource = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource()); // This relies on our fragmenter's source being a "H264or5VideoStreamFramer". if (((H264or5Fragmenter*)fOurFragmenter)->lastFragmentCompletedNALUnit() && framerSource != NULL && framerSource->pictureEndMarker()) { setMarkerBit(); framerSource->pictureEndMarker() = False; } } setTimestamp(framePresentationTime); } Boolean H264or5VideoRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { return False; } ////////// H264or5Fragmenter implementation ////////// H264or5Fragmenter::H264or5Fragmenter(int hNumber, UsageEnvironment& env, FramedSource* inputSource, unsigned inputBufferMax, unsigned maxOutputPacketSize) : FramedFilter(env, inputSource), fHNumber(hNumber), fInputBufferSize(inputBufferMax+1), fMaxOutputPacketSize(maxOutputPacketSize), fNumValidDataBytes(1), fCurDataOffset(1), fSaveNumTruncatedBytes(0), fLastFragmentCompletedNALUnit(True) { fInputBuffer = new unsigned char[fInputBufferSize]; } H264or5Fragmenter::~H264or5Fragmenter() { delete[] fInputBuffer; detachInputSource(); // so that the subsequent ~FramedFilter() doesn't delete it } void H264or5Fragmenter::doGetNextFrame() { if (fNumValidDataBytes == 1) { // We have no NAL unit data currently in the buffer. Read a new one: fInputSource->getNextFrame(&fInputBuffer[1], fInputBufferSize - 1, afterGettingFrame, this, FramedSource::handleClosure, this); } else { // We have NAL unit data in the buffer. There are three cases to consider: // 1. There is a new NAL unit in the buffer, and it's small enough to deliver // to the RTP sink (as is). // 2. There is a new NAL unit in the buffer, but it's too large to deliver to // the RTP sink in its entirety. Deliver the first fragment of this data, // as a FU packet, with one extra preceding header byte (for the "FU header"). // 3. There is a NAL unit in the buffer, and we've already delivered some // fragment(s) of this. Deliver the next fragment of this data, // as a FU packet, with two (H.264) or three (H.265) extra preceding header bytes // (for the "NAL header" and the "FU header"). if (fMaxSize < fMaxOutputPacketSize) { // shouldn't happen envir() << "H264or5Fragmenter::doGetNextFrame(): fMaxSize (" << fMaxSize << ") is smaller than expected\n"; } else { fMaxSize = fMaxOutputPacketSize; } fLastFragmentCompletedNALUnit = True; // by default if (fCurDataOffset == 1) { // case 1 or 2 if (fNumValidDataBytes - 1 <= fMaxSize) { // case 1 memmove(fTo, &fInputBuffer[1], fNumValidDataBytes - 1); fFrameSize = fNumValidDataBytes - 1; fCurDataOffset = fNumValidDataBytes; } else { // case 2 // We need to send the NAL unit data as FU packets. Deliver the first // packet now. Note that we add "NAL header" and "FU header" bytes to the front // of the packet (overwriting the existing "NAL header"). 
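// (For reference, the fragmentation-unit layouts constructed below: for H.264 (RFC 6184, 'FU-A'), the "FU indicator" // is |F|NRI|type=28| and the "FU header" is |S|E|R|type(5 bits)|; for H.265, a 2-byte payload header carrying type 49 // is followed by a "FU header" of the form |S|E|FuType(6 bits)|. The S ('start') bit is set here because this is the // NAL unit's first fragment.)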
if (fHNumber == 264) { fInputBuffer[0] = (fInputBuffer[1] & 0xE0) | 28; // FU indicator fInputBuffer[1] = 0x80 | (fInputBuffer[1] & 0x1F); // FU header (with S bit) } else { // 265 u_int8_t nal_unit_type = (fInputBuffer[1]&0x7E)>>1; fInputBuffer[0] = (fInputBuffer[1] & 0x81) | (49<<1); // Payload header (1st byte) fInputBuffer[1] = fInputBuffer[2]; // Payload header (2nd byte) fInputBuffer[2] = 0x80 | nal_unit_type; // FU header (with S bit) } memmove(fTo, fInputBuffer, fMaxSize); fFrameSize = fMaxSize; fCurDataOffset += fMaxSize - 1; fLastFragmentCompletedNALUnit = False; } } else { // case 3 // We are sending this NAL unit data as FU packets. We've already sent the // first packet (fragment). Now, send the next fragment. Note that we add // "NAL header" and "FU header" bytes to the front. (We reuse these bytes that // we already sent for the first fragment, but clear the S bit, and add the E // bit if this is the last fragment.) unsigned numExtraHeaderBytes; if (fHNumber == 264) { fInputBuffer[fCurDataOffset-2] = fInputBuffer[0]; // FU indicator fInputBuffer[fCurDataOffset-1] = fInputBuffer[1]&~0x80; // FU header (no S bit) numExtraHeaderBytes = 2; } else { // 265 fInputBuffer[fCurDataOffset-3] = fInputBuffer[0]; // Payload header (1st byte) fInputBuffer[fCurDataOffset-2] = fInputBuffer[1]; // Payload header (2nd byte) fInputBuffer[fCurDataOffset-1] = fInputBuffer[2]&~0x80; // FU header (no S bit) numExtraHeaderBytes = 3; } unsigned numBytesToSend = numExtraHeaderBytes + (fNumValidDataBytes - fCurDataOffset); if (numBytesToSend > fMaxSize) { // We can't send all of the remaining data this time: numBytesToSend = fMaxSize; fLastFragmentCompletedNALUnit = False; } else { // This is the last fragment: fInputBuffer[fCurDataOffset-1] |= 0x40; // set the E bit in the FU header fNumTruncatedBytes = fSaveNumTruncatedBytes; } memmove(fTo, &fInputBuffer[fCurDataOffset-numExtraHeaderBytes], numBytesToSend); fFrameSize = numBytesToSend; fCurDataOffset += numBytesToSend - numExtraHeaderBytes; } if (fCurDataOffset >= fNumValidDataBytes) { // We're done with this data. Reset the pointers for receiving new data: fNumValidDataBytes = fCurDataOffset = 1; } // Complete delivery to the client: FramedSource::afterGetting(this); } } void H264or5Fragmenter::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { H264or5Fragmenter* fragmenter = (H264or5Fragmenter*)clientData; fragmenter->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void H264or5Fragmenter::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { fNumValidDataBytes += frameSize; fSaveNumTruncatedBytes = numTruncatedBytes; fPresentationTime = presentationTime; fDurationInMicroseconds = durationInMicroseconds; // Deliver data to the client: doGetNextFrame(); } live/liveMedia/VP8VideoMatroskaFileServerMediaSubsession.hh000444 001751 000000 00000004267 12265042432 024303 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a VP8 Video track within a Matroska file. // C++ header #ifndef _VP8_VIDEO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #define _VP8_VIDEO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif #ifndef _MATROSKA_FILE_SERVER_DEMUX_HH #include "MatroskaFileServerDemux.hh" #endif class VP8VideoMatroskaFileServerMediaSubsession: public FileServerMediaSubsession { public: static VP8VideoMatroskaFileServerMediaSubsession* createNew(MatroskaFileServerDemux& demux, unsigned trackNumber); private: VP8VideoMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber); // called only by createNew(); virtual ~VP8VideoMatroskaFileServerMediaSubsession(); private: // redefined virtual functions virtual float duration() const; virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); private: MatroskaFileServerDemux& fOurDemux; unsigned fTrackNumber; }; #endif live/liveMedia/VorbisAudioMatroskaFileServerMediaSubsession.cpp000444 001751 000000 00000014663 12265042432 025311 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a Vorbis audio track within a Matroska file. 
// Implementation #include "VorbisAudioMatroskaFileServerMediaSubsession.hh" #include "VorbisAudioRTPSink.hh" #include "MatroskaDemuxedTrack.hh" VorbisAudioMatroskaFileServerMediaSubsession* VorbisAudioMatroskaFileServerMediaSubsession ::createNew(MatroskaFileServerDemux& demux, unsigned trackNumber) { return new VorbisAudioMatroskaFileServerMediaSubsession(demux, trackNumber); } #define getPrivByte(b) if (n == 0) break; else do {b = *p++; --n;} while (0) VorbisAudioMatroskaFileServerMediaSubsession ::VorbisAudioMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber) : FileServerMediaSubsession(demux.envir(), demux.fileName(), False), fOurDemux(demux), fTrackNumber(trackNumber), fIdentificationHeader(NULL), fIdentificationHeaderSize(0), fCommentHeader(NULL), fCommentHeaderSize(0), fSetupHeader(NULL), fSetupHeaderSize(0), fEstBitrate(96/* kbps, default guess */) { MatroskaTrack* track = fOurDemux.lookup(fTrackNumber); // The Matroska file's 'Codec Private' data is assumed to be the Vorbis configuration information, // containing the "Identification", "Comment", and "Setup" headers. Extract these headers now: do { u_int8_t* p = track->codecPrivate; unsigned n = track->codecPrivateSize; if (n == 0 || p == NULL) break; // we have no 'Codec Private' data u_int8_t numHeaders; getPrivByte(numHeaders); unsigned headerSize[3]; // we don't handle any more than 2+1 headers // Extract the sizes of each of these headers: unsigned sizesSum = 0; Boolean success = True; unsigned i; for (i = 0; i < numHeaders && i < 3; ++i) { unsigned len = 0; u_int8_t c; do { success = False; getPrivByte(c); success = True; len += c; } while (c == 255); if (!success || len == 0) break; headerSize[i] = len; sizesSum += len; } if (!success) break; // Compute the implicit size of the final header: if (numHeaders < 3) { int finalHeaderSize = n - sizesSum; if (finalHeaderSize <= 0) break; // error in data; give up headerSize[numHeaders] = (unsigned)finalHeaderSize; ++numHeaders; // include the final header now } else { numHeaders = 3; // The maximum number of headers that we handle } // Then, extract and classify each header: for (i = 0; i < numHeaders; ++i) { success = False; unsigned newHeaderSize = headerSize[i]; u_int8_t* newHeader = new u_int8_t[newHeaderSize]; if (newHeader == NULL) break; u_int8_t* hdr = newHeader; while (newHeaderSize-- > 0) { success = False; getPrivByte(*hdr++); success = True; } if (!success) { delete[] newHeader; break; } u_int8_t headerType = newHeader[0]; if (headerType == 1) { delete[] fIdentificationHeader; fIdentificationHeader = newHeader; fIdentificationHeaderSize = headerSize[i]; if (fIdentificationHeaderSize >= 28) { // Get the 'bitrate' values from this header, and use them to set "fEstBitrate": u_int32_t val; u_int8_t* p; p = &fIdentificationHeader[16]; val = ((p[3]*256 + p[2])*256 + p[1])*256 + p[0]; // i.e., little-endian int bitrate_maximum = (int)val; if (bitrate_maximum < 0) bitrate_maximum = 0; p = &fIdentificationHeader[20]; val = ((p[3]*256 + p[2])*256 + p[1])*256 + p[0]; // i.e., little-endian int bitrate_nominal = (int)val; if (bitrate_nominal < 0) bitrate_nominal = 0; p = &fIdentificationHeader[24]; val = ((p[3]*256 + p[2])*256 + p[1])*256 + p[0]; // i.e., little-endian int bitrate_minimum = (int)val; if (bitrate_minimum < 0) bitrate_minimum = 0; int bitrate = bitrate_nominal>0 ? bitrate_nominal : bitrate_maximum>0 ? bitrate_maximum : bitrate_minimum>0 ? 
bitrate_minimum : 0; if (bitrate > 0) fEstBitrate = ((unsigned)bitrate)/1000; } } else if (headerType == 3) { delete[] fCommentHeader; fCommentHeader = newHeader; fCommentHeaderSize = headerSize[i]; } else if (headerType == 5) { delete[] fSetupHeader; fSetupHeader = newHeader; fSetupHeaderSize = headerSize[i]; } else { delete[] newHeader; // because it was a header type that we don't understand } } if (!success) break; } while (0); } VorbisAudioMatroskaFileServerMediaSubsession ::~VorbisAudioMatroskaFileServerMediaSubsession() { delete[] fIdentificationHeader; delete[] fCommentHeader; delete[] fSetupHeader; } float VorbisAudioMatroskaFileServerMediaSubsession::duration() const { return fOurDemux.fileDuration(); } void VorbisAudioMatroskaFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) { ((MatroskaDemuxedTrack*)inputSource)->seekToTime(seekNPT); } FramedSource* VorbisAudioMatroskaFileServerMediaSubsession ::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { estBitrate = fEstBitrate; // kbps, estimate return fOurDemux.newDemuxedTrack(clientSessionId, fTrackNumber); } RTPSink* VorbisAudioMatroskaFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { MatroskaTrack* track = fOurDemux.lookup(fTrackNumber); return VorbisAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, track->samplingFrequency, track->numChannels, fIdentificationHeader, fIdentificationHeaderSize, fCommentHeader, fCommentHeaderSize, fSetupHeader, fSetupHeaderSize); } live/liveMedia/VorbisAudioRTPSink.cpp000444 001751 000000 00000025430 12265042432 020007 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// RTP sink for Vorbis audio // Implementation #include "VorbisAudioRTPSink.hh" #include "Base64.hh" VorbisAudioRTPSink::VorbisAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned numChannels, u_int8_t* identificationHeader, unsigned identificationHeaderSize, u_int8_t* commentHeader, unsigned commentHeaderSize, u_int8_t* setupHeader, unsigned setupHeaderSize, u_int32_t identField) : AudioRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "VORBIS", numChannels), fIdent(identField), fFmtpSDPLine(NULL) { // Create packed configuration headers, and encode this data into a "a=fmtp:" SDP line that we'll use to describe it: // First, count how many headers (<=3) are included, and how many bytes will be used to encode these headers' sizes: unsigned numHeaders = 0; unsigned sizeSize[2]; // The number of bytes used to encode the lengths of the first two headers (but not the length of the 3rd) sizeSize[0] = sizeSize[1] = 0; if (identificationHeaderSize > 0) { sizeSize[numHeaders++] = identificationHeaderSize < 128 ? 1 : identificationHeaderSize < 16384 ? 2 : 3; } if (commentHeaderSize > 0) { sizeSize[numHeaders++] = commentHeaderSize < 128 ? 1 : commentHeaderSize < 16384 ? 2 : 3; } if (setupHeaderSize > 0) { ++numHeaders; } else { sizeSize[1] = 0; // We have at most two headers, so the second one's length isn't encoded } if (numHeaders == 0) return; // With no headers, we can't set up a configuration if (numHeaders == 1) sizeSize[0] = 0; // With only one header, its length isn't encoded // Then figure out the size of the packed configuration headers, and allocate space for this: unsigned length = identificationHeaderSize + commentHeaderSize + setupHeaderSize; // The "length" field in the packed headers if (length > (unsigned)0xFFFF) return; // too big for a 16-bit field; we can't handle this unsigned packedHeadersSize = 4 // "Number of packed headers" field + 3 // "ident" field + 2 // "length" field + 1 // "n. of headers" field + sizeSize[0] + sizeSize[1] // "length1" and "length2" (if present) fields + length; u_int8_t* packedHeaders = new u_int8_t[packedHeadersSize]; if (packedHeaders == NULL) return; // Fill in the 'packed headers': u_int8_t* p = packedHeaders; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 1; // "Number of packed headers": 1 *p++ = fIdent>>16; *p++ = fIdent>>8; *p++ = fIdent; // "Ident" (24 bits) *p++ = length>>8; *p++ = length; // "length" (16 bits) *p++ = numHeaders-1; // "n. of headers" if (numHeaders > 1) { // Fill in the "length1" header: unsigned length1 = identificationHeaderSize > 0 ? 
identificationHeaderSize : commentHeaderSize; if (length1 >= 16384) { *p++ = 0x80; // flag, but no more, because we know length1 <= 32767 } if (length1 >= 128) { *p++ = 0x80|((length1&0x3F80)>>7); // flag + the second 7 bits } *p++ = length1&0x7F; // the low 7 bits if (numHeaders > 2) { // numHeaders == 3 // Fill in the "length2" header (for the 'Comment' header): unsigned length2 = commentHeaderSize; if (length2 >= 16384) { *p++ = 0x80; // flag, but no more, because we know length2 <= 32767 } if (length2 >= 128) { *p++ = 0x80|((length2&0x3F80)>>7); // flag + the second 7 bits } *p++ = length2&0x7F; // the low 7 bits } } // Copy each header: if (identificationHeader != NULL) memmove(p, identificationHeader, identificationHeaderSize); p += identificationHeaderSize; if (commentHeader != NULL) memmove(p, commentHeader, commentHeaderSize); p += commentHeaderSize; if (setupHeader != NULL) memmove(p, setupHeader, setupHeaderSize); // Having set up the 'packed configuration headers', Base-64-encode this, and put it in our "a=fmtp:" SDP line: char* base64PackedHeaders = base64Encode((char const*)packedHeaders, packedHeadersSize); delete[] packedHeaders; unsigned fmtpSDPLineMaxSize = 50 + strlen(base64PackedHeaders); // 50 => more than enough space fFmtpSDPLine = new char[fmtpSDPLineMaxSize]; sprintf(fFmtpSDPLine, "a=fmtp:%d configuration=%s\r\n", rtpPayloadType(), base64PackedHeaders); delete[] base64PackedHeaders; } VorbisAudioRTPSink::~VorbisAudioRTPSink() { delete[] fFmtpSDPLine; } VorbisAudioRTPSink* VorbisAudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned numChannels, u_int8_t* identificationHeader, unsigned identificationHeaderSize, u_int8_t* commentHeader, unsigned commentHeaderSize, u_int8_t* setupHeader, unsigned setupHeaderSize) { return new VorbisAudioRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, numChannels, identificationHeader, identificationHeaderSize, commentHeader, commentHeaderSize, setupHeader, setupHeaderSize); } #define ADVANCE(n) do { p += (n); rem -= (n); } while (0) #define GET_ENCODED_VAL(n) do { u_int8_t byte; n = 0; do { if (rem == 0) break; byte = *p; n = (n*128) + (byte&0x7F); ADVANCE(1); } while (byte&0x80); } while (0); if (rem == 0) break VorbisAudioRTPSink* VorbisAudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned numChannels, char const* configStr) { u_int8_t* identificationHeader = NULL; unsigned identificationHeaderSize = 0; u_int8_t* commentHeader = NULL; unsigned commentHeaderSize = 0; u_int8_t* setupHeader = NULL; unsigned setupHeaderSize = 0; VorbisAudioRTPSink* resultSink = NULL; // Begin by Base64-decoding the configuration string: unsigned configDataSize; u_int8_t* configData = base64Decode(configStr, configDataSize); u_int8_t* p = configData; unsigned rem = configDataSize; do { if (rem < 4) break; u_int32_t numPackedHeaders = (p[0]<<24)|(p[1]<<16)|(p[2]<<8)|p[3]; ADVANCE(4); if (numPackedHeaders == 0) break; // Use the first 'packed header' only. 
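// (The remainder of each packed header mirrors what the constructor above wrote: a 3-byte "Ident" field, a 2-byte "length" field, // a 1-byte "n. of headers" field, then each explicitly-encoded header length - 7 bits per byte, with the high bit meaning // 'more length bytes follow' - and finally the concatenated header data itself.)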
if (rem < 3) break; u_int32_t ident = (p[0]<<16)|(p[1]<<8)|p[2]; ADVANCE(3); if (rem < 2) break; u_int16_t length = (p[0]<<8)|p[1]; ADVANCE(2); unsigned numHeaders; GET_ENCODED_VAL(numHeaders); Boolean success = False; for (unsigned i = 0; i < numHeaders+1 && i < 3; ++i) { success = False; unsigned headerSize; if (i < numHeaders) { // The header size is encoded: GET_ENCODED_VAL(headerSize); if (headerSize > length) break; length -= headerSize; } else { // The last header is implicit: headerSize = length; } // Allocate space for the header bytes; we'll fill it in later if (i == 0) { identificationHeaderSize = headerSize; identificationHeader = new u_int8_t[identificationHeaderSize]; } else if (i == 1) { commentHeaderSize = headerSize; commentHeader = new u_int8_t[commentHeaderSize]; } else { // i == 2 setupHeaderSize = headerSize; setupHeader = new u_int8_t[setupHeaderSize]; } success = True; } if (!success) break; // Copy the remaining config bytes into the appropriate 'header' buffers: if (identificationHeader != NULL) { memmove(identificationHeader, p, identificationHeaderSize); ADVANCE(identificationHeaderSize); if (commentHeader != NULL) { memmove(commentHeader, p, commentHeaderSize); ADVANCE(commentHeaderSize); if (setupHeader != NULL) { memmove(setupHeader, p, setupHeaderSize); ADVANCE(setupHeaderSize); } } } resultSink = new VorbisAudioRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, numChannels, identificationHeader, identificationHeaderSize, commentHeader, commentHeaderSize, setupHeader, setupHeaderSize, ident); } while (0); delete[] configData; delete[] identificationHeader; delete[] commentHeader; delete[] setupHeader; return resultSink; } char const* VorbisAudioRTPSink::auxSDPLine() { return fFmtpSDPLine; } void VorbisAudioRTPSink ::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { // Set the 4-byte "payload header", as defined in RFC 5215, section 2.2: u_int8_t header[4]; // The first three bytes of the header are our "Ident": header[0] = fIdent>>16; header[1] = fIdent>>8; header[2] = fIdent; // The final byte contains the "F", "VDT", and "numPkts" fields: u_int8_t F; // Fragment type if (numRemainingBytes > 0) { if (fragmentationOffset > 0) { F = 2<<6; // continuation fragment } else { F = 1<<6; // start fragment } } else { if (fragmentationOffset > 0) { F = 3<<6; // end fragment } else { F = 0<<6; // not fragmented } } u_int8_t const VDT = 0<<4; // Vorbis Data Type (always a "Raw Vorbis payload") u_int8_t numPkts = F == 0 ?
(numFramesUsedSoFar() + 1): 0; // set to 0 when we're a fragment header[3] = F|VDT|numPkts; setSpecialHeaderBytes(header, sizeof header); // There's also a 2-byte 'frame-specific' header: The length of the Vorbis data: u_int8_t frameSpecificHeader[2]; frameSpecificHeader[0] = numBytesInFrame>>8; frameSpecificHeader[1] = numBytesInFrame; setFrameSpecificHeaderBytes(frameSpecificHeader, 2); // Important: Also call our base class's doSpecialFrameHandling(), // to set the packet's timestamp: MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset, frameStart, numBytesInFrame, framePresentationTime, numRemainingBytes); } Boolean VorbisAudioRTPSink::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { // We allow more than one frame to be packed into an outgoing RTP packet, but no more than 15: return numFramesUsedSoFar() <= 15; } unsigned VorbisAudioRTPSink::specialHeaderSize() const { return 4; } unsigned VorbisAudioRTPSink::frameSpecificHeaderSize() const { return 2; } live/liveMedia/VorbisAudioRTPSource.cpp000444 001751 000000 00000007411 12265042432 020342 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// Vorbis Audio RTP Sources // Implementation #include "VorbisAudioRTPSource.hh" ////////// VorbisBufferedPacket and VorbisBufferedPacketFactory ////////// class VorbisBufferedPacket: public BufferedPacket { public: VorbisBufferedPacket(); virtual ~VorbisBufferedPacket(); private: // redefined virtual functions virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize); }; class VorbisBufferedPacketFactory: public BufferedPacketFactory { private: // redefined virtual functions virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource); }; ////////// VorbisAudioRTPSource implementation ////////// VorbisAudioRTPSource* VorbisAudioRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) { return new VorbisAudioRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency); } VorbisAudioRTPSource ::VorbisAudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency) : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, new VorbisBufferedPacketFactory), fCurPacketIdent(0) { } VorbisAudioRTPSource::~VorbisAudioRTPSource() { } Boolean VorbisAudioRTPSource ::processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize) { unsigned char* headerStart = packet->data(); unsigned packetSize = packet->dataSize(); resultSpecialHeaderSize = 4; if (packetSize < resultSpecialHeaderSize) return False; // packet was too small // The first 3 bytes of the header are the "Ident" field: fCurPacketIdent = (headerStart[0]<<16) | (headerStart[1]<<8) | headerStart[2]; // The 4th byte is F|VDT|numPkts. // Reject any packet with VDT == 3 (a reserved value): if ((headerStart[3]&0x30) == 0x30) return False; u_int8_t F = headerStart[3]>>6; fCurrentPacketBeginsFrame = F <= 1; // "Not Fragmented" or "Start Fragment" fCurrentPacketCompletesFrame = F == 0 || F == 3; // "Not Fragmented" or "End Fragment" return True; } char const* VorbisAudioRTPSource::MIMEtype() const { return "audio/VORBIS"; } ////////// VorbisBufferedPacket and VorbisBufferedPacketFactory implementation ////////// VorbisBufferedPacket::VorbisBufferedPacket() { } VorbisBufferedPacket::~VorbisBufferedPacket() { } unsigned VorbisBufferedPacket ::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) { if (dataSize < 2) { // There's not enough space for a 2-byte header. TARFU! Just return the data that's left: return dataSize; } unsigned frameSize = (framePtr[0]<<8) | framePtr[1]; framePtr += 2; if (frameSize > dataSize - 2) return dataSize - 2; // inconsistent frame size => just return all the data that's left return frameSize; } BufferedPacket* VorbisBufferedPacketFactory ::createNewPacket(MultiFramedRTPSource* /*ourSource*/) { return new VorbisBufferedPacket(); } live/liveMedia/T140TextRTPSink.cpp000444 001751 000000 00000016012 12265042432 017052 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for T.140 text (RFC 4103, which obsoletes RFC 2793) // Implementation #include "T140TextRTPSink.hh" #include <GroupsockHelper.hh> // for "gettimeofday()" ////////// T140TextRTPSink implementation ////////// T140TextRTPSink::T140TextRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) : TextRTPSink(env, RTPgs, rtpPayloadFormat, 1000/*mandatory RTP timestamp frequency for this payload format*/, "T140"), fOurIdleFilter(NULL), fAreInIdlePeriod(True) { } T140TextRTPSink::~T140TextRTPSink() { fSource = fOurIdleFilter; // hack: in case "fSource" had gotten set to NULL before we were called stopPlaying(); // call this now, because we won't have our 'idle filter' when the base class destructor calls it later. // Close our 'idle filter' as well: Medium::close(fOurIdleFilter); fSource = NULL; // for the base class destructor, which gets called next } T140TextRTPSink* T140TextRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) { return new T140TextRTPSink(env, RTPgs, rtpPayloadFormat); } Boolean T140TextRTPSink::continuePlaying() { // First, check whether we have an 'idle filter' set up yet. If not, create it now, and insert it in front of our existing source: if (fOurIdleFilter == NULL) { fOurIdleFilter = new T140IdleFilter(envir(), fSource); } else { fOurIdleFilter->reassignInputSource(fSource); } fSource = fOurIdleFilter; // Then call the parent class's implementation: return MultiFramedRTPSink::continuePlaying(); } void T140TextRTPSink::doSpecialFrameHandling(unsigned /*fragmentationOffset*/, unsigned char* /*frameStart*/, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned /*numRemainingBytes*/) { // Set the RTP 'M' (marker) bit if we have just ended an idle period - i.e., if we were in an idle period, but just got data: if (fAreInIdlePeriod && numBytesInFrame > 0) setMarkerBit(); fAreInIdlePeriod = numBytesInFrame == 0; setTimestamp(framePresentationTime); } Boolean T140TextRTPSink::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { return False; // We don't concatenate input data; instead, send it out immediately } ////////// T140IdleFilter implementation ////////// T140IdleFilter::T140IdleFilter(UsageEnvironment& env, FramedSource* inputSource) : FramedFilter(env, inputSource), fIdleTimerTask(NULL), fBufferSize(OutPacketBuffer::maxSize), fNumBufferedBytes(0) { fBuffer = new char[fBufferSize]; } T140IdleFilter::~T140IdleFilter() { envir().taskScheduler().unscheduleDelayedTask(fIdleTimerTask); delete[] fBuffer; detachInputSource(); // so that the subsequent ~FramedFilter() doesn't delete it } #define IDLE_TIMEOUT_MICROSECONDS 300000 /* 300 ms */ void T140IdleFilter::doGetNextFrame() { // First, see if we have buffered data that we can deliver: if (fNumBufferedBytes > 0) { deliverFromBuffer(); return; } // We don't have any buffered data, so ask our input source for data (unless we've already done so).
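// (T.140 text is bursty - new data arrives only when the user types - but we still want RTP packets (possibly empty ones) // to keep flowing downstream; the idle timer below arranges for an empty frame to be delivered whenever no real data // arrives promptly.)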
// But also set a timer to expire if this doesn't arrive promptly: fIdleTimerTask = envir().taskScheduler().scheduleDelayedTask(IDLE_TIMEOUT_MICROSECONDS, handleIdleTimeout, this); if (fInputSource != NULL && !fInputSource->isCurrentlyAwaitingData()) { fInputSource->getNextFrame((unsigned char*)fBuffer, fBufferSize, afterGettingFrame, this, onSourceClosure, this); } } void T140IdleFilter::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { ((T140IdleFilter*)clientData)->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void T140IdleFilter::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // First, cancel any pending idle timer: envir().taskScheduler().unscheduleDelayedTask(fIdleTimerTask); // Then note the new data that we have in our buffer: fNumBufferedBytes = frameSize; fBufferedNumTruncatedBytes = numTruncatedBytes; fBufferedDataPresentationTime = presentationTime; fBufferedDataDurationInMicroseconds = durationInMicroseconds; // Then, attempt to deliver this data. (If we can't deliver it now, we'll do so the next time the reader asks for data.) if (isCurrentlyAwaitingData()) (void)deliverFromBuffer(); } void T140IdleFilter::doStopGettingFrames() { // Cancel any pending idle timer: envir().taskScheduler().unscheduleDelayedTask(fIdleTimerTask); // And call the parent's implementation of this virtual function: FramedFilter::doStopGettingFrames(); } void T140IdleFilter::handleIdleTimeout(void* clientData) { ((T140IdleFilter*)clientData)->handleIdleTimeout(); } void T140IdleFilter::handleIdleTimeout() { // No data has arrived from the upstream source within our specified 'idle period' (after data was requested from downstream). // Send an empty 'idle' frame to our downstream "T140TextRTPSink". (This will cause an empty RTP packet to get sent.) deliverEmptyFrame(); } void T140IdleFilter::deliverFromBuffer() { if (fNumBufferedBytes <= fMaxSize) { // common case fNumTruncatedBytes = fBufferedNumTruncatedBytes; fFrameSize = fNumBufferedBytes; } else { fNumTruncatedBytes = fBufferedNumTruncatedBytes + fNumBufferedBytes - fMaxSize; fFrameSize = fMaxSize; } memmove(fTo, fBuffer, fFrameSize); fPresentationTime = fBufferedDataPresentationTime; fDurationInMicroseconds = fBufferedDataDurationInMicroseconds; fNumBufferedBytes = 0; // reset buffer FramedSource::afterGetting(this); // complete delivery } void T140IdleFilter::deliverEmptyFrame() { fFrameSize = fNumTruncatedBytes = 0; gettimeofday(&fPresentationTime, NULL); FramedSource::afterGetting(this); // complete delivery } void T140IdleFilter::onSourceClosure(void* clientData) { ((T140IdleFilter*)clientData)->onSourceClosure(); } void T140IdleFilter::onSourceClosure() { envir().taskScheduler().unscheduleDelayedTask(fIdleTimerTask); fIdleTimerTask = NULL; FramedSource::handleClosure(this); } live/liveMedia/H264or5VideoStreamFramer.cpp000444 001751 000000 00000124374 12265042432 020726 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up a H.264 or H.265 Video Elementary Stream into NAL units. // Implementation #include "H264or5VideoStreamFramer.hh" #include "MPEGVideoStreamParser.hh" #include "BitVector.hh" ////////// H264or5VideoStreamParser definition ////////// class H264or5VideoStreamParser: public MPEGVideoStreamParser { public: H264or5VideoStreamParser(int hNumber, H264or5VideoStreamFramer* usingSource, FramedSource* inputSource, Boolean includeStartCodeInOutput); virtual ~H264or5VideoStreamParser(); private: // redefined virtual functions: virtual void flushInput(); virtual unsigned parse(); private: H264or5VideoStreamFramer* usingSource() { return (H264or5VideoStreamFramer*)fUsingSource; } Boolean isVPS(u_int8_t nal_unit_type) { return usingSource()->isVPS(nal_unit_type); } Boolean isSPS(u_int8_t nal_unit_type) { return usingSource()->isSPS(nal_unit_type); } Boolean isPPS(u_int8_t nal_unit_type) { return usingSource()->isPPS(nal_unit_type); } Boolean isVCL(u_int8_t nal_unit_type) { return usingSource()->isVCL(nal_unit_type); } Boolean isSEI(u_int8_t nal_unit_type); Boolean isEOF(u_int8_t nal_unit_type); Boolean usuallyBeginsAccessUnit(u_int8_t nal_unit_type); void removeEmulationBytes(u_int8_t* nalUnitCopy, unsigned maxSize, unsigned& nalUnitCopySize); void analyze_video_parameter_set_data(unsigned& num_units_in_tick, unsigned& time_scale); void analyze_seq_parameter_set_data(unsigned& num_units_in_tick, unsigned& time_scale); void profile_tier_level(BitVector& bv, unsigned max_sub_layers_minus1); void analyze_vui_parameters(BitVector& bv, unsigned& num_units_in_tick, unsigned& time_scale); void analyze_sei_data(u_int8_t nal_unit_type); private: int fHNumber; // 264 or 265 unsigned fOutputStartCodeSize; Boolean fHaveSeenFirstStartCode, fHaveSeenFirstByteOfNALUnit; u_int8_t fFirstByteOfNALUnit; double fParsedFrameRate; }; ////////// H264or5VideoStreamFramer implementation ////////// H264or5VideoStreamFramer ::H264or5VideoStreamFramer(int hNumber, UsageEnvironment& env, FramedSource* inputSource, Boolean createParser, Boolean includeStartCodeInOutput) : MPEGVideoStreamFramer(env, inputSource), fHNumber(hNumber), fLastSeenVPS(NULL), fLastSeenVPSSize(0), fLastSeenSPS(NULL), fLastSeenSPSSize(0), fLastSeenPPS(NULL), fLastSeenPPSSize(0), fProfileLevelId(0) { for (unsigned i = 0; i < 12; ++i) fProfileTierLevelHeaderBytes[i] = 0; fParser = createParser ? 
new H264or5VideoStreamParser(hNumber, this, inputSource, includeStartCodeInOutput) : NULL; fNextPresentationTime = fPresentationTimeBase; fFrameRate = 25.0; // We assume a frame rate of 25 fps, unless we learn otherwise (from parsing a VPS or SPS NAL unit) } H264or5VideoStreamFramer::~H264or5VideoStreamFramer() { delete[] fLastSeenPPS; delete[] fLastSeenSPS; delete[] fLastSeenVPS; } #define VPS_MAX_SIZE 1000 // larger than the largest possible VPS (Video Parameter Set) NAL unit void H264or5VideoStreamFramer::saveCopyOfVPS(u_int8_t* from, unsigned size) { if (from == NULL) return; delete[] fLastSeenVPS; fLastSeenVPS = new u_int8_t[size]; memmove(fLastSeenVPS, from, size); fLastSeenVPSSize = size; // We also make another copy - without 'emulation bytes', to extract parameters that we need: u_int8_t vps[VPS_MAX_SIZE]; unsigned vpsSize = removeH264or5EmulationBytes(vps, VPS_MAX_SIZE, fLastSeenVPS, fLastSeenVPSSize); // Extract the first 12 'profile_tier_level' bytes: if (vpsSize >= 6/*'profile_tier_level' offset*/ + 12/*num 'profile_tier_level' bytes*/) { memmove(fProfileTierLevelHeaderBytes, &vps[6], 12); } } #define SPS_MAX_SIZE 1000 // larger than the largest possible SPS (Sequence Parameter Set) NAL unit void H264or5VideoStreamFramer::saveCopyOfSPS(u_int8_t* from, unsigned size) { if (from == NULL) return; delete[] fLastSeenSPS; fLastSeenSPS = new u_int8_t[size]; memmove(fLastSeenSPS, from, size); fLastSeenSPSSize = size; // We also make another copy - without 'emulation bytes', to extract parameters that we need: u_int8_t sps[SPS_MAX_SIZE]; unsigned spsSize = removeH264or5EmulationBytes(sps, SPS_MAX_SIZE, fLastSeenSPS, fLastSeenSPSSize); if (fHNumber == 264) { // Extract the first 3 bytes of the SPS (after the nal_unit_header byte) as 'profile_level_id' if (spsSize >= 1/*'profile_level_id' offset within SPS*/ + 3/*num bytes needed*/) { fProfileLevelId = (sps[1]<<16) | (sps[2]<<8) | sps[3]; } } else { // 265 // Extract the first 12 'profile_tier_level' bytes: if (spsSize >= 3/*'profile_tier_level' offset*/ + 12/*num 'profile_tier_level' bytes*/) { memmove(fProfileTierLevelHeaderBytes, &sps[3], 12); } } } void H264or5VideoStreamFramer::saveCopyOfPPS(u_int8_t* from, unsigned size) { if (from == NULL) return; delete[] fLastSeenPPS; fLastSeenPPS = new u_int8_t[size]; memmove(fLastSeenPPS, from, size); fLastSeenPPSSize = size; } Boolean H264or5VideoStreamFramer::isVPS(u_int8_t nal_unit_type) { // VPS NAL units occur in H.265 only: return fHNumber == 265 && nal_unit_type == 32; } Boolean H264or5VideoStreamFramer::isSPS(u_int8_t nal_unit_type) { return fHNumber == 264 ? nal_unit_type == 7 : nal_unit_type == 33; } Boolean H264or5VideoStreamFramer::isPPS(u_int8_t nal_unit_type) { return fHNumber == 264 ? nal_unit_type == 8 : nal_unit_type == 34; } Boolean H264or5VideoStreamFramer::isVCL(u_int8_t nal_unit_type) { return fHNumber == 264 ? (nal_unit_type <= 5 && nal_unit_type > 0) : (nal_unit_type <= 31); } ////////// H264or5VideoStreamParser implementation ////////// H264or5VideoStreamParser ::H264or5VideoStreamParser(int hNumber, H264or5VideoStreamFramer* usingSource, FramedSource* inputSource, Boolean includeStartCodeInOutput) : MPEGVideoStreamParser(usingSource, inputSource), fHNumber(hNumber), fOutputStartCodeSize(includeStartCodeInOutput ? 
4 : 0), fHaveSeenFirstStartCode(False), fHaveSeenFirstByteOfNALUnit(False), fParsedFrameRate(0.0) { } H264or5VideoStreamParser::~H264or5VideoStreamParser() { } #define PREFIX_SEI_NUT 39 // for H.265 #define SUFFIX_SEI_NUT 40 // for H.265 Boolean H264or5VideoStreamParser::isSEI(u_int8_t nal_unit_type) { return fHNumber == 264 ? nal_unit_type == 6 : (nal_unit_type == PREFIX_SEI_NUT || nal_unit_type == SUFFIX_SEI_NUT); } Boolean H264or5VideoStreamParser::isEOF(u_int8_t nal_unit_type) { // "end of sequence" or "end of (bit)stream" return fHNumber == 264 ? (nal_unit_type == 10 || nal_unit_type == 11) : (nal_unit_type == 36 || nal_unit_type == 37); } Boolean H264or5VideoStreamParser::usuallyBeginsAccessUnit(u_int8_t nal_unit_type) { return fHNumber == 264 ? (nal_unit_type >= 6 && nal_unit_type <= 9) || (nal_unit_type >= 14 && nal_unit_type <= 18) : (nal_unit_type >= 32 && nal_unit_type <= 35) || (nal_unit_type == 39) || (nal_unit_type >= 41 && nal_unit_type <= 44) || (nal_unit_type >= 48 && nal_unit_type <= 55); } void H264or5VideoStreamParser ::removeEmulationBytes(u_int8_t* nalUnitCopy, unsigned maxSize, unsigned& nalUnitCopySize) { u_int8_t* nalUnitOrig = fStartOfFrame + fOutputStartCodeSize; unsigned const numBytesInNALunit = fTo - nalUnitOrig; nalUnitCopySize = removeH264or5EmulationBytes(nalUnitCopy, maxSize, nalUnitOrig, numBytesInNALunit); } #ifdef DEBUG char const* nal_unit_type_description_h264[32] = { "Unspecified", //0 "Coded slice of a non-IDR picture", //1 "Coded slice data partition A", //2 "Coded slice data partition B", //3 "Coded slice data partition C", //4 "Coded slice of an IDR picture", //5 "Supplemental enhancement information (SEI)", //6 "Sequence parameter set", //7 "Picture parameter set", //8 "Access unit delimiter", //9 "End of sequence", //10 "End of stream", //11 "Filler data", //12 "Sequence parameter set extension", //13 "Prefix NAL unit", //14 "Subset sequence parameter set", //15 "Reserved", //16 "Reserved", //17 "Reserved", //18 "Coded slice of an auxiliary coded picture without partitioning", //19 "Coded slice extension", //20 "Reserved", //21 "Reserved", //22 "Reserved", //23 "Unspecified", //24 "Unspecified", //25 "Unspecified", //26 "Unspecified", //27 "Unspecified", //28 "Unspecified", //29 "Unspecified", //30 "Unspecified" //31 }; char const* nal_unit_type_description_h265[64] = { "Coded slice segment of a non-TSA, non-STSA trailing picture", //0 "Coded slice segment of a non-TSA, non-STSA trailing picture", //1 "Coded slice segment of a TSA picture", //2 "Coded slice segment of a TSA picture", //3 "Coded slice segment of a STSA picture", //4 "Coded slice segment of a STSA picture", //5 "Coded slice segment of a RADL picture", //6 "Coded slice segment of a RADL picture", //7 "Coded slice segment of a RASL picture", //8 "Coded slice segment of a RASL picture", //9 "Reserved", //10 "Reserved", //11 "Reserved", //12 "Reserved", //13 "Reserved", //14 "Reserved", //15 "Coded slice segment of a BLA picture", //16 "Coded slice segment of a BLA picture", //17 "Coded slice segment of a BLA picture", //18 "Coded slice segment of an IDR picture", //19 "Coded slice segment of an IDR picture", //20 "Coded slice segment of a CRA picture", //21 "Reserved", //22 "Reserved", //23 "Reserved", //24 "Reserved", //25 "Reserved", //26 "Reserved", //27 "Reserved", //28 "Reserved", //29 "Reserved", //30 "Reserved", //31 "Video parameter set", //32 "Sequence parameter set", //33 "Picture parameter set", //34 "Access unit delimiter", //35 "End of sequence", //36 "End of 
bitstream", //37 "Filler data", //38 "Supplemental enhancement information (SEI)", //39 "Supplemental enhancement information (SEI)", //40 "Reserved", //41 "Reserved", //42 "Reserved", //43 "Reserved", //44 "Reserved", //45 "Reserved", //46 "Reserved", //47 "Unspecified", //48 "Unspecified", //49 "Unspecified", //50 "Unspecified", //51 "Unspecified", //52 "Unspecified", //53 "Unspecified", //54 "Unspecified", //55 "Unspecified", //56 "Unspecified", //57 "Unspecified", //58 "Unspecified", //59 "Unspecified", //60 "Unspecified", //61 "Unspecified", //62 "Unspecified", //63 }; #endif #ifdef DEBUG static unsigned numDebugTabs = 1; #define DEBUG_PRINT_TABS for (unsigned _i = 0; _i < numDebugTabs; ++_i) fprintf(stderr, "\t") #define DEBUG_PRINT(x) do { DEBUG_PRINT_TABS; fprintf(stderr, "%s: %d\n", #x, x); } while (0) #define DEBUG_STR(x) do { DEBUG_PRINT_TABS; fprintf(stderr, "%s\n", x); } while (0) class DebugTab { public: DebugTab() {++numDebugTabs;} ~DebugTab() {--numDebugTabs;} }; #define DEBUG_TAB DebugTab dummy #else #define DEBUG_PRINT(x) do {x = x;} while (0) // Note: the "x=x;" statement is intended to eliminate "unused variable" compiler warning messages #define DEBUG_STR(x) do {} while (0) #define DEBUG_TAB do {} while (0) #endif void H264or5VideoStreamParser::profile_tier_level(BitVector& bv, unsigned max_sub_layers_minus1) { bv.skipBits(96); unsigned i; Boolean sub_layer_profile_present_flag[7], sub_layer_level_present_flag[7]; for (i = 0; i < max_sub_layers_minus1; ++i) { sub_layer_profile_present_flag[i] = bv.get1BitBoolean(); sub_layer_level_present_flag[i] = bv.get1BitBoolean(); } if (max_sub_layers_minus1 > 0) { bv.skipBits(2*(8-max_sub_layers_minus1)); // reserved_zero_2bits } for (i = 0; i < max_sub_layers_minus1; ++i) { if (sub_layer_profile_present_flag[i]) { bv.skipBits(88); } if (sub_layer_level_present_flag[i]) { bv.skipBits(8); // sub_layer_level_idc[i] } } } void H264or5VideoStreamParser ::analyze_vui_parameters(BitVector& bv, unsigned& num_units_in_tick, unsigned& time_scale) { Boolean aspect_ratio_info_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(aspect_ratio_info_present_flag); if (aspect_ratio_info_present_flag) { DEBUG_TAB; unsigned aspect_ratio_idc = bv.getBits(8); DEBUG_PRINT(aspect_ratio_idc); if (aspect_ratio_idc == 255/*Extended_SAR*/) { bv.skipBits(32); // sar_width; sar_height } } Boolean overscan_info_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(overscan_info_present_flag); if (overscan_info_present_flag) { bv.skipBits(1); // overscan_appropriate_flag } Boolean video_signal_type_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(video_signal_type_present_flag); if (video_signal_type_present_flag) { DEBUG_TAB; bv.skipBits(4); // video_format; video_full_range_flag Boolean colour_description_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(colour_description_present_flag); if (colour_description_present_flag) { bv.skipBits(24); // colour_primaries; transfer_characteristics; matrix_coefficients } } Boolean chroma_loc_info_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(chroma_loc_info_present_flag); if (chroma_loc_info_present_flag) { (void)bv.get_expGolomb(); // chroma_sample_loc_type_top_field (void)bv.get_expGolomb(); // chroma_sample_loc_type_bottom_field } if (fHNumber == 265) { bv.skipBits(3); // neutral_chroma_indication_flag, field_seq_flag, frame_field_info_present_flag Boolean default_display_window_flag = bv.get1BitBoolean(); DEBUG_PRINT(default_display_window_flag); if (default_display_window_flag) { (void)bv.get_expGolomb(); // 
def_disp_win_left_offset (void)bv.get_expGolomb(); // def_disp_win_right_offset (void)bv.get_expGolomb(); // def_disp_win_top_offset (void)bv.get_expGolomb(); // def_disp_win_bottom_offset } } Boolean timing_info_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(timing_info_present_flag); if (timing_info_present_flag) { DEBUG_TAB; num_units_in_tick = bv.getBits(32); DEBUG_PRINT(num_units_in_tick); time_scale = bv.getBits(32); DEBUG_PRINT(time_scale); if (fHNumber == 264) { Boolean fixed_frame_rate_flag = bv.get1BitBoolean(); DEBUG_PRINT(fixed_frame_rate_flag); } else { // 265 Boolean vui_poc_proportional_to_timing_flag = bv.get1BitBoolean(); DEBUG_PRINT(vui_poc_proportional_to_timing_flag); if (vui_poc_proportional_to_timing_flag) { unsigned vui_num_ticks_poc_diff_one_minus1 = bv.get_expGolomb(); DEBUG_PRINT(vui_num_ticks_poc_diff_one_minus1); } } } } void H264or5VideoStreamParser ::analyze_video_parameter_set_data(unsigned& num_units_in_tick, unsigned& time_scale) { num_units_in_tick = time_scale = 0; // default values // Begin by making a copy of the NAL unit data, removing any 'emulation prevention' bytes: u_int8_t vps[VPS_MAX_SIZE]; unsigned vpsSize; removeEmulationBytes(vps, sizeof vps, vpsSize); BitVector bv(vps, 0, 8*vpsSize); // Assert: fHNumber == 265 (because this function is called only when parsing H.265) unsigned i; bv.skipBits(28); // nal_unit_header, vps_video_parameter_set_id, vps_reserved_three_2bits, vps_max_layers_minus1 unsigned vps_max_sub_layers_minus1 = bv.getBits(3); DEBUG_PRINT(vps_max_sub_layers_minus1); bv.skipBits(17); // vps_temporal_id_nesting_flag, vps_reserved_0xffff_16bits profile_tier_level(bv, vps_max_sub_layers_minus1); Boolean vps_sub_layer_ordering_info_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(vps_sub_layer_ordering_info_present_flag); for (i = vps_sub_layer_ordering_info_present_flag ? 
0 : vps_max_sub_layers_minus1; i <= vps_max_sub_layers_minus1; ++i) { (void)bv.get_expGolomb(); // vps_max_dec_pic_buffering_minus1[i] (void)bv.get_expGolomb(); // vps_max_num_reorder_pics[i] (void)bv.get_expGolomb(); // vps_max_latency_increase_plus1[i] } unsigned vps_max_layer_id = bv.getBits(6); DEBUG_PRINT(vps_max_layer_id); unsigned vps_num_layer_sets_minus1 = bv.get_expGolomb(); DEBUG_PRINT(vps_num_layer_sets_minus1); for (i = 1; i <= vps_num_layer_sets_minus1; ++i) { bv.skipBits(vps_max_layer_id+1); // layer_id_included_flag[i][0..vps_max_layer_id] } Boolean vps_timing_info_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(vps_timing_info_present_flag); if (vps_timing_info_present_flag) { DEBUG_TAB; num_units_in_tick = bv.getBits(32); DEBUG_PRINT(num_units_in_tick); time_scale = bv.getBits(32); DEBUG_PRINT(time_scale); Boolean vps_poc_proportional_to_timing_flag = bv.get1BitBoolean(); DEBUG_PRINT(vps_poc_proportional_to_timing_flag); if (vps_poc_proportional_to_timing_flag) { unsigned vps_num_ticks_poc_diff_one_minus1 = bv.get_expGolomb(); DEBUG_PRINT(vps_num_ticks_poc_diff_one_minus1); } } Boolean vps_extension_flag = bv.get1BitBoolean(); DEBUG_PRINT(vps_extension_flag); } void H264or5VideoStreamParser ::analyze_seq_parameter_set_data(unsigned& num_units_in_tick, unsigned& time_scale) { num_units_in_tick = time_scale = 0; // default values // Begin by making a copy of the NAL unit data, removing any 'emulation prevention' bytes: u_int8_t sps[SPS_MAX_SIZE]; unsigned spsSize; removeEmulationBytes(sps, sizeof sps, spsSize); BitVector bv(sps, 0, 8*spsSize); if (fHNumber == 264) { bv.skipBits(8); // forbidden_zero_bit; nal_ref_idc; nal_unit_type unsigned profile_idc = bv.getBits(8); DEBUG_PRINT(profile_idc); unsigned constraint_setN_flag = bv.getBits(8); // also "reserved_zero_2bits" at end DEBUG_PRINT(constraint_setN_flag); unsigned level_idc = bv.getBits(8); DEBUG_PRINT(level_idc); unsigned seq_parameter_set_id = bv.get_expGolomb(); DEBUG_PRINT(seq_parameter_set_id); if (profile_idc == 100 || profile_idc == 110 || profile_idc == 122 || profile_idc == 244 || profile_idc == 44 || profile_idc == 83 || profile_idc == 86 || profile_idc == 118 || profile_idc == 128 ) { DEBUG_TAB; unsigned chroma_format_idc = bv.get_expGolomb(); DEBUG_PRINT(chroma_format_idc); if (chroma_format_idc == 3) { DEBUG_TAB; Boolean separate_colour_plane_flag = bv.get1BitBoolean(); DEBUG_PRINT(separate_colour_plane_flag); } (void)bv.get_expGolomb(); // bit_depth_luma_minus8 (void)bv.get_expGolomb(); // bit_depth_chroma_minus8 bv.skipBits(1); // qpprime_y_zero_transform_bypass_flag Boolean seq_scaling_matrix_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(seq_scaling_matrix_present_flag); if (seq_scaling_matrix_present_flag) { for (int i = 0; i < ((chroma_format_idc != 3) ? 8 : 12); ++i) { DEBUG_TAB; DEBUG_PRINT(i); Boolean seq_scaling_list_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(seq_scaling_list_present_flag); if (seq_scaling_list_present_flag) { DEBUG_TAB; unsigned sizeOfScalingList = i < 6 ? 16 : 64; unsigned lastScale = 8; unsigned nextScale = 8; for (unsigned j = 0; j < sizeOfScalingList; ++j) { DEBUG_TAB; DEBUG_PRINT(j); DEBUG_PRINT(nextScale); if (nextScale != 0) { DEBUG_TAB; unsigned delta_scale = bv.get_expGolomb(); DEBUG_PRINT(delta_scale); nextScale = (lastScale + delta_scale + 256) % 256; } lastScale = (nextScale == 0) ? 
lastScale : nextScale; DEBUG_PRINT(lastScale); } } } } } unsigned log2_max_frame_num_minus4 = bv.get_expGolomb(); DEBUG_PRINT(log2_max_frame_num_minus4); unsigned pic_order_cnt_type = bv.get_expGolomb(); DEBUG_PRINT(pic_order_cnt_type); if (pic_order_cnt_type == 0) { DEBUG_TAB; unsigned log2_max_pic_order_cnt_lsb_minus4 = bv.get_expGolomb(); DEBUG_PRINT(log2_max_pic_order_cnt_lsb_minus4); } else if (pic_order_cnt_type == 1) { DEBUG_TAB; bv.skipBits(1); // delta_pic_order_always_zero_flag (void)bv.get_expGolomb(); // offset_for_non_ref_pic (void)bv.get_expGolomb(); // offset_for_top_to_bottom_field unsigned num_ref_frames_in_pic_order_cnt_cycle = bv.get_expGolomb(); DEBUG_PRINT(num_ref_frames_in_pic_order_cnt_cycle); for (unsigned i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; ++i) { (void)bv.get_expGolomb(); // offset_for_ref_frame[i] } } unsigned max_num_ref_frames = bv.get_expGolomb(); DEBUG_PRINT(max_num_ref_frames); Boolean gaps_in_frame_num_value_allowed_flag = bv.get1BitBoolean(); DEBUG_PRINT(gaps_in_frame_num_value_allowed_flag); unsigned pic_width_in_mbs_minus1 = bv.get_expGolomb(); DEBUG_PRINT(pic_width_in_mbs_minus1); unsigned pic_height_in_map_units_minus1 = bv.get_expGolomb(); DEBUG_PRINT(pic_height_in_map_units_minus1); Boolean frame_mbs_only_flag = bv.get1BitBoolean(); DEBUG_PRINT(frame_mbs_only_flag); if (!frame_mbs_only_flag) { bv.skipBits(1); // mb_adaptive_frame_field_flag } bv.skipBits(1); // direct_8x8_inference_flag Boolean frame_cropping_flag = bv.get1BitBoolean(); DEBUG_PRINT(frame_cropping_flag); if (frame_cropping_flag) { (void)bv.get_expGolomb(); // frame_crop_left_offset (void)bv.get_expGolomb(); // frame_crop_right_offset (void)bv.get_expGolomb(); // frame_crop_top_offset (void)bv.get_expGolomb(); // frame_crop_bottom_offset } Boolean vui_parameters_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(vui_parameters_present_flag); if (vui_parameters_present_flag) { DEBUG_TAB; analyze_vui_parameters(bv, num_units_in_tick, time_scale); } } else { // 265 unsigned i; bv.skipBits(16); // nal_unit_header bv.skipBits(4); // sps_video_parameter_set_id unsigned sps_max_sub_layers_minus1 = bv.getBits(3); DEBUG_PRINT(sps_max_sub_layers_minus1); bv.skipBits(1); // sps_temporal_id_nesting_flag profile_tier_level(bv, sps_max_sub_layers_minus1); (void)bv.get_expGolomb(); // sps_seq_parameter_set_id unsigned chroma_format_idc = bv.get_expGolomb(); DEBUG_PRINT(chroma_format_idc); if (chroma_format_idc == 3) bv.skipBits(1); // separate_colour_plane_flag unsigned pic_width_in_luma_samples = bv.get_expGolomb(); DEBUG_PRINT(pic_width_in_luma_samples); unsigned pic_height_in_luma_samples = bv.get_expGolomb(); DEBUG_PRINT(pic_height_in_luma_samples); Boolean conformance_window_flag = bv.get1BitBoolean(); DEBUG_PRINT(conformance_window_flag); if (conformance_window_flag) { DEBUG_TAB; unsigned conf_win_left_offset = bv.get_expGolomb(); DEBUG_PRINT(conf_win_left_offset); unsigned conf_win_right_offset = bv.get_expGolomb(); DEBUG_PRINT(conf_win_right_offset); unsigned conf_win_top_offset = bv.get_expGolomb(); DEBUG_PRINT(conf_win_top_offset); unsigned conf_win_bottom_offset = bv.get_expGolomb(); DEBUG_PRINT(conf_win_bottom_offset); } (void)bv.get_expGolomb(); // bit_depth_luma_minus8 (void)bv.get_expGolomb(); // bit_depth_chroma_minus8 unsigned log2_max_pic_order_cnt_lsb_minus4 = bv.get_expGolomb(); Boolean sps_sub_layer_ordering_info_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(sps_sub_layer_ordering_info_present_flag); for (i = (sps_sub_layer_ordering_info_present_flag ? 
0 : sps_max_sub_layers_minus1); i <= sps_max_sub_layers_minus1; ++i) { (void)bv.get_expGolomb(); // sps_max_dec_pic_buffering_minus1[i] (void)bv.get_expGolomb(); // sps_max_num_reorder_pics[i] (void)bv.get_expGolomb(); // sps_max_latency_increase[i] } (void)bv.get_expGolomb(); // log2_min_luma_coding_block_size_minus3 (void)bv.get_expGolomb(); // log2_diff_max_min_luma_coding_block_size (void)bv.get_expGolomb(); // log2_min_transform_block_size_minus2 (void)bv.get_expGolomb(); // log2_diff_max_min_transform_block_size (void)bv.get_expGolomb(); // max_transform_hierarchy_depth_inter (void)bv.get_expGolomb(); // max_transform_hierarchy_depth_intra Boolean scaling_list_enabled_flag = bv.get1BitBoolean(); DEBUG_PRINT(scaling_list_enabled_flag); if (scaling_list_enabled_flag) { DEBUG_TAB; Boolean sps_scaling_list_data_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(sps_scaling_list_data_present_flag); if (sps_scaling_list_data_present_flag) { // scaling_list_data() DEBUG_TAB; for (unsigned sizeId = 0; sizeId < 4; ++sizeId) { DEBUG_PRINT(sizeId); for (unsigned matrixId = 0; matrixId < (sizeId == 3 ? 2 : 6); ++matrixId) { DEBUG_TAB; DEBUG_PRINT(matrixId); Boolean scaling_list_pred_mode_flag = bv.get1BitBoolean(); DEBUG_PRINT(scaling_list_pred_mode_flag); if (!scaling_list_pred_mode_flag) { (void)bv.get_expGolomb(); // scaling_list_pred_matrix_id_delta[sizeId][matrixId] } else { unsigned const c = 1 << (4+(sizeId<<1)); unsigned coefNum = c < 64 ? c : 64; if (sizeId > 1) { (void)bv.get_expGolomb(); // scaling_list_dc_coef_minus8[sizeId][matrixId] } for (i = 0; i < coefNum; ++i) { (void)bv.get_expGolomb(); // scaling_list_delta_coef } } } } } } bv.skipBits(2); // amp_enabled_flag, sample_adaptive_offset_enabled_flag Boolean pcm_enabled_flag = bv.get1BitBoolean(); DEBUG_PRINT(pcm_enabled_flag); if (pcm_enabled_flag) { bv.skipBits(8); // pcm_sample_bit_depth_luma_minus1, pcm_sample_bit_depth_chroma_minus1 (void)bv.get_expGolomb(); // log2_min_pcm_luma_coding_block_size_minus3 (void)bv.get_expGolomb(); // log2_diff_max_min_pcm_luma_coding_block_size bv.skipBits(1); // pcm_loop_filter_disabled_flag } unsigned num_short_term_ref_pic_sets = bv.get_expGolomb(); DEBUG_PRINT(num_short_term_ref_pic_sets); for (i = 0; i < num_short_term_ref_pic_sets; ++i) { // short_term_ref_pic_set(i): DEBUG_TAB; DEBUG_PRINT(i); unsigned num_negative_pics = 0; unsigned num_positive_pics = 0; Boolean inter_ref_pic_set_prediction_flag = False; if (i != 0) { inter_ref_pic_set_prediction_flag = bv.get1BitBoolean(); } DEBUG_PRINT(inter_ref_pic_set_prediction_flag); if (inter_ref_pic_set_prediction_flag) { DEBUG_TAB; if (i == num_short_term_ref_pic_sets) { // This can't happen here, but it's in the spec, so we include it for completeness (void)bv.get_expGolomb(); // delta_idx_minus1 } bv.skipBits(1); // delta_rps_sign (void)bv.get_expGolomb(); // abs_delta_rps_minus1 for (unsigned j = 0; j < num_negative_pics+num_positive_pics; ++j) { DEBUG_PRINT(j); Boolean used_by_curr_pic_flag = bv.get1BitBoolean(); DEBUG_PRINT(used_by_curr_pic_flag); if (!used_by_curr_pic_flag) bv.skipBits(1); // use_delta_flag[j] } } else { num_negative_pics = bv.get_expGolomb(); DEBUG_PRINT(num_negative_pics); num_positive_pics = bv.get_expGolomb(); DEBUG_PRINT(num_positive_pics); unsigned k; for (k = 0; k < num_negative_pics; ++k) { (void)bv.get_expGolomb(); // delta_poc_s0_minus1[k] bv.skipBits(1); // used_by_curr_pic_s0_flag[k] } for (k = 0; k < num_positive_pics; ++k) { (void)bv.get_expGolomb(); // delta_poc_s1_minus1[k] bv.skipBits(1); // 
used_by_curr_pic_s1_flag[k] } } } Boolean long_term_ref_pics_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(long_term_ref_pics_present_flag); if (long_term_ref_pics_present_flag) { DEBUG_TAB; unsigned num_long_term_ref_pics_sps = bv.get_expGolomb(); DEBUG_PRINT(num_long_term_ref_pics_sps); for (i = 0; i < num_long_term_ref_pics_sps; ++i) { bv.skipBits(log2_max_pic_order_cnt_lsb_minus4 + 4); // lt_ref_pic_poc_lsb_sps[i]: a (log2_max_pic_order_cnt_lsb_minus4 + 4)-bit field bv.skipBits(1); // used_by_curr_pic_lt_sps_flag[i] } } bv.skipBits(2); // sps_temporal_mvp_enabled_flag, strong_intra_smoothing_enabled_flag Boolean vui_parameters_present_flag = bv.get1BitBoolean(); DEBUG_PRINT(vui_parameters_present_flag); if (vui_parameters_present_flag) { DEBUG_TAB; analyze_vui_parameters(bv, num_units_in_tick, time_scale); } Boolean sps_extension_flag = bv.get1BitBoolean(); DEBUG_PRINT(sps_extension_flag); } } #define SEI_MAX_SIZE 5000 // larger than the largest possible SEI NAL unit #ifdef DEBUG #define MAX_SEI_PAYLOAD_TYPE_DESCRIPTION_H264 46 char const* sei_payloadType_description_h264[MAX_SEI_PAYLOAD_TYPE_DESCRIPTION_H264+1] = { "buffering_period", //0 "pic_timing", //1 "pan_scan_rect", //2 "filler_payload", //3 "user_data_registered_itu_t_t35", //4 "user_data_unregistered", //5 "recovery_point", //6 "dec_ref_pic_marking_repetition", //7 "spare_pic", //8 "scene_info", //9 "sub_seq_info", //10 "sub_seq_layer_characteristics", //11 "sub_seq_characteristics", //12 "full_frame_freeze", //13 "full_frame_freeze_release", //14 "full_frame_snapshot", //15 "progressive_refinement_segment_start", //16 "progressive_refinement_segment_end", //17 "motion_constrained_slice_group_set", //18 "film_grain_characteristics", //19 "deblocking_filter_display_preference", //20 "stereo_video_info", //21 "post_filter_hint", //22 "tone_mapping_info", //23 "scalability_info", //24 "sub_pic_scalable_layer", //25 "non_required_layer_rep", //26 "priority_layer_info", //27 "layers_not_present", //28 "layer_dependency_change", //29 "scalable_nesting", //30 "base_layer_temporal_hrd", //31 "quality_layer_integrity_check", //32 "redundant_pic_property", //33 "tl0_dep_rep_index", //34 "tl_switching_point", //35 "parallel_decoding_info", //36 "mvc_scalable_nesting", //37 "view_scalability_info", //38 "multiview_scene_info", //39 "multiview_acquisition_info", //40 "non_required_view_component", //41 "view_dependency_change", //42 "operation_points_not_present", //43 "base_view_temporal_hrd", //44 "frame_packing_arrangement", //45 "reserved_sei_message" // 46 or higher }; #endif void H264or5VideoStreamParser::analyze_sei_data(u_int8_t nal_unit_type) { // Begin by making a copy of the NAL unit data, removing any 'emulation prevention' bytes: u_int8_t sei[SEI_MAX_SIZE]; unsigned seiSize; removeEmulationBytes(sei, sizeof sei, seiSize); unsigned j = 1; // skip the initial byte (forbidden_zero_bit; nal_ref_idc; nal_unit_type); we've already seen it while (j < seiSize) { unsigned payloadType = 0; do { payloadType += sei[j]; } while (sei[j++] == 255 && j < seiSize); if (j >= seiSize) break; unsigned payloadSize = 0; do { payloadSize += sei[j]; } while (sei[j++] == 255 && j < seiSize); if (j >= seiSize) break; #ifdef DEBUG char const* description; if (fHNumber == 264) { unsigned descriptionNum = payloadType <= MAX_SEI_PAYLOAD_TYPE_DESCRIPTION_H264 ? payloadType : MAX_SEI_PAYLOAD_TYPE_DESCRIPTION_H264; description = sei_payloadType_description_h264[descriptionNum]; } else { // 265 description = payloadType == 3 ? "filler_payload" : payloadType == 4 ?
"user_data_registered_itu_t_t35" : payloadType == 5 ? "user_data_unregistered" : payloadType == 17 ? "progressive_refinement_segment_end" : payloadType == 22 ? "post_filter_hint" : (payloadType == 132 && nal_unit_type == SUFFIX_SEI_NUT) ? "decoded_picture_hash" : nal_unit_type == SUFFIX_SEI_NUT ? "reserved_sei_message" : payloadType == 0 ? "buffering_period" : payloadType == 1 ? "pic_timing" : payloadType == 2 ? "pan_scan_rect" : payloadType == 6 ? "recovery_point" : payloadType == 9 ? "scene_info" : payloadType == 15 ? "picture_snapshot" : payloadType == 16 ? "progressive_refinement_segment_start" : payloadType == 19 ? "film_grain_characteristics" : payloadType == 23 ? "tone_mapping_info" : payloadType == 45 ? "frame_packing_arrangement" : payloadType == 47 ? "display_orientation" : payloadType == 128 ? "structure_of_pictures_info" : payloadType == 129 ? "active_parameter_sets" : payloadType == 130 ? "decoding_unit_info" : payloadType == 131 ? "temporal_sub_layer_zero_index" : payloadType == 133 ? "scalable_nesting" : payloadType == 134 ? "region_refresh_info" : "reserved_sei_message"; } fprintf(stderr, "\tpayloadType %d (\"%s\"); payloadSize %d\n", payloadType, description, payloadSize); #endif j += payloadSize; } } void H264or5VideoStreamParser::flushInput() { fHaveSeenFirstStartCode = False; fHaveSeenFirstByteOfNALUnit = False; StreamParser::flushInput(); } #define NUM_NEXT_SLICE_HEADER_BYTES_TO_ANALYZE 12 unsigned H264or5VideoStreamParser::parse() { try { // The stream must start with a 0x00000001: if (!fHaveSeenFirstStartCode) { // Skip over any input bytes that precede the first 0x00000001: u_int32_t first4Bytes; while ((first4Bytes = test4Bytes()) != 0x00000001) { get1Byte(); setParseState(); // ensures that we progress over bad data } skipBytes(4); // skip this initial code setParseState(); fHaveSeenFirstStartCode = True; // from now on } if (fOutputStartCodeSize > 0 && curFrameSize() == 0 && !haveSeenEOF()) { // Include a start code in the output: save4Bytes(0x00000001); } // Then save everything up until the next 0x00000001 (4 bytes) or 0x000001 (3 bytes), or we hit EOF. 
// Also make note of the first byte, because it contains the "nal_unit_type": if (haveSeenEOF()) { // We hit EOF the last time that we tried to parse this data, so we know that any remaining unparsed data // forms a complete NAL unit, and that there's no 'start code' at the end: unsigned remainingDataSize = totNumValidBytes() - curOffset(); #ifdef DEBUG unsigned const trailingNALUnitSize = remainingDataSize; #endif while (remainingDataSize > 0) { u_int8_t nextByte = get1Byte(); if (!fHaveSeenFirstByteOfNALUnit) { fFirstByteOfNALUnit = nextByte; fHaveSeenFirstByteOfNALUnit = True; } saveByte(nextByte); --remainingDataSize; } #ifdef DEBUG if (fHNumber == 264) { u_int8_t nal_ref_idc = (fFirstByteOfNALUnit&0x60)>>5; u_int8_t nal_unit_type = fFirstByteOfNALUnit&0x1F; fprintf(stderr, "Parsed trailing %d-byte NAL-unit (nal_ref_idc: %d, nal_unit_type: %d (\"%s\"))\n", trailingNALUnitSize, nal_ref_idc, nal_unit_type, nal_unit_type_description_h264[nal_unit_type]); } else { // 265 u_int8_t nal_unit_type = (fFirstByteOfNALUnit&0x7E)>>1; fprintf(stderr, "Parsed trailing %d-byte NAL-unit (nal_unit_type: %d (\"%s\"))\n", trailingNALUnitSize, nal_unit_type, nal_unit_type_description_h265[nal_unit_type]); } #endif (void)get1Byte(); // forces another read, which will cause EOF to get handled for real this time return 0; } else { u_int32_t next4Bytes = test4Bytes(); if (!fHaveSeenFirstByteOfNALUnit) { fFirstByteOfNALUnit = next4Bytes>>24; fHaveSeenFirstByteOfNALUnit = True; } while (next4Bytes != 0x00000001 && (next4Bytes&0xFFFFFF00) != 0x00000100) { // We save at least some of "next4Bytes". if ((unsigned)(next4Bytes&0xFF) > 1) { // Common case: 0x00000001 or 0x000001 definitely doesn't begin anywhere in "next4Bytes", so we save all of it: save4Bytes(next4Bytes); skipBytes(4); } else { // Save the first byte, and continue testing the rest: saveByte(next4Bytes>>24); skipBytes(1); } setParseState(); // ensures forward progress next4Bytes = test4Bytes(); } // Assert: next4Bytes starts with 0x00000001 or 0x000001, and we've saved all previous bytes (forming a complete NAL unit). // Skip over these remaining bytes, up until the start of the next NAL unit: if (next4Bytes == 0x00000001) { skipBytes(4); } else { skipBytes(3); } } fHaveSeenFirstByteOfNALUnit = False; // for the next NAL unit that we'll parse u_int8_t nal_unit_type; if (fHNumber == 264) { nal_unit_type = fFirstByteOfNALUnit&0x1F; #ifdef DEBUG u_int8_t nal_ref_idc = (fFirstByteOfNALUnit&0x60)>>5; fprintf(stderr, "Parsed %d-byte NAL-unit (nal_ref_idc: %d, nal_unit_type: %d (\"%s\"))\n", curFrameSize()-fOutputStartCodeSize, nal_ref_idc, nal_unit_type, nal_unit_type_description_h264[nal_unit_type]); #endif } else { // 265 nal_unit_type = (fFirstByteOfNALUnit&0x7E)>>1; #ifdef DEBUG fprintf(stderr, "Parsed %d-byte NAL-unit (nal_unit_type: %d (\"%s\"))\n", curFrameSize()-fOutputStartCodeSize, nal_unit_type, nal_unit_type_description_h265[nal_unit_type]); #endif } // Now that we have found (& copied) a NAL unit, process it if it's of special interest to us: if (isVPS(nal_unit_type)) { // Video parameter set // First, save a copy of this NAL unit, in case the downstream object wants to see it: usingSource()->saveCopyOfVPS(fStartOfFrame + fOutputStartCodeSize, fTo - fStartOfFrame - fOutputStartCodeSize); if (fParsedFrameRate == 0.0) { // We haven't yet parsed a frame rate from the stream. 
// So parse this NAL unit to check whether frame rate information is present: unsigned num_units_in_tick, time_scale; analyze_video_parameter_set_data(num_units_in_tick, time_scale); if (time_scale > 0 && num_units_in_tick > 0) { usingSource()->fFrameRate = fParsedFrameRate = time_scale/(2.0*num_units_in_tick); #ifdef DEBUG fprintf(stderr, "Set frame rate to %f fps\n", usingSource()->fFrameRate); #endif } else { #ifdef DEBUG fprintf(stderr, "\tThis \"Video Parameter Set\" NAL unit contained no frame rate information, so we use a default frame rate of %f fps\n", usingSource()->fFrameRate); #endif } } } else if (isSPS(nal_unit_type)) { // Sequence parameter set // First, save a copy of this NAL unit, in case the downstream object wants to see it: usingSource()->saveCopyOfSPS(fStartOfFrame + fOutputStartCodeSize, fTo - fStartOfFrame - fOutputStartCodeSize); if (fParsedFrameRate == 0.0) { // We haven't yet parsed a frame rate from the stream. // So parse this NAL unit to check whether frame rate information is present: unsigned num_units_in_tick, time_scale; analyze_seq_parameter_set_data(num_units_in_tick, time_scale); if (time_scale > 0 && num_units_in_tick > 0) { usingSource()->fFrameRate = fParsedFrameRate = time_scale/(2.0*num_units_in_tick); #ifdef DEBUG fprintf(stderr, "Set frame rate to %f fps\n", usingSource()->fFrameRate); #endif } else { #ifdef DEBUG fprintf(stderr, "\tThis \"Sequence Parameter Set\" NAL unit contained no frame rate information, so we use a default frame rate of %f fps\n", usingSource()->fFrameRate); #endif } } } else if (isPPS(nal_unit_type)) { // Picture parameter set // Save a copy of this NAL unit, in case the downstream object wants to see it: usingSource()->saveCopyOfPPS(fStartOfFrame + fOutputStartCodeSize, fTo - fStartOfFrame - fOutputStartCodeSize); } else if (isSEI(nal_unit_type)) { // Supplemental enhancement information (SEI) analyze_sei_data(nal_unit_type); // Later, perhaps adjust "fPresentationTime" if we saw a "pic_timing" SEI payload??? ##### } usingSource()->setPresentationTime(); #ifdef DEBUG unsigned long secs = (unsigned long)usingSource()->fPresentationTime.tv_sec; unsigned uSecs = (unsigned)usingSource()->fPresentationTime.tv_usec; fprintf(stderr, "\tPresentation time: %lu.%06u\n", secs, uSecs); #endif // Now, check whether this NAL unit ends an 'access unit'. // (RTP streamers need to know this in order to figure out whether or not to set the "M" bit.) Boolean thisNALUnitEndsAccessUnit; if (haveSeenEOF() || isEOF(nal_unit_type)) { // There is no next NAL unit, so we assume that this one ends the current 'access unit': thisNALUnitEndsAccessUnit = True; } else if (usuallyBeginsAccessUnit(nal_unit_type)) { // These NAL units usually *begin* an access unit, so assume that they don't end one here: thisNALUnitEndsAccessUnit = False; } else { // We need to check the *next* NAL unit to figure out whether // the current NAL unit ends an 'access unit': u_int8_t firstBytesOfNextNALUnit[3]; testBytes(firstBytesOfNextNALUnit, 3); u_int8_t const& next_nal_unit_type = fHNumber == 264 ? (firstBytesOfNextNALUnit[0]&0x1F) : ((firstBytesOfNextNALUnit[0]&0x7E)>>1); if (isVCL(next_nal_unit_type)) { // The high-order bit of the byte after the "nal_unit_header" tells us whether it's // the start of a new 'access unit' (and thus the current NAL unit ends an 'access unit'): u_int8_t const byteAfter_nal_unit_header = fHNumber == 264 ?
firstBytesOfNextNALUnit[1] : firstBytesOfNextNALUnit[2]; thisNALUnitEndsAccessUnit = (byteAfter_nal_unit_header&0x80) != 0; } else if (usuallyBeginsAccessUnit(next_nal_unit_type)) { // The next NAL unit's type is one that usually appears at the start of an 'access unit', // so we assume that the current NAL unit ends an 'access unit': thisNALUnitEndsAccessUnit = True; } else { // The next NAL unit definitely doesn't start a new 'access unit', // which means that the current NAL unit doesn't end one: thisNALUnitEndsAccessUnit = False; } } if (thisNALUnitEndsAccessUnit) { #ifdef DEBUG fprintf(stderr, "*****This NAL unit ends the current access unit*****\n"); #endif usingSource()->fPictureEndMarker = True; ++usingSource()->fPictureCount; // Note that the presentation time for the next NAL unit will be different: struct timeval& nextPT = usingSource()->fNextPresentationTime; // alias nextPT = usingSource()->fPresentationTime; double nextFraction = nextPT.tv_usec/1000000.0 + 1/usingSource()->fFrameRate; unsigned nextSecsIncrement = (long)nextFraction; nextPT.tv_sec += (long)nextSecsIncrement; nextPT.tv_usec = (long)((nextFraction - nextSecsIncrement)*1000000); } setParseState(); return curFrameSize(); } catch (int /*e*/) { #ifdef DEBUG fprintf(stderr, "H264or5VideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n"); #endif return 0; // the parsing got interrupted } } unsigned removeH264or5EmulationBytes(u_int8_t* to, unsigned toMaxSize, u_int8_t* from, unsigned fromSize) { unsigned toSize = 0; unsigned i = 0; while (i < fromSize && toSize+1 < toMaxSize) { if (i+2 < fromSize && from[i] == 0 && from[i+1] == 0 && from[i+2] == 3) { to[toSize] = to[toSize+1] = 0; toSize += 2; i += 3; } else { to[toSize] = from[i]; toSize += 1; i += 1; } } return toSize; } live/liveMedia/MPEG2TransportUDPServerMediaSubsession.cpp000444 001751 000000 00000006333 12265042432 023654 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
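// An illustrative usage sketch for the subsession implemented below (a sketch
// only, not code from the original distribution): serve an MPEG-2 Transport
// Stream, arriving as raw UDP on port 1234, to RTSP clients. The stream name
// and port number are arbitrary examples; "env" and "rtspServer" are assumed
// to have been set up in the usual way (cf. the "testProgs" and "mediaServer"
// applications):
#if 0
#include "liveMedia.hh"

static void addTransportStreamSession(UsageEnvironment& env, RTSPServer* rtspServer) {
  ServerMediaSession* sms
    = ServerMediaSession::createNew(env, "tsStream", "tsStream", "MPEG-2 Transport Stream relay");
  // A NULL input address means: receive on our own address; "True" means the input is raw UDP (not RTP/UDP):
  sms->addSubsession(MPEG2TransportUDPServerMediaSubsession::createNew(env, NULL, Port(1234), True));
  rtspServer->addServerMediaSession(sms);
}
#endif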
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an incoming UDP (or RTP/UDP) MPEG-2 Transport Stream // Implementation #include "MPEG2TransportUDPServerMediaSubsession.hh" #include "BasicUDPSource.hh" #include "SimpleRTPSource.hh" #include "MPEG2TransportStreamFramer.hh" #include "SimpleRTPSink.hh" #include "GroupsockHelper.hh" MPEG2TransportUDPServerMediaSubsession* MPEG2TransportUDPServerMediaSubsession::createNew(UsageEnvironment& env, char const* inputAddressStr, Port const& inputPort, Boolean inputStreamIsRawUDP) { return new MPEG2TransportUDPServerMediaSubsession(env, inputAddressStr, inputPort, inputStreamIsRawUDP); } MPEG2TransportUDPServerMediaSubsession ::MPEG2TransportUDPServerMediaSubsession(UsageEnvironment& env, char const* inputAddressStr, Port const& inputPort, Boolean inputStreamIsRawUDP) : OnDemandServerMediaSubsession(env, True/*reuseFirstSource*/), fInputPort(inputPort), fInputGroupsock(NULL), fInputStreamIsRawUDP(inputStreamIsRawUDP) { fInputAddressStr = strDup(inputAddressStr); } MPEG2TransportUDPServerMediaSubsession:: ~MPEG2TransportUDPServerMediaSubsession() { delete fInputGroupsock; delete[] (char*)fInputAddressStr; } FramedSource* MPEG2TransportUDPServerMediaSubsession ::createNewStreamSource(unsigned/* clientSessionId*/, unsigned& estBitrate) { estBitrate = 5000; // kbps, estimate if (fInputGroupsock == NULL) { // Create a 'groupsock' object for receiving the input stream: struct in_addr inputAddress; inputAddress.s_addr = fInputAddressStr == NULL ? 0 : our_inet_addr(fInputAddressStr); fInputGroupsock = new Groupsock(envir(), inputAddress, fInputPort, 255); } FramedSource* transportStreamSource; if (fInputStreamIsRawUDP) { transportStreamSource = BasicUDPSource::createNew(envir(), fInputGroupsock); } else { transportStreamSource = SimpleRTPSource::createNew(envir(), fInputGroupsock, 33, 90000, "video/MP2T", 0, False /*no 'M' bit*/); } return MPEG2TransportStreamFramer::createNew(envir(), transportStreamSource); } RTPSink* MPEG2TransportUDPServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char /*rtpPayloadTypeIfDynamic*/, FramedSource* /*inputSource*/) { return SimpleRTPSink::createNew(envir(), rtpGroupsock, 33, 90000, "video", "MP2T", 1, True, False /*no 'M' bit*/); } live/liveMedia/VP8VideoRTPSink.cpp000444 001751 000000 00000004611 12265042432 017163 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
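// For reference, a sketch of how a receiver might decode the minimal 1-byte
// "VP8 Payload Descriptor" that this sink emits. The bit layout assumed here
// (X|R|N|S|PartID, with "PartID" in the low four bits) follows the draft VP8
// RTP payload format that this code targets; this is an illustration only,
// not code from the original distribution:
#if 0
struct VP8DescriptorBits {
  bool X;          // an extension byte follows
  bool N;          // non-reference frame (may be discarded without damage)
  bool S;          // start of partition: first (or only) fragment of the frame
  unsigned partID; // partition index (this sink always emits 0)
};

static VP8DescriptorBits decodeVP8PayloadDescriptor(unsigned char b) {
  VP8DescriptorBits d;
  d.X = (b & 0x80) != 0;
  d.N = (b & 0x20) != 0;
  d.S = (b & 0x10) != 0; // set by the sink below iff fragmentationOffset == 0
  d.partID = b & 0x0F;
  return d;
}
#endif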
// RTP sink for VP8 video // Implementation #include "VP8VideoRTPSink.hh" VP8VideoRTPSink ::VP8VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, "VP8") { } VP8VideoRTPSink::~VP8VideoRTPSink() { } VP8VideoRTPSink* VP8VideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) { return new VP8VideoRTPSink(env, RTPgs, rtpPayloadFormat); } Boolean VP8VideoRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { // A packet can contain only one frame return False; } void VP8VideoRTPSink ::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* /*frameStart*/, unsigned /*numBytesInFrame*/, struct timeval framePresentationTime, unsigned numRemainingBytes) { // Set the "VP8 Payload Descriptor" (just the minimal required 1-byte version): u_int8_t vp8PayloadDescriptor = fragmentationOffset == 0 ? 0x10 : 0x00; // X = R = N = 0; PartID = 0; S = 1 iff this is the first (or only) fragment of the frame setSpecialHeaderBytes(&vp8PayloadDescriptor, 1); if (numRemainingBytes == 0) { // This packet contains the last (or only) fragment of the frame. // Set the RTP 'M' ('marker') bit: setMarkerBit(); } // Also set the RTP timestamp: setTimestamp(framePresentationTime); } unsigned VP8VideoRTPSink::specialHeaderSize() const { // We include only the required 1-byte form of the "VP8 Payload Descriptor": return 1; } live/liveMedia/MPEG4GenericRTPSink.cpp000444 001751 000000 00000011620 12265042432 017666 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
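// For reference, a worked sketch (illustration only, not part of the original
// distribution) of the "AU Header Section" arithmetic used in
// doSpecialFrameHandling() below, matching the "sizelength=13;indexlength=3"
// advertised in the "a=fmtp:" line: the 16-bit AU header packs a 13-bit
// access-unit size followed by a 3-bit AU-Index of 0.
#if 0
#include <stdio.h>

int main() {
  unsigned fullFrameSize = 1024; // example access-unit size, in bytes; must fit in 13 bits (<= 8191)
  unsigned char headers[4];
  headers[0] = 0; headers[1] = 16;        // "AU-headers-length": 16 bits of AU headers follow
  headers[2] = fullFrameSize >> 5;        // high 8 bits of the 13-bit "AU-size"
  headers[3] = (fullFrameSize&0x1F)<<3;   // low 5 bits of "AU-size", then a 3-bit "AU-Index" of 0
  // A receiver recovers the access-unit size by undoing the packing:
  unsigned auSize = ((headers[2]<<8)|headers[3]) >> 3;
  printf("AU-size = %u\n", auSize);       // prints "AU-size = 1024"
  return 0;
}
#endif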
// MPEG4-GENERIC ("audio", "video", or "application") RTP stream sinks // Implementation #include "MPEG4GenericRTPSink.hh" #include "Locale.hh" #include // needed on some systems to define "tolower()" MPEG4GenericRTPSink ::MPEG4GenericRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, char const* sdpMediaTypeString, char const* mpeg4Mode, char const* configString, unsigned numChannels) : MultiFramedRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "MPEG4-GENERIC", numChannels), fSDPMediaTypeString(strDup(sdpMediaTypeString)), fMPEG4Mode(strDup(mpeg4Mode)), fConfigString(strDup(configString)) { // Check whether "mpeg4Mode" is one that we handle: if (mpeg4Mode == NULL) { env << "MPEG4GenericRTPSink error: NULL \"mpeg4Mode\" parameter\n"; } else { // To ease comparison, convert "mpeg4Mode" to lower case: size_t const len = strlen(mpeg4Mode) + 1; char* m = new char[len]; Locale l("POSIX"); for (size_t i = 0; i < len; ++i) m[i] = tolower(mpeg4Mode[i]); if (strcmp(m, "aac-hbr") != 0) { env << "MPEG4GenericRTPSink error: Unknown \"mpeg4Mode\" parameter: \"" << mpeg4Mode << "\"\n"; } delete[] m; } // Set up the "a=fmtp:" SDP line for this stream: char const* fmtpFmt = "a=fmtp:%d " "streamtype=%d;profile-level-id=1;" "mode=%s;sizelength=13;indexlength=3;indexdeltalength=3;" "config=%s\r\n"; unsigned fmtpFmtSize = strlen(fmtpFmt) + 3 /* max char len */ + 3 /* max char len */ + strlen(fMPEG4Mode) + strlen(fConfigString); char* fmtp = new char[fmtpFmtSize]; sprintf(fmtp, fmtpFmt, rtpPayloadType(), strcmp(fSDPMediaTypeString, "video") == 0 ? 4 : 5, fMPEG4Mode, fConfigString); fFmtpSDPLine = strDup(fmtp); delete[] fmtp; } MPEG4GenericRTPSink::~MPEG4GenericRTPSink() { delete[] fFmtpSDPLine; delete[] (char*)fConfigString; delete[] (char*)fMPEG4Mode; delete[] (char*)fSDPMediaTypeString; } MPEG4GenericRTPSink* MPEG4GenericRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, char const* sdpMediaTypeString, char const* mpeg4Mode, char const* configString, unsigned numChannels) { return new MPEG4GenericRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, sdpMediaTypeString, mpeg4Mode, configString, numChannels); } Boolean MPEG4GenericRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { // (For now) allow at most 1 frame in a single packet: return False; } void MPEG4GenericRTPSink ::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { // Set the "AU Header Section". This is 4 bytes: 2 bytes for the // initial "AU-headers-length" field, and 2 bytes for the first // (and only) "AU Header": unsigned fullFrameSize = fragmentationOffset + numBytesInFrame + numRemainingBytes; unsigned char headers[4]; headers[0] = 0; headers[1] = 16 /* bits */; // AU-headers-length headers[2] = fullFrameSize >> 5; headers[3] = (fullFrameSize&0x1F)<<3; setSpecialHeaderBytes(headers, sizeof headers); if (numRemainingBytes == 0) { // This packet contains the last (or only) fragment of the frame. 
// Set the RTP 'M' ('marker') bit: setMarkerBit(); } // Important: Also call our base class's doSpecialFrameHandling(), // to set the packet's timestamp: MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset, frameStart, numBytesInFrame, framePresentationTime, numRemainingBytes); } unsigned MPEG4GenericRTPSink::specialHeaderSize() const { return 2 + 2; } char const* MPEG4GenericRTPSink::sdpMediaType() const { return fSDPMediaTypeString; } char const* MPEG4GenericRTPSink::auxSDPLine() { return fFmtpSDPLine; } live/liveMedia/MultiFramedRTPSink.cpp000444 001751 000000 00000037702 12265042432 017777 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for a common kind of payload format: Those which pack multiple, // complete codec frames (as many as possible) into each RTP packet. // Implementation #include "MultiFramedRTPSink.hh" #include "GroupsockHelper.hh" ////////// MultiFramedRTPSink ////////// void MultiFramedRTPSink::setPacketSizes(unsigned preferredPacketSize, unsigned maxPacketSize) { if (preferredPacketSize > maxPacketSize || preferredPacketSize == 0) return; // sanity check delete fOutBuf; fOutBuf = new OutPacketBuffer(preferredPacketSize, maxPacketSize); fOurMaxPacketSize = maxPacketSize; // save value, in case subclasses need it } MultiFramedRTPSink::MultiFramedRTPSink(UsageEnvironment& env, Groupsock* rtpGS, unsigned char rtpPayloadType, unsigned rtpTimestampFrequency, char const* rtpPayloadFormatName, unsigned numChannels) : RTPSink(env, rtpGS, rtpPayloadType, rtpTimestampFrequency, rtpPayloadFormatName, numChannels), fOutBuf(NULL), fCurFragmentationOffset(0), fPreviousFrameEndedFragmentation(False), fOnSendErrorFunc(NULL), fOnSendErrorData(NULL) { setPacketSizes(1000, 1448); // Default max packet size (1500, minus allowance for IP, UDP, UMTP headers) // (Also, make it a multiple of 4 bytes, just in case that matters.) 
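// (For reference: a 1500-byte Ethernet payload, less 20 bytes of IPv4 header
// and 8 bytes of UDP header, leaves 1472 bytes; 1448 == 4*362 preserves the
// multiple-of-4 property while leaving a further 24 bytes of headroom, e.g.
// for the UMTP tunneling headers alluded to above.)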
} MultiFramedRTPSink::~MultiFramedRTPSink() { delete fOutBuf; } void MultiFramedRTPSink ::doSpecialFrameHandling(unsigned /*fragmentationOffset*/, unsigned char* /*frameStart*/, unsigned /*numBytesInFrame*/, struct timeval framePresentationTime, unsigned /*numRemainingBytes*/) { // default implementation: If this is the first frame in the packet, // use its presentationTime for the RTP timestamp: if (isFirstFrameInPacket()) { setTimestamp(framePresentationTime); } } Boolean MultiFramedRTPSink::allowFragmentationAfterStart() const { return False; // by default } Boolean MultiFramedRTPSink::allowOtherFramesAfterLastFragment() const { return False; // by default } Boolean MultiFramedRTPSink ::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { return True; // by default } unsigned MultiFramedRTPSink::specialHeaderSize() const { // default implementation: Assume no special header: return 0; } unsigned MultiFramedRTPSink::frameSpecificHeaderSize() const { // default implementation: Assume no frame-specific header: return 0; } unsigned MultiFramedRTPSink::computeOverflowForNewFrame(unsigned newFrameSize) const { // default implementation: Just call numOverflowBytes() return fOutBuf->numOverflowBytes(newFrameSize); } void MultiFramedRTPSink::setMarkerBit() { unsigned rtpHdr = fOutBuf->extractWord(0); rtpHdr |= 0x00800000; fOutBuf->insertWord(rtpHdr, 0); } void MultiFramedRTPSink::setTimestamp(struct timeval framePresentationTime) { // First, convert the presentation time to a 32-bit RTP timestamp: fCurrentTimestamp = convertToRTPTimestamp(framePresentationTime); // Then, insert it into the RTP packet: fOutBuf->insertWord(fCurrentTimestamp, fTimestampPosition); } void MultiFramedRTPSink::setSpecialHeaderWord(unsigned word, unsigned wordPosition) { fOutBuf->insertWord(word, fSpecialHeaderPosition + 4*wordPosition); } void MultiFramedRTPSink::setSpecialHeaderBytes(unsigned char const* bytes, unsigned numBytes, unsigned bytePosition) { fOutBuf->insert(bytes, numBytes, fSpecialHeaderPosition + bytePosition); } void MultiFramedRTPSink::setFrameSpecificHeaderWord(unsigned word, unsigned wordPosition) { fOutBuf->insertWord(word, fCurFrameSpecificHeaderPosition + 4*wordPosition); } void MultiFramedRTPSink::setFrameSpecificHeaderBytes(unsigned char const* bytes, unsigned numBytes, unsigned bytePosition) { fOutBuf->insert(bytes, numBytes, fCurFrameSpecificHeaderPosition + bytePosition); } void MultiFramedRTPSink::setFramePadding(unsigned numPaddingBytes) { if (numPaddingBytes > 0) { // Add the padding bytes (with the last one being the padding size): unsigned char paddingBuffer[255]; //max padding memset(paddingBuffer, 0, numPaddingBytes); paddingBuffer[numPaddingBytes-1] = numPaddingBytes; fOutBuf->enqueue(paddingBuffer, numPaddingBytes); // Set the RTP padding bit: unsigned rtpHdr = fOutBuf->extractWord(0); rtpHdr |= 0x20000000; fOutBuf->insertWord(rtpHdr, 0); } } Boolean MultiFramedRTPSink::continuePlaying() { // Send the first packet. // (This will also schedule any future sends.) 
buildAndSendPacket(True); return True; } void MultiFramedRTPSink::stopPlaying() { fOutBuf->resetPacketStart(); fOutBuf->resetOffset(); fOutBuf->resetOverflowData(); // Then call the default "stopPlaying()" function: MediaSink::stopPlaying(); } void MultiFramedRTPSink::buildAndSendPacket(Boolean isFirstPacket) { fIsFirstPacket = isFirstPacket; // Set up the RTP header: unsigned rtpHdr = 0x80000000; // RTP version 2; marker ('M') bit not set (by default; it can be set later) rtpHdr |= (fRTPPayloadType<<16); rtpHdr |= fSeqNo; // sequence number fOutBuf->enqueueWord(rtpHdr); // Note where the RTP timestamp will go. // (We can't fill this in until we start packing payload frames.) fTimestampPosition = fOutBuf->curPacketSize(); fOutBuf->skipBytes(4); // leave a hole for the timestamp fOutBuf->enqueueWord(SSRC()); // Allow for a special, payload-format-specific header following the // RTP header: fSpecialHeaderPosition = fOutBuf->curPacketSize(); fSpecialHeaderSize = specialHeaderSize(); fOutBuf->skipBytes(fSpecialHeaderSize); // Begin packing as many (complete) frames into the packet as we can: fTotalFrameSpecificHeaderSizes = 0; fNoFramesLeft = False; fNumFramesUsedSoFar = 0; packFrame(); } void MultiFramedRTPSink::packFrame() { // Get the next frame. // First, see if we have an overflow frame that was too big for the last pkt if (fOutBuf->haveOverflowData()) { // Use this frame before reading a new one from the source unsigned frameSize = fOutBuf->overflowDataSize(); struct timeval presentationTime = fOutBuf->overflowPresentationTime(); unsigned durationInMicroseconds = fOutBuf->overflowDurationInMicroseconds(); fOutBuf->useOverflowData(); afterGettingFrame1(frameSize, 0, presentationTime, durationInMicroseconds); } else { // Normal case: we need to read a new frame from the source if (fSource == NULL) return; fCurFrameSpecificHeaderPosition = fOutBuf->curPacketSize(); fCurFrameSpecificHeaderSize = frameSpecificHeaderSize(); fOutBuf->skipBytes(fCurFrameSpecificHeaderSize); fTotalFrameSpecificHeaderSizes += fCurFrameSpecificHeaderSize; fSource->getNextFrame(fOutBuf->curPtr(), fOutBuf->totalBytesAvailable(), afterGettingFrame, this, ourHandleClosure, this); } } void MultiFramedRTPSink ::afterGettingFrame(void* clientData, unsigned numBytesRead, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { MultiFramedRTPSink* sink = (MultiFramedRTPSink*)clientData; sink->afterGettingFrame1(numBytesRead, numTruncatedBytes, presentationTime, durationInMicroseconds); } void MultiFramedRTPSink ::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { if (fIsFirstPacket) { // Record the fact that we're starting to play now: gettimeofday(&fNextSendTime, NULL); } fMostRecentPresentationTime = presentationTime; if (fInitialPresentationTime.tv_sec == 0 && fInitialPresentationTime.tv_usec == 0) { fInitialPresentationTime = presentationTime; } if (numTruncatedBytes > 0) { unsigned const bufferSize = fOutBuf->totalBytesAvailable(); envir() << "MultiFramedRTPSink::afterGettingFrame1(): The input frame data was too large for our buffer size (" << bufferSize << "). " << numTruncatedBytes << " bytes of trailing data was dropped! Correct this by increasing \"OutPacketBuffer::maxSize\" to at least " << OutPacketBuffer::maxSize + numTruncatedBytes << ", *before* creating this 'RTPSink'. 
(Current value is " << OutPacketBuffer::maxSize << ".)\n"; } unsigned curFragmentationOffset = fCurFragmentationOffset; unsigned numFrameBytesToUse = frameSize; unsigned overflowBytes = 0; // If we have already packed one or more frames into this packet, // check whether this new frame is eligible to be packed after them. // (This is independent of whether the packet has enough room for this // new frame; that check comes later.) if (fNumFramesUsedSoFar > 0) { if ((fPreviousFrameEndedFragmentation && !allowOtherFramesAfterLastFragment()) || !frameCanAppearAfterPacketStart(fOutBuf->curPtr(), frameSize)) { // Save away this frame for next time: numFrameBytesToUse = 0; fOutBuf->setOverflowData(fOutBuf->curPacketSize(), frameSize, presentationTime, durationInMicroseconds); } } fPreviousFrameEndedFragmentation = False; if (numFrameBytesToUse > 0) { // Check whether this frame overflows the packet if (fOutBuf->wouldOverflow(frameSize)) { // Don't use this frame now; instead, save it as overflow data, and // send it in the next packet instead. However, if the frame is too // big to fit in a packet by itself, then we need to fragment it (and // use some of it in this packet, if the payload format permits this.) if (isTooBigForAPacket(frameSize) && (fNumFramesUsedSoFar == 0 || allowFragmentationAfterStart())) { // We need to fragment this frame, and use some of it now: overflowBytes = computeOverflowForNewFrame(frameSize); numFrameBytesToUse -= overflowBytes; fCurFragmentationOffset += numFrameBytesToUse; } else { // We don't use any of this frame now: overflowBytes = frameSize; numFrameBytesToUse = 0; } fOutBuf->setOverflowData(fOutBuf->curPacketSize() + numFrameBytesToUse, overflowBytes, presentationTime, durationInMicroseconds); } else if (fCurFragmentationOffset > 0) { // This is the last fragment of a frame that was fragmented over // more than one packet. Do any special handling for this case: fCurFragmentationOffset = 0; fPreviousFrameEndedFragmentation = True; } } if (numFrameBytesToUse == 0 && frameSize > 0) { // Send our packet now, because we have filled it up: sendPacketIfNecessary(); } else { // Use this frame in our outgoing packet: unsigned char* frameStart = fOutBuf->curPtr(); fOutBuf->increment(numFrameBytesToUse); // do this now, in case "doSpecialFrameHandling()" calls "setFramePadding()" to append padding bytes // Here's where any payload format specific processing gets done: doSpecialFrameHandling(curFragmentationOffset, frameStart, numFrameBytesToUse, presentationTime, overflowBytes); ++fNumFramesUsedSoFar; // Update the time at which the next packet should be sent, based // on the duration of the frame that we just packed into it. // However, if this frame has overflow data remaining, then don't // count its duration yet. 
if (overflowBytes == 0) { fNextSendTime.tv_usec += durationInMicroseconds; fNextSendTime.tv_sec += fNextSendTime.tv_usec/1000000; fNextSendTime.tv_usec %= 1000000; } // Send our packet now if (i) it's already at our preferred size, or // (ii) (heuristic) another frame of the same size as the one we just // read would overflow the packet, or // (iii) it contains the last fragment of a fragmented frame, and we // don't allow anything else to follow this or // (iv) one frame per packet is allowed: if (fOutBuf->isPreferredSize() || fOutBuf->wouldOverflow(numFrameBytesToUse) || (fPreviousFrameEndedFragmentation && !allowOtherFramesAfterLastFragment()) || !frameCanAppearAfterPacketStart(fOutBuf->curPtr() - frameSize, frameSize) ) { // The packet is ready to be sent now sendPacketIfNecessary(); } else { // There's room for more frames; try getting another: packFrame(); } } } static unsigned const rtpHeaderSize = 12; Boolean MultiFramedRTPSink::isTooBigForAPacket(unsigned numBytes) const { // Check whether a 'numBytes'-byte frame - together with a RTP header and // (possible) special headers - would be too big for an output packet: // (Later allow for RTP extension header!) ##### numBytes += rtpHeaderSize + specialHeaderSize() + frameSpecificHeaderSize(); return fOutBuf->isTooBigForAPacket(numBytes); } void MultiFramedRTPSink::sendPacketIfNecessary() { if (fNumFramesUsedSoFar > 0) { // Send the packet: #ifdef TEST_LOSS if ((our_random()%10) != 0) // simulate 10% packet loss ##### #endif if (!fRTPInterface.sendPacket(fOutBuf->packet(), fOutBuf->curPacketSize())) { // if failure handler has been specified, call it if (fOnSendErrorFunc != NULL) (*fOnSendErrorFunc)(fOnSendErrorData); } ++fPacketCount; fTotalOctetCount += fOutBuf->curPacketSize(); fOctetCount += fOutBuf->curPacketSize() - rtpHeaderSize - fSpecialHeaderSize - fTotalFrameSpecificHeaderSizes; ++fSeqNo; // for next time } if (fOutBuf->haveOverflowData() && fOutBuf->totalBytesAvailable() > fOutBuf->totalBufferSize()/2) { // Efficiency hack: Reset the packet start pointer to just in front of // the overflow data (allowing for the RTP header and special headers), // so that we probably don't have to "memmove()" the overflow data // into place when building the next packet: unsigned newPacketStart = fOutBuf->curPacketSize() - (rtpHeaderSize + fSpecialHeaderSize + frameSpecificHeaderSize()); fOutBuf->adjustPacketStart(newPacketStart); } else { // Normal case: Reset the packet start pointer back to the start: fOutBuf->resetPacketStart(); } fOutBuf->resetOffset(); fNumFramesUsedSoFar = 0; if (fNoFramesLeft) { // We're done: onSourceClosure(); } else { // We have more frames left to send. Figure out when the next frame // is due to start playing, then make sure that we wait this long before // sending the next packet. 
struct timeval timeNow; gettimeofday(&timeNow, NULL); int secsDiff = fNextSendTime.tv_sec - timeNow.tv_sec; int64_t uSecondsToGo = secsDiff*1000000 + (fNextSendTime.tv_usec - timeNow.tv_usec); if (uSecondsToGo < 0 || secsDiff < 0) { // sanity check: Make sure that the time-to-delay is non-negative: uSecondsToGo = 0; } // Delay this amount of time: nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecondsToGo, (TaskFunc*)sendNext, this); } } // The following is called after each delay between packet sends: void MultiFramedRTPSink::sendNext(void* firstArg) { MultiFramedRTPSink* sink = (MultiFramedRTPSink*)firstArg; sink->buildAndSendPacket(False); } void MultiFramedRTPSink::ourHandleClosure(void* clientData) { MultiFramedRTPSink* sink = (MultiFramedRTPSink*)clientData; // There are no frames left, but we may have a partially built packet // to send sink->fNoFramesLeft = True; sink->sendPacketIfNecessary(); } live/liveMedia/TheoraVideoRTPSink.cpp000444 001751 000000 00000016442 12265042432 017775 0ustar00rsfwheel000000 000000 /* * Theora Video RTP packetizer * Copied from live555's VorbisAudioRTPSink */ #include "TheoraVideoRTPSink.hh" #include "Base64.hh" TheoraVideoRTPSink::TheoraVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned width, unsigned height, enum PixFmt pf, u_int8_t* identificationHeader, unsigned identificationHeaderSize, u_int8_t* commentHeader, unsigned commentHeaderSize, u_int8_t* setupHeader, unsigned setupHeaderSize, u_int32_t identField) : VideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "theora"), fIdent(identField), fFmtpSDPLine(NULL) { static const char *pf_to_str[] = { "YCbCr-4:2:0", "YCbCr-4:2:2", "YCbCr-4:4:4", }; // Create packed configuration headers, and encode this data into a "a=fmtp:" SDP line that we'll use to describe it: // First, count how many headers (<=3) are included, and how many bytes will be used to encode these headers' sizes: unsigned numHeaders = 0; unsigned sizeSize[2]; // The number of bytes used to encode the lengths of the first two headers (but not the length of the 3rd) sizeSize[0] = sizeSize[1] = 0; if (identificationHeaderSize > 0) { sizeSize[numHeaders++] = identificationHeaderSize < 128 ? 1 : identificationHeaderSize < 16384 ? 2 : 3; } if (commentHeaderSize > 0) { sizeSize[numHeaders++] = commentHeaderSize < 128 ? 1 : commentHeaderSize < 16384 ? 2 : 3; } if (setupHeaderSize > 0) { ++numHeaders; } else { sizeSize[1] = 0; // We have at most two headers, so the second one's length isn't encoded } if (numHeaders == 0) return; // With no headers, we can't set up a configuration if (numHeaders == 1) sizeSize[0] = 0; // With only one header, its length isn't encoded // Then figure out the size of the packed configuration headers, and allocate space for this: unsigned length = identificationHeaderSize + commentHeaderSize + setupHeaderSize; // The "length" field in the packed headers if (length > (unsigned)0xFFFF) return; // too big for a 16-bit field; we can't handle this unsigned packedHeadersSize = 4 // "Number of packed headers" field + 3 // "ident" field + 2 // "length" field + 1 // "n. 
of headers" field + sizeSize[0] + sizeSize[1] // "length1" and "length2" (if present) fields + length; u_int8_t* packedHeaders = new u_int8_t[packedHeadersSize]; if (packedHeaders == NULL) return; // Fill in the 'packed headers': u_int8_t* p = packedHeaders; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 1; // "Number of packed headers": 1 *p++ = fIdent>>16; *p++ = fIdent>>8; *p++ = fIdent; // "Ident" (24 bits) *p++ = length>>8; *p++ = length; // "length" (16 bits) *p++ = numHeaders-1; // "n. of headers" if (numHeaders > 1) { // Fill in the "length1" header: unsigned length1 = identificationHeaderSize > 0 ? identificationHeaderSize : commentHeaderSize; if (length1 >= 16384) { *p++ = 0x80; // flag, but no more, because we know length1 <= 32767 } if (length1 >= 128) { *p++ = 0x80|((length1&0x3F80)>>7); // flag + the second 7 bits } *p++ = length1&0x7F; // the low 7 bits if (numHeaders > 2) { // numHeaders == 3 // Fill in the "length2" header (for the 'Comment' header): unsigned length2 = commentHeaderSize; if (length2 >= 16384) { *p++ = 0x80; // flag, but no more, because we know length2 <= 32767 } if (length2 >= 128) { *p++ = 0x80|((length2&0x3F80)>>7); // flag + the second 7 bits } *p++ = length2&0x7F; // the low 7 bits } } // Copy each header: if (identificationHeader != NULL) memmove(p, identificationHeader, identificationHeaderSize); p += identificationHeaderSize; if (commentHeader != NULL) memmove(p, commentHeader, commentHeaderSize); p += commentHeaderSize; if (setupHeader != NULL) memmove(p, setupHeader, setupHeaderSize); // Having set up the 'packed configuration headers', Base-64-encode this, and put it in our "a=fmtp:" SDP line: char* base64PackedHeaders = base64Encode((char const*)packedHeaders, packedHeadersSize); delete[] packedHeaders; unsigned fmtpSDPLineMaxSize = 200 + strlen(base64PackedHeaders);// 200 => more than enough space fFmtpSDPLine = new char[fmtpSDPLineMaxSize]; sprintf(fFmtpSDPLine, "a=fmtp:%d sampling=%s;width=%u;height=%u;delivery-method=out_band/rtsp;configuration=%s\r\n", rtpPayloadType(), pf_to_str[pf], width, height, base64PackedHeaders); delete[] base64PackedHeaders; } TheoraVideoRTPSink::~TheoraVideoRTPSink() { delete[] fFmtpSDPLine; } TheoraVideoRTPSink* TheoraVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned width, unsigned height, enum PixFmt pf, u_int8_t* identificationHeader, unsigned identificationHeaderSize, u_int8_t* commentHeader, unsigned commentHeaderSize, u_int8_t* setupHeader, unsigned setupHeaderSize, u_int32_t identField) { return new TheoraVideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, width, height, pf, identificationHeader, identificationHeaderSize, commentHeader, commentHeaderSize, setupHeader, setupHeaderSize, identField); } char const* TheoraVideoRTPSink::auxSDPLine() { return fFmtpSDPLine; } void TheoraVideoRTPSink ::doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes) { // Set the 4-byte "payload header", as defined in http://svn.xiph.org/trunk/theora/doc/draft-ietf-avt-rtp-theora-00.txt u_int8_t header[6]; // The three bytes of the header are our "Ident": header[0] = fIdent>>16; header[1] = fIdent>>8; header[2] = fIdent; // The final byte contains the "F", "TDT", and "numPkts" fields: u_int8_t F; // Fragment type if (numRemainingBytes > 0) { if (fragmentationOffset > 0) { F = 2<<6; // continuation fragment } else { F = 
1<<6; // start fragment } } else { if (fragmentationOffset > 0) { F = 3<<6; // end fragment } else { F = 0<<6; // not fragmented } } u_int8_t const TDT = 0<<4; // Theora Data Type (always a "Raw Theora payload") u_int8_t numPkts = F == 0 ? (numFramesUsedSoFar() + 1): 0; // set to 0 when we're a fragment header[3] = F|TDT|numPkts; // There's also a 2-byte 'frame-specific' header: The length of the // Theora data: header[4] = numBytesInFrame >>8; header[5] = numBytesInFrame; setSpecialHeaderBytes(header, sizeof(header)); if (numRemainingBytes == 0) { // This packet contains the last (or only) fragment of the frame. // Set the RTP 'M' ('marker') bit: setMarkerBit(); } // Important: Also call our base class's doSpecialFrameHandling(), // to set the packet's timestamp: MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset, frameStart, numBytesInFrame, framePresentationTime, numRemainingBytes); } Boolean TheoraVideoRTPSink::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const { // Only one frame per packet: return False; } unsigned TheoraVideoRTPSink::specialHeaderSize() const { return 6; } live/liveMedia/ProxyServerMediaSession.cpp000444 001751 000000 00000111320 12265042432 021154 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A subclass of "ServerMediaSession" that can be used to create a (unicast) RTSP server that acts as a 'proxy' for // another (unicast or multicast) RTSP/RTP stream.
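// For illustration (not part of this file): typical use of this class, as in the
// "live555ProxyServer" application - create one "ProxyServerMediaSession" per
// back-end stream, and register it with a "RTSPServer".  The URL and stream name
// below are hypothetical, and the trailing "socketNumToServer" parameter is
// assumed to take the default declared in the class's header:
#if 0
TaskScheduler* scheduler = BasicTaskScheduler::createNew();
UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
RTSPServer* rtspServer = RTSPServer::createNew(*env, 554, NULL/*no authentication*/);
ServerMediaSession* sms
  = ProxyServerMediaSession::createNew(*env, rtspServer,
                                       "rtsp://backend.example.com/stream"/*hypothetical back-end URL*/,
                                       "proxyStream",
                                       NULL/*username*/, NULL/*password*/,
                                       0/*tunnelOverHTTPPortNum: no RTSP-over-HTTP tunneling*/,
                                       0/*verbosityLevel*/);
rtspServer->addServerMediaSession(sms);
env->taskScheduler().doEventLoop(); // does not return
#endif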
// Implementation #include "liveMedia.hh" #include "RTSPCommon.hh" #include "GroupsockHelper.hh" // for "our_random()" #ifndef MILLION #define MILLION 1000000 #endif // A "OnDemandServerMediaSubsession" subclass, used to implement a unicast RTSP server that's proxying another RTSP stream: class ProxyServerMediaSubsession: public OnDemandServerMediaSubsession { public: ProxyServerMediaSubsession(MediaSubsession& mediaSubsession); virtual ~ProxyServerMediaSubsession(); char const* codecName() const { return fClientMediaSubsession.codecName(); } private: // redefined virtual functions virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual void closeStreamSource(FramedSource *inputSource); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); private: static void subsessionByeHandler(void* clientData); void subsessionByeHandler(); int verbosityLevel() const { return ((ProxyServerMediaSession*)fParentSession)->fVerbosityLevel; } private: friend class ProxyRTSPClient; MediaSubsession& fClientMediaSubsession; // the 'client' media subsession object that corresponds to this 'server' media subsession ProxyServerMediaSubsession* fNext; // used when we're part of a queue Boolean fHaveSetupStream; }; ////////// ProxyServerMediaSession implementation ////////// UsageEnvironment& operator<<(UsageEnvironment& env, const ProxyServerMediaSession& psms) { // used for debugging return env << "ProxyServerMediaSession[\"" << psms.url() << "\"]"; } ProxyRTSPClient* defaultCreateNewProxyRTSPClientFunc(ProxyServerMediaSession& ourServerMediaSession, char const* rtspURL, char const* username, char const* password, portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer) { return new ProxyRTSPClient(ourServerMediaSession, rtspURL, username, password, tunnelOverHTTPPortNum, verbosityLevel, socketNumToServer); } ProxyServerMediaSession* ProxyServerMediaSession ::createNew(UsageEnvironment& env, RTSPServer* ourRTSPServer, char const* inputStreamURL, char const* streamName, char const* username, char const* password, portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer) { return new ProxyServerMediaSession(env, ourRTSPServer, inputStreamURL, streamName, username, password, tunnelOverHTTPPortNum, verbosityLevel, socketNumToServer); } ProxyServerMediaSession ::ProxyServerMediaSession(UsageEnvironment& env, RTSPServer* ourRTSPServer, char const* inputStreamURL, char const* streamName, char const* username, char const* password, portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer, createNewProxyRTSPClientFunc* ourCreateNewProxyRTSPClientFunc) : ServerMediaSession(env, streamName, NULL, NULL, False, NULL), describeCompletedFlag(0), fOurRTSPServer(ourRTSPServer), fClientMediaSession(NULL), fVerbosityLevel(verbosityLevel), fPresentationTimeSessionNormalizer(new PresentationTimeSessionNormalizer(envir())), fCreateNewProxyRTSPClientFunc(ourCreateNewProxyRTSPClientFunc) { // Open a RTSP connection to the input stream, and send a "DESCRIBE" command. // We'll use the SDP description in the response to set ourselves up. fProxyRTSPClient = (*fCreateNewProxyRTSPClientFunc)(*this, inputStreamURL, username, password, tunnelOverHTTPPortNum, verbosityLevel > 0 ? 
verbosityLevel-1 : verbosityLevel, socketNumToServer); ProxyRTSPClient::sendDESCRIBE(fProxyRTSPClient); } ProxyServerMediaSession::~ProxyServerMediaSession() { if (fVerbosityLevel > 0) { envir() << *this << "::~ProxyServerMediaSession()\n"; } // Begin by sending a "TEARDOWN" command (without checking for a response): if (fProxyRTSPClient != NULL) fProxyRTSPClient->sendTeardownCommand(*fClientMediaSession, NULL, fProxyRTSPClient->auth()); // Then delete our state: Medium::close(fClientMediaSession); Medium::close(fProxyRTSPClient); delete fPresentationTimeSessionNormalizer; } char const* ProxyServerMediaSession::url() const { return fProxyRTSPClient == NULL ? NULL : fProxyRTSPClient->url(); } void ProxyServerMediaSession::continueAfterDESCRIBE(char const* sdpDescription) { describeCompletedFlag = 1; // Create a (client) "MediaSession" object from the stream's SDP description ("resultString"), then iterate through its // "MediaSubsession" objects, to set up corresponding "ServerMediaSubsession" objects that we'll use to serve the stream's tracks. do { fClientMediaSession = MediaSession::createNew(envir(), sdpDescription); if (fClientMediaSession == NULL) break; MediaSubsessionIterator iter(*fClientMediaSession); for (MediaSubsession* mss = iter.next(); mss != NULL; mss = iter.next()) { ServerMediaSubsession* smss = new ProxyServerMediaSubsession(*mss); addSubsession(smss); if (fVerbosityLevel > 0) { envir() << *this << " added new \"ProxyServerMediaSubsession\" for " << mss->protocolName() << "/" << mss->mediumName() << "/" << mss->codecName() << " track\n"; } } } while (0); } void ProxyServerMediaSession::resetDESCRIBEState() { // Delete all of our "ProxyServerMediaSubsession"s; they'll get set up again once we get a response to the new "DESCRIBE". if (fOurRTSPServer != NULL) { // First, close any RTSP client connections that may have already been set up: fOurRTSPServer->closeAllClientSessionsForServerMediaSession(this); } deleteAllSubsessions(); // Finally, delete the client "MediaSession" object that we had set up after receiving the response to the previous "DESCRIBE": Medium::close(fClientMediaSession); fClientMediaSession = NULL; } ///////// RTSP 'response handlers' ////////// static void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString) { char const* res; if (resultCode == 0) { // The "DESCRIBE" command succeeded, so "resultString" should be the stream's SDP description. res = resultString; } else { // The "DESCRIBE" command failed. 
res = NULL; } ((ProxyRTSPClient*)rtspClient)->continueAfterDESCRIBE(res); delete[] resultString; } static void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) { if (resultCode == 0) { ((ProxyRTSPClient*)rtspClient)->continueAfterSETUP(); } delete[] resultString; } static void continueAfterOPTIONS(RTSPClient* rtspClient, int resultCode, char* resultString) { Boolean serverSupportsGetParameter = False; if (resultCode == 0) { // Note whether the server told us that it supports the "GET_PARAMETER" command: serverSupportsGetParameter = RTSPOptionIsSupported("GET_PARAMETER", resultString); } ((ProxyRTSPClient*)rtspClient)->continueAfterLivenessCommand(resultCode, serverSupportsGetParameter); delete[] resultString; } #ifdef SEND_GET_PARAMETER_IF_SUPPORTED static void continueAfterGET_PARAMETER(RTSPClient* rtspClient, int resultCode, char* resultString) { ((ProxyRTSPClient*)rtspClient)->continueAfterLivenessCommand(resultCode, True); delete[] resultString; } #endif ////////// "ProxyRTSPClient" implementation ///////// UsageEnvironment& operator<<(UsageEnvironment& env, const ProxyRTSPClient& proxyRTSPClient) { // used for debugging return env << "ProxyRTSPClient[\"" << proxyRTSPClient.url() << "\"]"; } ProxyRTSPClient::ProxyRTSPClient(ProxyServerMediaSession& ourServerMediaSession, char const* rtspURL, char const* username, char const* password, portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer) : RTSPClient(ourServerMediaSession.envir(), rtspURL, verbosityLevel, "ProxyRTSPClient", tunnelOverHTTPPortNum == (portNumBits)(~0) ? 0 : tunnelOverHTTPPortNum, socketNumToServer), fOurServerMediaSession(ourServerMediaSession), fOurURL(strDup(rtspURL)), fStreamRTPOverTCP(tunnelOverHTTPPortNum != 0), fSetupQueueHead(NULL), fSetupQueueTail(NULL), fNumSetupsDone(0), fNextDESCRIBEDelay(1), fServerSupportsGetParameter(False), fLastCommandWasPLAY(False), fLivenessCommandTask(NULL), fDESCRIBECommandTask(NULL), fSubsessionTimerTask(NULL) { if (username != NULL && password != NULL) { fOurAuthenticator = new Authenticator(username, password); } else { fOurAuthenticator = NULL; } } void ProxyRTSPClient::reset() { envir().taskScheduler().unscheduleDelayedTask(fLivenessCommandTask); fLivenessCommandTask = NULL; envir().taskScheduler().unscheduleDelayedTask(fDESCRIBECommandTask); fDESCRIBECommandTask = NULL; envir().taskScheduler().unscheduleDelayedTask(fSubsessionTimerTask); fSubsessionTimerTask = NULL; fSetupQueueHead = fSetupQueueTail = NULL; fNumSetupsDone = 0; fNextDESCRIBEDelay = 1; fLastCommandWasPLAY = False; RTSPClient::reset(); } ProxyRTSPClient::~ProxyRTSPClient() { reset(); delete fOurAuthenticator; delete[] fOurURL; } void ProxyRTSPClient::continueAfterDESCRIBE(char const* sdpDescription) { if (sdpDescription != NULL) { fOurServerMediaSession.continueAfterDESCRIBE(sdpDescription); // Unlike most RTSP streams, there might be a long delay between this "DESCRIBE" command (to the downstream server) and the // subsequent "SETUP"/"PLAY" - which doesn't occur until the first time that a client requests the stream. // To prevent the proxied connection (between us and the downstream server) from timing out, we send periodic 'liveness' // ("OPTIONS" or "GET_PARAMETER") commands. (The usual RTCP liveness mechanism wouldn't work here, because RTCP packets // don't get sent until after the "PLAY" command.) scheduleLivenessCommand(); } else { // The "DESCRIBE" command failed, most likely because the server or the stream is not yet running. 
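// Note (for readers): each of the RTSP 'response handlers' above follows the same
// trampoline pattern - a free function with the "RTSPClient" response-handler
// signature recovers the subclass object from the base-class pointer, forwards to
// a member function, and then frees "resultString", which the handler owns.
// Schematically ("MyRTSPClient" and "handleResponse" are hypothetical names):
#if 0
static void continueAfterSomeCommand(RTSPClient* rtspClient, int resultCode, char* resultString) {
  // Recover the subclass object, and forward the result (NULL on failure):
  ((MyRTSPClient*)rtspClient)->handleResponse(resultCode == 0 ? resultString : NULL);
  delete[] resultString; // the response handler is responsible for freeing this
}
#endif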
// Reschedule another "DESCRIBE" command to take place later: scheduleDESCRIBECommand(); } } void ProxyRTSPClient::continueAfterLivenessCommand(int resultCode, Boolean serverSupportsGetParameter) { if (resultCode != 0) { // The periodic 'liveness' command failed, suggesting that the back-end stream is no longer alive. // We handle this by resetting our connection state with this server. Any current clients will be closed, but // subsequent clients will cause new RTSP "SETUP"s and "PLAY"s to get done, restarting the stream. // Then continue by sending more "DESCRIBE" commands, to try to restore the stream. fServerSupportsGetParameter = False; // until we learn otherwise, in response to a future "OPTIONS" command if (resultCode < 0) { // The 'liveness' command failed without getting a response from the server (otherwise "resultCode" would have been > 0). // This suggests that the RTSP connection itself has failed. Print this error code, in case it's useful for debugging: if (fVerbosityLevel > 0) { envir() << *this << ": lost connection to server ('errno': " << -resultCode << "). Resetting...\n"; } } reset(); fOurServerMediaSession.resetDESCRIBEState(); setBaseURL(fOurURL); // because we'll be sending an initial "DESCRIBE" all over again sendDESCRIBE(this); return; } fServerSupportsGetParameter = serverSupportsGetParameter; // Schedule the next 'liveness' command (i.e., to tell the back-end server that we're still alive): scheduleLivenessCommand(); } #define SUBSESSION_TIMEOUT_SECONDS 10 // how many seconds to wait for the last track's "SETUP" to be done (note below) void ProxyRTSPClient::continueAfterSETUP() { if (fVerbosityLevel > 0) { envir() << *this << "::continueAfterSETUP(): head codec: " << fSetupQueueHead->fClientMediaSubsession.codecName() << "; numSubsessions " << fSetupQueueHead->fParentSession->numSubsessions() << "\n\tqueue:"; for (ProxyServerMediaSubsession* p = fSetupQueueHead; p != NULL; p = p->fNext) { envir() << "\t" << p->fClientMediaSubsession.codecName(); } envir() << "\n"; } envir().taskScheduler().unscheduleDelayedTask(fSubsessionTimerTask); // in case it had been set // Dequeue the first "ProxyServerMediaSubsession" from our 'SETUP queue'. It will be the one for which this "SETUP" was done: ProxyServerMediaSubsession* smss = fSetupQueueHead; // Assert: != NULL fSetupQueueHead = fSetupQueueHead->fNext; if (fSetupQueueHead == NULL) fSetupQueueTail = NULL; if (fSetupQueueHead != NULL) { // There are still entries in the queue, for tracks for which we have still to do a "SETUP". // "SETUP" the first of these now: sendSetupCommand(fSetupQueueHead->fClientMediaSubsession, ::continueAfterSETUP, False, fStreamRTPOverTCP, False, fOurAuthenticator); ++fNumSetupsDone; fSetupQueueHead->fHaveSetupStream = True; } else { if (fNumSetupsDone >= smss->fParentSession->numSubsessions()) { // We've now finished setting up each of our subsessions (i.e., 'tracks'). // Continue by sending a "PLAY" command (an 'aggregate' "PLAY" command, on the whole session): sendPlayCommand(smss->fClientMediaSubsession.parentSession(), NULL, -1.0f, -1.0f, 1.0f, fOurAuthenticator); // the "-1.0f" "start" parameter causes the "PLAY" to be sent without a "Range:" header, in case we'd already done // a "PLAY" before (as a result of a 'subsession timeout' (note below)) fLastCommandWasPLAY = True; } else { // Some of this session's subsessions (i.e., 'tracks') remain to be "SETUP". 
They might get "SETUP" very soon, but it's // also possible - if the remote client chose to play only some of the session's tracks - that they might not. // To allow for this possibility, we set a timer. If the timer expires without the remaining subsessions getting "SETUP", // then we send a "PLAY" command anyway: fSubsessionTimerTask = envir().taskScheduler().scheduleDelayedTask(SUBSESSION_TIMEOUT_SECONDS*MILLION, (TaskFunc*)subsessionTimeout, this); } } } void ProxyRTSPClient::scheduleLivenessCommand() { // Delay a random time before sending another 'liveness' command. unsigned delayMax = sessionTimeoutParameter(); // if the server specified a maximum time between 'liveness' probes, then use that if (delayMax == 0) { delayMax = 60; } // Choose a random time from [delayMax/2,delayMax-1) seconds: unsigned const us_1stPart = delayMax*500000; unsigned uSecondsToDelay; if (us_1stPart <= 1000000) { uSecondsToDelay = us_1stPart; } else { unsigned const us_2ndPart = us_1stPart-1000000; uSecondsToDelay = us_1stPart + (us_2ndPart*our_random())%us_2ndPart; } fLivenessCommandTask = envir().taskScheduler().scheduleDelayedTask(uSecondsToDelay, sendLivenessCommand, this); } void ProxyRTSPClient::sendLivenessCommand(void* clientData) { ProxyRTSPClient* rtspClient = (ProxyRTSPClient*)clientData; // Note. By default, we do not send "GET_PARAMETER" as our 'liveness notification' command, even if the server previously // indicated (in its response to our earlier "OPTIONS" command) that it supported "GET_PARAMETER". This is because // "GET_PARAMETER" crashes some camera servers (even though they claimed to support "GET_PARAMETER"). #ifdef SEND_GET_PARAMETER_IF_SUPPORTED MediaSession* sess = rtspClient->fOurServerMediaSession.fClientMediaSession; if (rtspClient->fServerSupportsGetParameter && rtspClient->fNumSetupsDone > 0 && sess != NULL) { rtspClient->sendGetParameterCommand(*sess, ::continueAfterGET_PARAMETER, "", rtspClient->auth()); } else { #endif rtspClient->sendOptionsCommand(::continueAfterOPTIONS, rtspClient->auth()); #ifdef SEND_GET_PARAMETER_IF_SUPPORTED } #endif } void ProxyRTSPClient::scheduleDESCRIBECommand() { // Delay 1s, 2s, 4s, 8s ... 256s until sending the next "DESCRIBE". Then, keep delaying a random time from [256..511] seconds: unsigned secondsToDelay; if (fNextDESCRIBEDelay <= 256) { secondsToDelay = fNextDESCRIBEDelay; fNextDESCRIBEDelay *= 2; } else { secondsToDelay = 256 + (our_random()&0xFF); // [256..511] seconds } if (fVerbosityLevel > 0) { envir() << *this << ": RTSP \"DESCRIBE\" command failed; trying again in " << secondsToDelay << " seconds\n"; } fDESCRIBECommandTask = envir().taskScheduler().scheduleDelayedTask(secondsToDelay*MILLION, sendDESCRIBE, this); } void ProxyRTSPClient::sendDESCRIBE(void* clientData) { ProxyRTSPClient* rtspClient = (ProxyRTSPClient*)clientData; if (rtspClient != NULL) rtspClient->sendDescribeCommand(::continueAfterDESCRIBE, rtspClient->auth()); } void ProxyRTSPClient::subsessionTimeout(void* clientData) { ((ProxyRTSPClient*)clientData)->handleSubsessionTimeout(); } void ProxyRTSPClient::handleSubsessionTimeout() { // We still have one or more subsessions ('tracks') left to "SETUP". But we can't wait any longer for them. 
Send a "PLAY" now: MediaSession* sess = fOurServerMediaSession.fClientMediaSession; if (sess != NULL) sendPlayCommand(*sess, NULL, -1.0f, -1.0f, 1.0f, fOurAuthenticator); fLastCommandWasPLAY = True; } //////// "ProxyServerMediaSubsession" implementation ////////// ProxyServerMediaSubsession::ProxyServerMediaSubsession(MediaSubsession& mediaSubsession) : OnDemandServerMediaSubsession(mediaSubsession.parentSession().envir(), True/*reuseFirstSource*/), fClientMediaSubsession(mediaSubsession), fNext(NULL), fHaveSetupStream(False) { } UsageEnvironment& operator<<(UsageEnvironment& env, const ProxyServerMediaSubsession& psmss) { // used for debugging return env << "ProxyServerMediaSubsession[\"" << psmss.codecName() << "\"]"; } ProxyServerMediaSubsession::~ProxyServerMediaSubsession() { if (verbosityLevel() > 0) { envir() << *this << "::~ProxyServerMediaSubsession()\n"; } } FramedSource* ProxyServerMediaSubsession::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { ProxyServerMediaSession* const sms = (ProxyServerMediaSession*)fParentSession; if (verbosityLevel() > 0) { envir() << *this << "::createNewStreamSource(session id " << clientSessionId << ")\n"; } // If we haven't yet created a data source from our 'media subsession' object, initiate() it to do so: if (fClientMediaSubsession.readSource() == NULL) { fClientMediaSubsession.receiveRawMP3ADUs(); // hack for MPA-ROBUST streams fClientMediaSubsession.receiveRawJPEGFrames(); // hack for proxying JPEG/RTP streams. (Don't do this if we're transcoding.) fClientMediaSubsession.initiate(); if (verbosityLevel() > 0) { envir() << "\tInitiated: " << *this << "\n"; } if (fClientMediaSubsession.readSource() != NULL) { // Add to the front of all data sources a filter that will 'normalize' their frames' presentation times, // before the frames get re-transmitted by our server: char const* const codecName = fClientMediaSubsession.codecName(); FramedFilter* normalizerFilter = sms->fPresentationTimeSessionNormalizer ->createNewPresentationTimeSubsessionNormalizer(fClientMediaSubsession.readSource(), fClientMediaSubsession.rtpSource(), codecName); fClientMediaSubsession.addFilter(normalizerFilter); // Some data sources require a 'framer' object to be added, before they can be fed into a "RTPSink". Adjust for this now: if (strcmp(codecName, "H264") == 0) { fClientMediaSubsession.addFilter(H264VideoStreamDiscreteFramer::createNew(envir(), fClientMediaSubsession.readSource())); } else if (strcmp(codecName, "MP4V-ES") == 0) { fClientMediaSubsession.addFilter(MPEG4VideoStreamDiscreteFramer ::createNew(envir(), fClientMediaSubsession.readSource(), True/* leave PTs unmodified*/)); } else if (strcmp(codecName, "MPV") == 0) { fClientMediaSubsession.addFilter(MPEG1or2VideoStreamDiscreteFramer::createNew(envir(), fClientMediaSubsession.readSource(), False, 5.0, True/* leave PTs unmodified*/)); } else if (strcmp(codecName, "DV") == 0) { fClientMediaSubsession.addFilter(DVVideoStreamFramer::createNew(envir(), fClientMediaSubsession.readSource(), False, True/* leave PTs unmodified*/)); } } if (fClientMediaSubsession.rtcpInstance() != NULL) { fClientMediaSubsession.rtcpInstance()->setByeHandler(subsessionByeHandler, this); } } ProxyRTSPClient* const proxyRTSPClient = sms->fProxyRTSPClient; if (clientSessionId != 0) { // We're being called as a result of implementing a RTSP "SETUP". if (!fHaveSetupStream) { // This is our first "SETUP". 
Send RTSP "SETUP" and later "PLAY" commands to the proxied server, to start streaming: // (Before sending "SETUP", enqueue ourselves on the "RTSPClient"s 'SETUP queue', so we'll be able to get the correct // "ProxyServerMediaSubsession" to handle the response. (Note that responses come back in the same order as requests.)) Boolean queueWasEmpty = proxyRTSPClient->fSetupQueueHead == NULL; if (queueWasEmpty) { proxyRTSPClient->fSetupQueueHead = this; } else { proxyRTSPClient->fSetupQueueTail->fNext = this; } proxyRTSPClient->fSetupQueueTail = this; // Hack: If there's already a pending "SETUP" request (for another track), don't send this track's "SETUP" right away, because // the server might not properly handle 'pipelined' requests. Instead, wait until after previous "SETUP" responses come back. if (queueWasEmpty) { proxyRTSPClient->sendSetupCommand(fClientMediaSubsession, ::continueAfterSETUP, False, proxyRTSPClient->fStreamRTPOverTCP, False, proxyRTSPClient->auth()); ++proxyRTSPClient->fNumSetupsDone; fHaveSetupStream = True; } } else { // This is a "SETUP" from a new client. We know that there are no other currently active clients (otherwise we wouldn't // have been called here), so we know that the substream was previously "PAUSE"d. Send "PLAY" downstream once again, // to resume the stream: if (!proxyRTSPClient->fLastCommandWasPLAY) { // so that we send only one "PLAY"; not one for each subsession proxyRTSPClient->sendPlayCommand(fClientMediaSubsession.parentSession(), NULL, -1.0f/*resume from previous point*/, -1.0f, 1.0f, proxyRTSPClient->auth()); proxyRTSPClient->fLastCommandWasPLAY = True; } } } estBitrate = fClientMediaSubsession.bandwidth(); if (estBitrate == 0) estBitrate = 50; // kbps, estimate return fClientMediaSubsession.readSource(); } void ProxyServerMediaSubsession::closeStreamSource(FramedSource* inputSource) { if (verbosityLevel() > 0) { envir() << *this << "::closeStreamSource()\n"; } // Because there's only one input source for this 'subsession' (regardless of how many downstream clients are proxying it), // we don't close the input source here. (Instead, we wait until *this* object gets deleted.) 
// However, because (as evidenced by this function having been called) we no longer have any clients accessing the stream, // then we "PAUSE" the downstream proxied stream, until a new client arrives: if (fHaveSetupStream) { ProxyServerMediaSession* const sms = (ProxyServerMediaSession*)fParentSession; ProxyRTSPClient* const proxyRTSPClient = sms->fProxyRTSPClient; if (proxyRTSPClient->fLastCommandWasPLAY) { // so that we send only one "PAUSE"; not one for each subsession proxyRTSPClient->sendPauseCommand(fClientMediaSubsession.parentSession(), NULL, proxyRTSPClient->auth()); proxyRTSPClient->fLastCommandWasPLAY = False; } } } RTPSink* ProxyServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource) { if (verbosityLevel() > 0) { envir() << *this << "::createNewRTPSink()\n"; } // Create (and return) the appropriate "RTPSink" object for our codec: RTPSink* newSink; char const* const codecName = fClientMediaSubsession.codecName(); if (strcmp(codecName, "AC3") == 0 || strcmp(codecName, "EAC3") == 0) { newSink = AC3AudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency()); #if 0 // This code does not work; do *not* enable it: } else if (strcmp(codecName, "AMR") == 0 || strcmp(codecName, "AMR-WB") == 0) { Boolean isWideband = strcmp(codecName, "AMR-WB") == 0; newSink = AMRAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, isWideband, fClientMediaSubsession.numChannels()); #endif } else if (strcmp(codecName, "DV") == 0) { newSink = DVVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } else if (strcmp(codecName, "GSM") == 0) { newSink = GSMAudioRTPSink::createNew(envir(), rtpGroupsock); } else if (strcmp(codecName, "H263-1998") == 0 || strcmp(codecName, "H263-2000") == 0) { newSink = H263plusVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency()); } else if (strcmp(codecName, "H264") == 0) { newSink = H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.fmtp_spropparametersets()); } else if (strcmp(codecName, "JPEG") == 0) { newSink = SimpleRTPSink::createNew(envir(), rtpGroupsock, 26, 90000, "video", "JPEG", 1/*numChannels*/, False/*allowMultipleFramesPerPacket*/, False/*doNormalMBitRule*/); } else if (strcmp(codecName, "MP4A-LATM") == 0) { newSink = MPEG4LATMAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.fmtp_config(), fClientMediaSubsession.numChannels()); } else if (strcmp(codecName, "MP4V-ES") == 0) { newSink = MPEG4ESVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.fmtp_profile_level_id(), fClientMediaSubsession.fmtp_config()); } else if (strcmp(codecName, "MPA") == 0) { newSink = MPEG1or2AudioRTPSink::createNew(envir(), rtpGroupsock); } else if (strcmp(codecName, "MPA-ROBUST") == 0) { newSink = MP3ADURTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } else if (strcmp(codecName, "MPEG4-GENERIC") == 0) { newSink = MPEG4GenericRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.mediumName(), fClientMediaSubsession.fmtp_mode(), fClientMediaSubsession.fmtp_config(), fClientMediaSubsession.numChannels()); } else if 
(strcmp(codecName, "MPV") == 0) { newSink = MPEG1or2VideoRTPSink::createNew(envir(), rtpGroupsock); } else if (strcmp(codecName, "T140") == 0) { newSink = T140TextRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } else if (strcmp(codecName, "VORBIS") == 0) { newSink = VorbisAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.numChannels(), fClientMediaSubsession.fmtp_config()); } else if (strcmp(codecName, "VP8") == 0) { newSink = VP8VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } else if (strcmp(codecName, "AMR") == 0 || strcmp(codecName, "AMR-WB") == 0) { // Proxying of these codecs is currently *not* supported, because the data received by the "RTPSource" object is not in a // form that can be fed directly into a corresponding "RTPSink" object. if (verbosityLevel() > 0) { envir() << "\treturns NULL (because we currently don't support the proxying of \"" << fClientMediaSubsession.mediumName() << "/" << codecName << "\" streams)\n"; } return NULL; } else if (strcmp(codecName, "QCELP") == 0 || strcmp(codecName, "H261") == 0 || strcmp(codecName, "H263-1998") == 0 || strcmp(codecName, "H263-2000") == 0 || strcmp(codecName, "X-QT") == 0 || strcmp(codecName, "X-QUICKTIME") == 0) { // This codec requires a specialized RTP payload format; however, we don't yet have an appropriate "RTPSink" subclass for it: if (verbosityLevel() > 0) { envir() << "\treturns NULL (because we don't have a \"RTPSink\" subclass for this RTP payload format)\n"; } return NULL; } else { // This codec is assumed to have a simple RTP payload format that can be implemented just with a "SimpleRTPSink": Boolean allowMultipleFramesPerPacket = True; // by default Boolean doNormalMBitRule = True; // by default // Some codecs change the above default parameters: if (strcmp(codecName, "MP2T") == 0) { doNormalMBitRule = False; // no RTP 'M' bit } newSink = SimpleRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.mediumName(), fClientMediaSubsession.codecName(), fClientMediaSubsession.numChannels(), allowMultipleFramesPerPacket, doNormalMBitRule); } // Because our relayed frames' presentation times are inaccurate until the input frames have been RTCP-synchronized, // we temporarily disable RTCP "SR" reports for this "RTPSink" object: newSink->enableRTCPReports() = False; // Also tell our "PresentationTimeSubsessionNormalizer" object about the "RTPSink", so it can enable RTCP "SR" reports later: PresentationTimeSubsessionNormalizer* ssNormalizer; if (strcmp(codecName, "H264") == 0 || strcmp(codecName, "MP4V-ES") == 0 || strcmp(codecName, "MPV") == 0 || strcmp(codecName, "DV") == 0) { // There was a separate 'framer' object in front of the "PresentationTimeSubsessionNormalizer", so go back one object to get it: ssNormalizer = (PresentationTimeSubsessionNormalizer*)(((FramedFilter*)inputSource)->inputSource()); } else { ssNormalizer = (PresentationTimeSubsessionNormalizer*)inputSource; } ssNormalizer->setRTPSink(newSink); return newSink; } void ProxyServerMediaSubsession::subsessionByeHandler(void* clientData) { ((ProxyServerMediaSubsession*)clientData)->subsessionByeHandler(); } void ProxyServerMediaSubsession::subsessionByeHandler() { if (verbosityLevel() > 0) { envir() << *this << ": received RTCP \"BYE\". 
(The back-end stream has ended.)\n"; } // This "BYE" signals that our input source has (effectively) closed, so pass this onto the front-end clients: fHaveSetupStream = False; // hack to stop "PAUSE" getting sent by: FramedSource::handleClosure(fClientMediaSubsession.readSource()); // And then treat this as if we had lost connection to the back-end server, // and can reestablish streaming from it only by sending another "DESCRIBE": ProxyServerMediaSession* const sms = (ProxyServerMediaSession*)fParentSession; ProxyRTSPClient* const proxyRTSPClient = sms->fProxyRTSPClient; proxyRTSPClient->continueAfterLivenessCommand(1/*hack*/, proxyRTSPClient->fServerSupportsGetParameter); } ////////// PresentationTimeSessionNormalizer and PresentationTimeSubsessionNormalizer implementations ////////// // PresentationTimeSessionNormalizer: PresentationTimeSessionNormalizer::PresentationTimeSessionNormalizer(UsageEnvironment& env) : Medium(env), fSubsessionNormalizers(NULL), fMasterSSNormalizer(NULL) { } PresentationTimeSessionNormalizer::~PresentationTimeSessionNormalizer() { while (fSubsessionNormalizers != NULL) { delete fSubsessionNormalizers; } } PresentationTimeSubsessionNormalizer* PresentationTimeSessionNormalizer::createNewPresentationTimeSubsessionNormalizer(FramedSource* inputSource, RTPSource* rtpSource, char const* codecName) { fSubsessionNormalizers = new PresentationTimeSubsessionNormalizer(*this, inputSource, rtpSource, codecName, fSubsessionNormalizers); return fSubsessionNormalizers; } void PresentationTimeSessionNormalizer::normalizePresentationTime(PresentationTimeSubsessionNormalizer* ssNormalizer, struct timeval& toPT, struct timeval const& fromPT) { Boolean const hasBeenSynced = ssNormalizer->fRTPSource->hasBeenSynchronizedUsingRTCP(); if (!hasBeenSynced) { // If "fromPT" has not yet been RTCP-synchronized, then it was generated by our own receiving code, and thus // is already aligned with 'wall-clock' time. Just copy it 'as is' to "toPT": toPT = fromPT; } else { if (fMasterSSNormalizer == NULL) { // Make "ssNormalizer" the 'master' subsession - meaning that its presentation time is adjusted to align with 'wall clock' // time, and the presentation times of other subsessions (if any) are adjusted to retain their relative separation with // those of the master: fMasterSSNormalizer = ssNormalizer; struct timeval timeNow; gettimeofday(&timeNow, NULL); // Compute: fPTAdjustment = timeNow - fromPT fPTAdjustment.tv_sec = timeNow.tv_sec - fromPT.tv_sec; fPTAdjustment.tv_usec = timeNow.tv_usec - fromPT.tv_usec; // Note: It's OK if one or both of these fields underflows; the result still works out OK later. 
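// Note (for readers): "fPTAdjustment.tv_usec" was computed above as a raw
// difference, so it may be negative.  The "- 1"/"+ MILLION" pair in the addition
// below borrows one second up front, which keeps the microseconds sum
// non-negative; the "while" loop then carries any excess microseconds back into
// the seconds field.  E.g., with fromPT.tv_usec == 100000 and
// fPTAdjustment.tv_usec == -300000, the microseconds sum is
// 100000 - 300000 + 1000000 == 800000, and the borrowed second is already
// accounted for by the "- 1".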
} // Compute a normalized presentation time: toPT = fromPT + fPTAdjustment toPT.tv_sec = fromPT.tv_sec + fPTAdjustment.tv_sec - 1; toPT.tv_usec = fromPT.tv_usec + fPTAdjustment.tv_usec + MILLION; while (toPT.tv_usec > MILLION) { ++toPT.tv_sec; toPT.tv_usec -= MILLION; } // Because "ssNormalizer"s relayed presentation times are accurate from now on, enable RTCP "SR" reports for its "RTPSink": RTPSink* const rtpSink = ssNormalizer->fRTPSink; if (rtpSink != NULL) { // sanity check; should always be true rtpSink->enableRTCPReports() = True; } } } void PresentationTimeSessionNormalizer ::removePresentationTimeSubsessionNormalizer(PresentationTimeSubsessionNormalizer* ssNormalizer) { // Unlink "ssNormalizer" from the linked list (starting with "fSubsessionNormalizers"): if (fSubsessionNormalizers == ssNormalizer) { fSubsessionNormalizers = fSubsessionNormalizers->fNext; } else { PresentationTimeSubsessionNormalizer** ssPtrPtr = &(fSubsessionNormalizers->fNext); while (*ssPtrPtr != ssNormalizer) ssPtrPtr = &((*ssPtrPtr)->fNext); *ssPtrPtr = (*ssPtrPtr)->fNext; } } // PresentationTimeSubsessionNormalizer: PresentationTimeSubsessionNormalizer ::PresentationTimeSubsessionNormalizer(PresentationTimeSessionNormalizer& parent, FramedSource* inputSource, RTPSource* rtpSource, char const* codecName, PresentationTimeSubsessionNormalizer* next) : FramedFilter(parent.envir(), inputSource), fParent(parent), fRTPSource(rtpSource), fRTPSink(NULL), fCodecName(codecName), fNext(next) { } PresentationTimeSubsessionNormalizer::~PresentationTimeSubsessionNormalizer() { fParent.removePresentationTimeSubsessionNormalizer(this); } void PresentationTimeSubsessionNormalizer::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { ((PresentationTimeSubsessionNormalizer*)clientData) ->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } void PresentationTimeSubsessionNormalizer::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { // This filter is implemented by passing all frames through unchanged, except that "fPresentationTime" is changed: fFrameSize = frameSize; fNumTruncatedBytes = numTruncatedBytes; fDurationInMicroseconds = durationInMicroseconds; fParent.normalizePresentationTime(this, fPresentationTime, presentationTime); // Hack for JPEG/RTP proxying. Because we're proxying JPEG by just copying the raw JPEG/RTP payloads, without interpreting them, // we need to also 'copy' the RTP 'M' (marker) bit from the "RTPSource" to the "RTPSink": if (fRTPSource->curPacketMarkerBit() && strcmp(fCodecName, "JPEG") == 0) ((SimpleRTPSink*)fRTPSink)->setMBitOnNextPacket(); // Complete delivery: FramedSource::afterGetting(this); } void PresentationTimeSubsessionNormalizer::doGetNextFrame() { fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this, FramedSource::handleClosure, this); } live/liveMedia/MPEG1or2VideoFileServerMediaSubsession.cpp000444 001751 000000 00000005132 12265042432 023573 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a MPEG-1 or 2 Elementary Stream video file. // Implementation #include "MPEG1or2VideoFileServerMediaSubsession.hh" #include "MPEG1or2VideoRTPSink.hh" #include "ByteStreamFileSource.hh" #include "MPEG1or2VideoStreamFramer.hh" MPEG1or2VideoFileServerMediaSubsession* MPEG1or2VideoFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource, Boolean iFramesOnly, double vshPeriod) { return new MPEG1or2VideoFileServerMediaSubsession(env, fileName, reuseFirstSource, iFramesOnly, vshPeriod); } MPEG1or2VideoFileServerMediaSubsession ::MPEG1or2VideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource, Boolean iFramesOnly, double vshPeriod) : FileServerMediaSubsession(env, fileName, reuseFirstSource), fIFramesOnly(iFramesOnly), fVSHPeriod(vshPeriod) { } MPEG1or2VideoFileServerMediaSubsession ::~MPEG1or2VideoFileServerMediaSubsession() { } FramedSource* MPEG1or2VideoFileServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { estBitrate = 500; // kbps, estimate ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName); if (fileSource == NULL) return NULL; fFileSize = fileSource->fileSize(); return MPEG1or2VideoStreamFramer ::createNew(envir(), fileSource, fIFramesOnly, fVSHPeriod); } RTPSink* MPEG1or2VideoFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char /*rtpPayloadTypeIfDynamic*/, FramedSource* /*inputSource*/) { return MPEG1or2VideoRTPSink::createNew(envir(), rtpGroupsock); } live/liveMedia/VorbisAudioMatroskaFileServerMediaSubsession.hh000444 001751 000000 00000004646 12265042432 025126 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a Vorbis audio track within a Matroska file. 
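// Note (for readers): like most classes in this library, this subsession uses the
// 'static "createNew()" plus non-public constructor/destructor' idiom: objects can
// be created only on the heap via "createNew()", and are reclaimed through the
// base class (e.g., "Medium::close()"), so stack allocation and direct "delete"
// are ruled out at compile time.  A minimal sketch of the idiom (hypothetical class):
#if 0
class MyMedium {
public:
  static MyMedium* createNew(int param) { return new MyMedium(param); }
private:
  MyMedium(int param) : fParam(param) {} // called only by createNew()
  virtual ~MyMedium() {} // deletion happens only via the class itself
private:
  int fParam;
};
#endif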
// C++ header #ifndef _VORBIS_AUDIO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #define _VORBIS_AUDIO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif #ifndef _MATROSKA_FILE_SERVER_DEMUX_HH #include "MatroskaFileServerDemux.hh" #endif class VorbisAudioMatroskaFileServerMediaSubsession: public FileServerMediaSubsession { public: static VorbisAudioMatroskaFileServerMediaSubsession* createNew(MatroskaFileServerDemux& demux, unsigned trackNumber); private: VorbisAudioMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber); // called only by createNew(); virtual ~VorbisAudioMatroskaFileServerMediaSubsession(); private: // redefined virtual functions virtual float duration() const; virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); private: MatroskaFileServerDemux& fOurDemux; unsigned fTrackNumber; u_int8_t* fIdentificationHeader; unsigned fIdentificationHeaderSize; u_int8_t* fCommentHeader; unsigned fCommentHeaderSize; u_int8_t* fSetupHeader; unsigned fSetupHeaderSize; unsigned fEstBitrate; // in kbps }; #endif live/liveMedia/VP8VideoMatroskaFileServerMediaSubsession.cpp000444 001751 000000 00000004767 12265042432 024473 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a VP8 Video track within a Matroska file. 
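// Note (for readers): the header above shows the minimal set of virtual functions
// that a seekable Matroska-track subsession overrides:
//   duration()              - reports the file's duration (enables "Range:" handling)
//   seekStreamSource()      - maps a seek NPT onto the demuxed track
//   createNewStreamSource() - hands out the demuxed track, plus a bitrate estimate
//   createNewRTPSink()      - creates the codec-appropriate "RTPSink"
// The VP8 implementation that follows overrides exactly the same four.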
// Implementation #include "VP8VideoMatroskaFileServerMediaSubsession.hh" #include "VP8VideoRTPSink.hh" #include "MatroskaDemuxedTrack.hh" VP8VideoMatroskaFileServerMediaSubsession* VP8VideoMatroskaFileServerMediaSubsession ::createNew(MatroskaFileServerDemux& demux, unsigned trackNumber) { return new VP8VideoMatroskaFileServerMediaSubsession(demux, trackNumber); } VP8VideoMatroskaFileServerMediaSubsession ::VP8VideoMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber) : FileServerMediaSubsession(demux.envir(), demux.fileName(), False), fOurDemux(demux), fTrackNumber(trackNumber) { } VP8VideoMatroskaFileServerMediaSubsession ::~VP8VideoMatroskaFileServerMediaSubsession() { } float VP8VideoMatroskaFileServerMediaSubsession::duration() const { return fOurDemux.fileDuration(); } void VP8VideoMatroskaFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) { ((MatroskaDemuxedTrack*)inputSource)->seekToTime(seekNPT); } FramedSource* VP8VideoMatroskaFileServerMediaSubsession ::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { estBitrate = 500; // kbps, estimate return fOurDemux.newDemuxedTrack(clientSessionId, fTrackNumber); } RTPSink* VP8VideoMatroskaFileServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { return VP8VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } live/liveMedia/H265VideoStreamFramer.cpp000444 001751 000000 00000003107 12265042432 020267 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up a H.265 Video Elementary Stream into NAL units. 
// Implementation #include "H265VideoStreamFramer.hh" H265VideoStreamFramer* H265VideoStreamFramer ::createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean includeStartCodeInOutput) { return new H265VideoStreamFramer(env, inputSource, True, includeStartCodeInOutput); } H265VideoStreamFramer ::H265VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean createParser, Boolean includeStartCodeInOutput) : H264or5VideoStreamFramer(265, env, inputSource, createParser, includeStartCodeInOutput) { } H265VideoStreamFramer::~H265VideoStreamFramer() { } Boolean H265VideoStreamFramer::isH265VideoStreamFramer() const { return True; } live/liveMedia/H265VideoStreamDiscreteFramer.cpp000444 001751 000000 00000003227 12265042432 021755 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simplified version of "H265VideoStreamFramer" that takes only complete, // discrete frames (rather than an arbitrary byte stream) as input. // This avoids the parsing and data copying overhead of the full // "H265VideoStreamFramer". // Implementation #include "H265VideoStreamDiscreteFramer.hh" H265VideoStreamDiscreteFramer* H265VideoStreamDiscreteFramer::createNew(UsageEnvironment& env, FramedSource* inputSource) { return new H265VideoStreamDiscreteFramer(env, inputSource); } H265VideoStreamDiscreteFramer ::H265VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource) : H264or5VideoStreamDiscreteFramer(265, env, inputSource) { } H265VideoStreamDiscreteFramer::~H265VideoStreamDiscreteFramer() { } Boolean H265VideoStreamDiscreteFramer::isH265VideoStreamFramer() const { return True; } live/liveMedia/H265VideoRTPSink.cpp000444 001751 000000 00000015565 12265042432 017204 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// RTP sink for H.265 video // Implementation #include "H265VideoRTPSink.hh" #include "H265VideoStreamFramer.hh" #include "Base64.hh" #include "BitVector.hh" #include "H264VideoRTPSource.hh" // for "parseSPropParameterSets()" ////////// H265VideoRTPSink implementation ////////// H265VideoRTPSink ::H265VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int8_t const* vps, unsigned vpsSize, u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize) : H264or5VideoRTPSink(265, env, RTPgs, rtpPayloadFormat, vps, vpsSize, sps, spsSize, pps, ppsSize) { } H265VideoRTPSink::~H265VideoRTPSink() { } H265VideoRTPSink* H265VideoRTPSink ::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) { return new H265VideoRTPSink(env, RTPgs, rtpPayloadFormat); } H265VideoRTPSink* H265VideoRTPSink ::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int8_t const* vps, unsigned vpsSize, u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize) { return new H265VideoRTPSink(env, RTPgs, rtpPayloadFormat, vps, vpsSize, sps, spsSize, pps, ppsSize); } H265VideoRTPSink* H265VideoRTPSink ::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, char const* sPropVPSStr, char const* sPropSPSStr, char const* sPropPPSStr) { u_int8_t* vps = NULL; unsigned vpsSize = 0; u_int8_t* sps = NULL; unsigned spsSize = 0; u_int8_t* pps = NULL; unsigned ppsSize = 0; // Parse each 'sProp' string, extracting and then classifying the NAL unit(s) from each one. // We're 'liberal in what we accept'; it's OK if the strings don't contain the NAL unit type // implied by their names (or if one or more of the strings encode multiple NAL units). 
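// Note (for readers): the classification loop below relies on the H.265 NAL unit
// header layout: the first byte packs  forbidden_zero_bit(1) | nal_unit_type(6) |
// (high bit of nuh_layer_id),  so "(byte & 0x7E) >> 1" extracts the 6-bit
// nal_unit_type.  Types 32, 33 and 34 are the VPS, SPS and PPS parameter sets,
// respectively.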
SPropRecord* sPropRecords[3]; unsigned numSPropRecords[3]; sPropRecords[0] = parseSPropParameterSets(sPropVPSStr, numSPropRecords[0]); sPropRecords[1] = parseSPropParameterSets(sPropSPSStr, numSPropRecords[1]); sPropRecords[2] = parseSPropParameterSets(sPropPPSStr, numSPropRecords[2]); for (unsigned j = 0; j < 3; ++j) { SPropRecord* records = sPropRecords[j]; unsigned numRecords = numSPropRecords[j]; for (unsigned i = 0; i < numRecords; ++i) { if (records[i].sPropLength == 0) continue; // bad data u_int8_t nal_unit_type = ((records[i].sPropBytes[0])&0x7E)>>1; if (nal_unit_type == 32/*VPS*/) { vps = records[i].sPropBytes; vpsSize = records[i].sPropLength; } else if (nal_unit_type == 33/*SPS*/) { sps = records[i].sPropBytes; spsSize = records[i].sPropLength; } else if (nal_unit_type == 34/*PPS*/) { pps = records[i].sPropBytes; ppsSize = records[i].sPropLength; } } } H265VideoRTPSink* result = new H265VideoRTPSink(env, RTPgs, rtpPayloadFormat, vps, vpsSize, sps, spsSize, pps, ppsSize); delete[] sPropRecords[0]; delete[] sPropRecords[1]; delete[] sPropRecords[2]; return result; } Boolean H265VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) { // Our source must be an appropriate framer: return source.isH265VideoStreamFramer(); } char const* H265VideoRTPSink::auxSDPLine() { // Generate a new "a=fmtp:" line each time, using our VPS, SPS and PPS (if we have them), // otherwise parameters from our framer source (in case they've changed since the last time that // we were called): H264or5VideoStreamFramer* framerSource = NULL; u_int8_t* vps = fVPS; unsigned vpsSize = fVPSSize; u_int8_t* sps = fSPS; unsigned spsSize = fSPSSize; u_int8_t* pps = fPPS; unsigned ppsSize = fPPSSize; if (vps == NULL || sps == NULL || pps == NULL) { // We need to get VPS, SPS and PPS from our framer source: if (fOurFragmenter == NULL) return NULL; // we don't yet have a fragmenter (and therefore not a source) framerSource = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource()); if (framerSource == NULL) return NULL; // we don't yet have a source framerSource->getVPSandSPSandPPS(vps, vpsSize, sps, spsSize, pps, ppsSize); if (vps == NULL || sps == NULL || pps == NULL) return NULL; // our source isn't ready } // Set up the "a=fmtp:" SDP line for this stream. 
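// Note (for readers): in the 12-byte 'profile_tier_level' structure used below,
// byte 0 packs  general_profile_space(2) | general_tier_flag(1) |
// general_profile_idc(5);  bytes 5..10 hold the constraint/'interop' flags that
// get printed as twelve hex digits; and byte 11 is general_level_idc - hence the
// ">>6", "(>>5)&0x1", "&0x1F", "[5]" and "[11]" extractions.  (Caveat: this code
// path assumes that "framerSource" was set above; if VPS, SPS and PPS were all
// supplied at construction, "framerSource" is still NULL here.)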
// First, extract from our 'profile_tier_level' bytes (that were set by our upstream 'framer') // several parameters that we'll put in this line: u_int8_t const* profileTierLevelHeaderBytes = framerSource->profileTierLevelHeaderBytes(); unsigned profile_space = profileTierLevelHeaderBytes[0]>>6; // general_profile_space unsigned profile_id = profileTierLevelHeaderBytes[0]&0x1F; // general_profile_idc unsigned tier_flag = (profileTierLevelHeaderBytes[0]>>5)&0x1; // general_tier_flag unsigned level_id = profileTierLevelHeaderBytes[11]; // general_level_idc u_int8_t const* interop_constraints = &profileTierLevelHeaderBytes[5]; char* sprop_vps = base64Encode((char*)vps, vpsSize); char* sprop_sps = base64Encode((char*)sps, spsSize); char* sprop_pps = base64Encode((char*)pps, ppsSize); char const* fmtpFmt = "a=fmtp:%d profile-space=%u" ";profile-id=%u" ";tier-flag=%u" ";level-id=%u" ";interop-constraints=%02X%02X%02X%02X%02X%02X" ";tx-mode=SST" ";sprop-vps=%s" ";sprop-sps=%s" ";sprop-pps=%s\r\n"; unsigned fmtpFmtSize = strlen(fmtpFmt) + 3 /* max num chars: rtpPayloadType */ + 1 /* num chars: profile_space */ + 2 /* max num chars: profile_id */ + 1 /* num chars: tier_flag */ + 3 /* max num chars: level_id */ + 12 /* num chars: interop_constraints */ + strlen(sprop_vps) + strlen(sprop_sps) + strlen(sprop_pps); char* fmtp = new char[fmtpFmtSize]; sprintf(fmtp, fmtpFmt, rtpPayloadType(), profile_space, profile_id, tier_flag, level_id, interop_constraints[0], interop_constraints[1], interop_constraints[2], interop_constraints[3], interop_constraints[4], interop_constraints[5], sprop_vps, sprop_sps, sprop_pps); delete[] sprop_vps; delete[] sprop_sps; delete[] sprop_pps; delete[] fFmtpSDPLine; fFmtpSDPLine = fmtp; return fFmtpSDPLine; } live/liveMedia/ByteStreamMemoryBufferSource.cpp000444 001751 000000 00000010507 12265042432 022130 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class for streaming data from a (static) memory buffer, as if it were a file. 
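// For illustration (not part of this file): streaming a heap-allocated buffer,
// with the source taking ownership of it.  The environment ("env"), buffer
// contents, and size here are hypothetical:
#if 0
u_int64_t bufferSize = 100000;
u_int8_t* buffer = new u_int8_t[bufferSize];
// ... fill "buffer" with media data ...
ByteStreamMemoryBufferSource* source
  = ByteStreamMemoryBufferSource::createNew(*env, buffer, bufferSize,
                                            True/*deleteBufferOnClose*/,
                                            0/*preferredFrameSize*/,
                                            0/*playTimePerFrame*/);
// "source" can now be used anywhere a "ByteStreamFileSource" could be, and will
// "delete[] buffer" itself when it is closed.
#endif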
// Implementation #include "ByteStreamMemoryBufferSource.hh" #include "GroupsockHelper.hh" ////////// ByteStreamMemoryBufferSource ////////// ByteStreamMemoryBufferSource* ByteStreamMemoryBufferSource::createNew(UsageEnvironment& env, u_int8_t* buffer, u_int64_t bufferSize, Boolean deleteBufferOnClose, unsigned preferredFrameSize, unsigned playTimePerFrame) { if (buffer == NULL) return NULL; return new ByteStreamMemoryBufferSource(env, buffer, bufferSize, deleteBufferOnClose, preferredFrameSize, playTimePerFrame); } ByteStreamMemoryBufferSource::ByteStreamMemoryBufferSource(UsageEnvironment& env, u_int8_t* buffer, u_int64_t bufferSize, Boolean deleteBufferOnClose, unsigned preferredFrameSize, unsigned playTimePerFrame) : FramedSource(env), fBuffer(buffer), fBufferSize(bufferSize), fCurIndex(0), fDeleteBufferOnClose(deleteBufferOnClose), fPreferredFrameSize(preferredFrameSize), fPlayTimePerFrame(playTimePerFrame), fLastPlayTime(0), fLimitNumBytesToStream(False), fNumBytesToStream(0) { } ByteStreamMemoryBufferSource::~ByteStreamMemoryBufferSource() { if (fDeleteBufferOnClose) delete[] fBuffer; } void ByteStreamMemoryBufferSource::seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream) { fCurIndex = byteNumber; if (fCurIndex > fBufferSize) fCurIndex = fBufferSize; fNumBytesToStream = numBytesToStream; fLimitNumBytesToStream = fNumBytesToStream > 0; } void ByteStreamMemoryBufferSource::seekToByteRelative(int64_t offset) { int64_t newIndex = fCurIndex + offset; if (newIndex < 0) { fCurIndex = 0; } else { fCurIndex = (u_int64_t)newIndex; if (fCurIndex > fBufferSize) fCurIndex = fBufferSize; } } void ByteStreamMemoryBufferSource::doGetNextFrame() { if (fCurIndex >= fBufferSize || (fLimitNumBytesToStream && fNumBytesToStream == 0)) { handleClosure(this); return; } // Try to read as many bytes as will fit in the buffer provided (or "fPreferredFrameSize" if less) fFrameSize = fMaxSize; if (fLimitNumBytesToStream && fNumBytesToStream < (u_int64_t)fFrameSize) { fFrameSize = (unsigned)fNumBytesToStream; } if (fPreferredFrameSize > 0 && fPreferredFrameSize < fFrameSize) { fFrameSize = fPreferredFrameSize; } if (fCurIndex + fFrameSize > fBufferSize) { fFrameSize = (unsigned)(fBufferSize - fCurIndex); } memmove(fTo, &fBuffer[fCurIndex], fFrameSize); fCurIndex += fFrameSize; fNumBytesToStream -= fFrameSize; // Set the 'presentation time': if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) { if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) { // This is the first frame, so use the current time: gettimeofday(&fPresentationTime, NULL); } else { // Increment by the play time of the previous data: unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime; fPresentationTime.tv_sec += uSeconds/1000000; fPresentationTime.tv_usec = uSeconds%1000000; } // Remember the play time of this data: fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize; fDurationInMicroseconds = fLastPlayTime; } else { // We don't know a specific play time duration for this data, // so just record the current time as being the 'presentation time': gettimeofday(&fPresentationTime, NULL); } // Inform the downstream object that it has data: FramedSource::afterGetting(this); } live/liveMedia/H265VideoMatroskaFileServerMediaSubsession.cpp000444 001751 000000 00000014610 12265042432 024466 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software
Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an H265 video track within a Matroska file. // Implementation #include "H265VideoMatroskaFileServerMediaSubsession.hh" #include "H265VideoStreamDiscreteFramer.hh" #include "MatroskaDemuxedTrack.hh" H265VideoMatroskaFileServerMediaSubsession* H265VideoMatroskaFileServerMediaSubsession ::createNew(MatroskaFileServerDemux& demux, unsigned trackNumber) { return new H265VideoMatroskaFileServerMediaSubsession(demux, trackNumber); } #define CHECK_PTR if (ptr >= limit) return #define NUM_BYTES_REMAINING (unsigned)(limit - ptr) H265VideoMatroskaFileServerMediaSubsession ::H265VideoMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, unsigned trackNumber) : H265VideoFileServerMediaSubsession(demux.envir(), demux.fileName(), False), fOurDemux(demux), fTrackNumber(trackNumber), fVPSSize(0), fVPS(NULL), fSPSSize(0), fSPS(NULL), fPPSSize(0), fPPS(NULL) { // Our track's 'Codec Private' data should contain VPS, SPS, and PPS NAL units. Copy these: unsigned numVPS_SPS_PPSBytes = 0; u_int8_t* VPS_SPS_PPSBytes = NULL; MatroskaTrack* track = fOurDemux.lookup(fTrackNumber); if (track->codecPrivateUsesH264FormatForH265) { // The data uses the H.264-style format (but including VPS NAL unit(s)). // The VPS,SPS,PPS NAL unit information starts at byte #5: if (track->codecPrivateSize >= 6) { numVPS_SPS_PPSBytes = track->codecPrivateSize - 5; VPS_SPS_PPSBytes = &track->codecPrivate[5]; } } else { // The data uses the proper H.265-style format. // The VPS,SPS,PPS NAL unit information starts at byte #22: if (track->codecPrivateSize >= 23) { numVPS_SPS_PPSBytes = track->codecPrivateSize - 22; VPS_SPS_PPSBytes = &track->codecPrivate[22]; } } // Extract, from "VPS_SPS_PPSBytes", one VPS NAL unit, one SPS NAL unit, and one PPS NAL unit. // (I hope one is all we need of each.) if (numVPS_SPS_PPSBytes == 0 || VPS_SPS_PPSBytes == NULL) return; // sanity check unsigned i; u_int8_t* ptr = VPS_SPS_PPSBytes; u_int8_t* limit = &VPS_SPS_PPSBytes[numVPS_SPS_PPSBytes]; if (track->codecPrivateUsesH264FormatForH265) { // The data uses the H.264-style format (but including VPS NAL unit(s)). 
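// (For reference, the layout parsed by the loop below is, repeatedly: one byte whose low
//  5 bits give the number of NAL units in this group; then, for each NAL unit, a 2-byte
//  (big-endian) NAL unit length, followed by the NAL unit itself.)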
while (NUM_BYTES_REMAINING > 0) { unsigned numNALUnits = (*ptr++)&0x1F; CHECK_PTR; for (i = 0; i < numNALUnits; ++i) { unsigned nalUnitLength = (*ptr++)<<8; CHECK_PTR; nalUnitLength |= *ptr++; CHECK_PTR; if (nalUnitLength > NUM_BYTES_REMAINING) return; u_int8_t nal_unit_type = (ptr[0]&0x7E)>>1; if (nal_unit_type == 32) { // VPS fVPSSize = nalUnitLength; delete[] fVPS; fVPS = new u_int8_t[nalUnitLength]; memmove(fVPS, ptr, nalUnitLength); } else if (nal_unit_type == 33) { // SPS fSPSSize = nalUnitLength; delete[] fSPS; fSPS = new u_int8_t[nalUnitLength]; memmove(fSPS, ptr, nalUnitLength); } else if (nal_unit_type == 34) { // PPS fPPSSize = nalUnitLength; delete[] fPPS; fPPS = new u_int8_t[nalUnitLength]; memmove(fPPS, ptr, nalUnitLength); } ptr += nalUnitLength; } } } else { // The data uses the proper H.265-style format. unsigned numOfArrays = *ptr++; CHECK_PTR; for (unsigned j = 0; j < numOfArrays; ++j) { ++ptr; CHECK_PTR; // skip the 'array_completeness'|'reserved'|'NAL_unit_type' byte unsigned numNalus = (*ptr++)<<8; CHECK_PTR; numNalus |= *ptr++; CHECK_PTR; for (i = 0; i < numNalus; ++i) { unsigned nalUnitLength = (*ptr++)<<8; CHECK_PTR; nalUnitLength |= *ptr++; CHECK_PTR; if (nalUnitLength > NUM_BYTES_REMAINING) return; u_int8_t nal_unit_type = (ptr[0]&0x7E)>>1; if (nal_unit_type == 32) { // VPS fVPSSize = nalUnitLength; delete[] fVPS; fVPS = new u_int8_t[nalUnitLength]; memmove(fVPS, ptr, nalUnitLength); } else if (nal_unit_type == 33) { // SPS fSPSSize = nalUnitLength; delete[] fSPS; fSPS = new u_int8_t[nalUnitLength]; memmove(fSPS, ptr, nalUnitLength); } else if (nal_unit_type == 34) { // PPS fPPSSize = nalUnitLength; delete[] fPPS; fPPS = new u_int8_t[nalUnitLength]; memmove(fPPS, ptr, nalUnitLength); } ptr += nalUnitLength; } } } } H265VideoMatroskaFileServerMediaSubsession ::~H265VideoMatroskaFileServerMediaSubsession() { delete[] fVPS; delete[] fSPS; delete[] fPPS; } float H265VideoMatroskaFileServerMediaSubsession::duration() const { return fOurDemux.fileDuration(); } void H265VideoMatroskaFileServerMediaSubsession ::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) { // "inputSource" is a framer. 
*Its* source is the demuxed track that we seek on: H265VideoStreamDiscreteFramer* framer = (H265VideoStreamDiscreteFramer*)inputSource; MatroskaDemuxedTrack* demuxedTrack = (MatroskaDemuxedTrack*)(framer->inputSource()); demuxedTrack->seekToTime(seekNPT); } FramedSource* H265VideoMatroskaFileServerMediaSubsession ::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { // Allow for the possibility of very large NAL units being fed to our "RTPSink" objects: OutPacketBuffer::maxSize = 300000; // bytes estBitrate = 500; // kbps, estimate // Create the video source: FramedSource* baseH265VideoSource = fOurDemux.newDemuxedTrack(clientSessionId, fTrackNumber); if (baseH265VideoSource == NULL) return NULL; // Create a framer for the Video stream: H265VideoStreamDiscreteFramer* framer = H265VideoStreamDiscreteFramer::createNew(envir(), baseH265VideoSource); framer->setVPSandSPSandPPS(fVPS, fVPSSize, fSPS, fSPSSize, fPPS, fPPSSize); return framer; } live/liveMedia/include/MPEG1or2VideoRTPSource.hh000444 001751 000000 00000003543 12265042432 021601 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // MPEG-1 or MPEG-2 Video RTP Sources // C++ header #ifndef _MPEG_1OR2_VIDEO_RTP_SOURCE_HH #define _MPEG_1OR2_VIDEO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class MPEG1or2VideoRTPSource: public MultiFramedRTPSource { public: static MPEG1or2VideoRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat = 32, unsigned rtpPayloadFrequency = 90000); protected: virtual ~MPEG1or2VideoRTPSource(); private: MPEG1or2VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual Boolean packetIsUsableInJitterCalculation(unsigned char* packet, unsigned packetSize); virtual char const* MIMEtype() const; }; #endif live/liveMedia/include/FramedFileSource.hh000444 001751 000000 00000002310 12265042432 020755 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Framed File Sources // C++ header #ifndef _FRAMED_FILE_SOURCE_HH #define _FRAMED_FILE_SOURCE_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif class FramedFileSource: public FramedSource { protected: FramedFileSource(UsageEnvironment& env, FILE* fid); // abstract base class virtual ~FramedFileSource(); protected: FILE* fFid; }; #endif live/liveMedia/include/FramedFilter.hh000444 001751 000000 00000003270 12265042432 020150 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Framed Filters // C++ header #ifndef _FRAMED_FILTER_HH #define _FRAMED_FILTER_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif class FramedFilter: public FramedSource { public: FramedSource* inputSource() const { return fInputSource; } void reassignInputSource(FramedSource* newInputSource) { fInputSource = newInputSource; } // Call before destruction if you want to prevent the destructor from closing the input source void detachInputSource(); protected: FramedFilter(UsageEnvironment& env, FramedSource* inputSource); // abstract base class virtual ~FramedFilter(); protected: // Redefined virtual functions (with default 'null' implementations): virtual char const* MIMEtype() const; virtual void getAttributes() const; virtual void doStopGettingFrames(); protected: FramedSource* fInputSource; }; #endif live/liveMedia/include/FramedSource.hh000444 001751 000000 00000005757 12265042432 020177 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
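// (Before the declarations below, a brief illustrative sketch - not part of this header -
//  of the asynchronous read pattern that "FramedSource" defines.  "source", "buf" and the
//  two callbacks here are hypothetical:
#if 0
static void onSourceClosure(void* clientData) { /* the source has no more data */ }
static void afterGettingFrame(void* clientData, unsigned frameSize,
                              unsigned numTruncatedBytes,
                              struct timeval presentationTime,
                              unsigned durationInMicroseconds) {
  // consume "frameSize" bytes from "buf", then typically ask for the next frame:
  source->getNextFrame(buf, sizeof buf, afterGettingFrame, NULL, onSourceClosure, NULL);
}

source->getNextFrame(buf, sizeof buf, afterGettingFrame, NULL, onSourceClosure, NULL);
#endif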
// Framed Sources // C++ header #ifndef _FRAMED_SOURCE_HH #define _FRAMED_SOURCE_HH #ifndef _NET_COMMON_H #include "NetCommon.h" #endif #ifndef _MEDIA_SOURCE_HH #include "MediaSource.hh" #endif class FramedSource: public MediaSource { public: static Boolean lookupByName(UsageEnvironment& env, char const* sourceName, FramedSource*& resultSource); typedef void (afterGettingFunc)(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); typedef void (onCloseFunc)(void* clientData); void getNextFrame(unsigned char* to, unsigned maxSize, afterGettingFunc* afterGettingFunc, void* afterGettingClientData, onCloseFunc* onCloseFunc, void* onCloseClientData); static void handleClosure(void* clientData); // This should be called (on ourself) if the source is discovered // to be closed (i.e., no longer readable) void stopGettingFrames(); virtual unsigned maxFrameSize() const; // size of the largest possible frame that we may serve, or 0 // if no such maximum is known (default) virtual void doGetNextFrame() = 0; // called by getNextFrame() Boolean isCurrentlyAwaitingData() const {return fIsCurrentlyAwaitingData;} static void afterGetting(FramedSource* source); // doGetNextFrame() should arrange for this to be called after the // frame has been read (*iff* it is read successfully) protected: FramedSource(UsageEnvironment& env); // abstract base class virtual ~FramedSource(); virtual void doStopGettingFrames(); protected: // The following variables are typically accessed/set by doGetNextFrame() unsigned char* fTo; // in unsigned fMaxSize; // in unsigned fFrameSize; // out unsigned fNumTruncatedBytes; // out struct timeval fPresentationTime; // out unsigned fDurationInMicroseconds; // out private: // redefined virtual functions: virtual Boolean isFramedSource() const; private: afterGettingFunc* fAfterGettingFunc; void* fAfterGettingClientData; onCloseFunc* fOnCloseFunc; void* fOnCloseClientData; Boolean fIsCurrentlyAwaitingData; }; #endif live/liveMedia/include/H263plusVideoRTPSource.hh000444 001751 000000 00000004054 12265042432 021671 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// H.263+ Video RTP Sources // C++ header #ifndef _H263_PLUS_VIDEO_RTP_SOURCE_HH #define _H263_PLUS_VIDEO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif #define SPECIAL_HEADER_BUFFER_SIZE 1000 class H263plusVideoRTPSource: public MultiFramedRTPSource { public: static H263plusVideoRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency = 90000); // A data structure that stores copies of the special header bytes // from the most recent frame's RTP packets: unsigned char fNumSpecialHeaders; unsigned fSpecialHeaderBytesLength; unsigned char fSpecialHeaderBytes[SPECIAL_HEADER_BUFFER_SIZE]; unsigned fPacketSizes[256]; protected: virtual ~H263plusVideoRTPSource(); private: H263plusVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; }; #endif live/liveMedia/include/Locale.hh000444 001751 000000 00000005033 12265042432 017002 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Support for temporarily setting the locale (e.g., to "C" or "POSIX") for (e.g.) parsing or printing // floating-point numbers in protocol headers, or calling toupper()/tolower() on human-input strings. // C++ header #ifndef _LOCALE_HH #define _LOCALE_HH // If you're on a system that (for whatever reason) doesn't have either the "setlocale()" or the "newlocale()" function, then // add "-DLOCALE_NOT_USED" to your "config.*" file. // If you're on a system that (for whatever reason) has "setlocale()" but not "newlocale()", then // add "-DXLOCALE_NOT_USED" to your "config.*" file. // (Note that -DLOCALE_NOT_USED implies -DXLOCALE_NOT_USED; you do not need both.) // Also, for Windows systems, we define "XLOCALE_NOT_USED" by default, because at least some Windows systems // (or their development environments) don't have "newlocale()". If, however, your Windows system *does* have "newlocale()", // then you can override this by defining "XLOCALE_USED" before #including this file. 
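// (A brief illustrative sketch - not part of this header - of typical usage, where "buf"
//  and "seconds" are hypothetical:
#if 0
{
  Locale l("C", Numeric); // ensure that '.' is used as the decimal point
  sprintf(buf, "%.3f", seconds); // e.g., when generating a protocol header
} // the previous locale is restored when "l" goes out of scope
#endif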
#ifdef XLOCALE_USED #undef LOCALE_NOT_USED #undef XLOCALE_NOT_USED #else #if defined(__WIN32__) || defined(_WIN32) #define XLOCALE_NOT_USED 1 #endif #endif #ifndef LOCALE_NOT_USED #include <locale.h> #ifndef XLOCALE_NOT_USED #include <xlocale.h> // because, on some systems, <locale.h> doesn't include <xlocale.h>; this makes sure that we get both #endif #endif enum LocaleCategory { All, Numeric }; // define and implement more categories later, as needed class Locale { public: Locale(char const* newLocale, LocaleCategory category = All); virtual ~Locale(); private: #ifndef LOCALE_NOT_USED #ifndef XLOCALE_NOT_USED locale_t fLocale, fPrevLocale; #else int fCategoryNum; char* fPrevLocale; #endif #endif }; #endif live/liveMedia/include/MP3ADU.hh000444 001751 000000 00000005721 12265042432 016540 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // 'ADU' MP3 streams (for improved loss-tolerance) // C++ header #ifndef _MP3_ADU_HH #define _MP3_ADU_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif class ADUFromMP3Source: public FramedFilter { public: static ADUFromMP3Source* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean includeADUdescriptors = True); void resetInput(); // This is called whenever there's a discontinuity in the input MP3 source // (e.g., due to seeking within the source). It causes any still-unprocessed // MP3 frame data within our queue to be discarded, so that it does not // erroneously get used by backpointers from the new MP3 frames.
Boolean setScaleFactor(int scale); protected: ADUFromMP3Source(UsageEnvironment& env, FramedSource* inputSource, Boolean includeADUdescriptors); // called only by createNew() virtual ~ADUFromMP3Source(); private: // Redefined virtual functions: virtual void doGetNextFrame(); virtual char const* MIMEtype() const; private: Boolean doGetNextFrame1(); private: Boolean fAreEnqueueingMP3Frame; class SegmentQueue* fSegments; Boolean fIncludeADUdescriptors; unsigned fTotalDataSizeBeforePreviousRead; int fScale; unsigned fFrameCounter; }; class MP3FromADUSource: public FramedFilter { public: static MP3FromADUSource* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean includeADUdescriptors = True); protected: MP3FromADUSource(UsageEnvironment& env, FramedSource* inputSource, Boolean includeADUdescriptors); // called only by createNew() virtual ~MP3FromADUSource(); private: // Redefined virtual functions: virtual void doGetNextFrame(); virtual char const* MIMEtype() const; private: Boolean needToGetAnADU(); void insertDummyADUsIfNecessary(); Boolean generateFrameFromHeadADU(); private: Boolean fAreEnqueueingADU; class SegmentQueue* fSegments; }; // Definitions of external C functions that implement various MP3 operations: extern "C" int mp3ZeroOutSideInfo(unsigned char*, unsigned, unsigned); #endif live/liveMedia/include/MP3ADURTPSink.hh000444 001751 000000 00000003565 12265042432 017757 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for 'ADUized' MP3 frames ("mpa-robust") // C++ header #ifndef _MP3_ADU_RTP_SINK_HH #define _MP3_ADU_RTP_SINK_HH #ifndef _AUDIO_RTP_SINK_HH #include "AudioRTPSink.hh" #endif class MP3ADURTPSink: public AudioRTPSink { public: static MP3ADURTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char RTPPayloadType); protected: virtual ~MP3ADURTPSink(); private: MP3ADURTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char RTPPayloadType); // called only by createNew() private: // Redefined virtual functions: virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual unsigned specialHeaderSize() const; private: unsigned fCurADUSize; // used when fragmenting over multiple RTP packets }; #endif live/liveMedia/include/MP3ADURTPSource.hh000444 001751 000000 00000003101 12265042432 020275 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP source for 'ADUized' MP3 frames ("mpa-robust") // C++ header #ifndef _MP3_ADU_SOURCE_HH #define _MP3_ADU_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class MP3ADURTPSource: public MultiFramedRTPSource { public: static MP3ADURTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency = 90000); protected: virtual ~MP3ADURTPSource(); private: MP3ADURTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() private: // redefined virtual functions: virtual char const* MIMEtype() const; }; #endif live/liveMedia/include/MP3ADUTranscoder.hh000444 001751 000000 00000004075 12265042432 020566 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
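// (A brief illustrative sketch - not part of this header - of how the transcoder declared
//  below might be chained by hand, assuming an existing "env" and an MP3 "inputSource";
//  the ADU-descriptor flags and the 64 kbps output bitrate are hypothetical choices:
#if 0
FramedSource* adus = ADUFromMP3Source::createNew(env, inputSource, False/*no ADU descriptors*/);
FramedSource* transcodedADUs = MP3ADUTranscoder::createNew(env, 64/*kbps*/, adus);
FramedSource* mp3Output = MP3FromADUSource::createNew(env, transcodedADUs, False/*no ADU descriptors*/);
#endif
// ("MP3Transcoder", declared elsewhere in this distribution, wraps this same kind of chain.)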
// Transcoder for ADUized MP3 frames // C++ header #ifndef _MP3_ADU_TRANSCODER_HH #define _MP3_ADU_TRANSCODER_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif class MP3ADUTranscoder: public FramedFilter { public: static MP3ADUTranscoder* createNew(UsageEnvironment& env, unsigned outBitrate /* in kbps */, FramedSource* inputSource); unsigned outBitrate() const { return fOutBitrate; } protected: MP3ADUTranscoder(UsageEnvironment& env, unsigned outBitrate /* in kbps */, FramedSource* inputSource); // called only by createNew() virtual ~MP3ADUTranscoder(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void getAttributes() const; private: static void afterGettingFrame(void* clientData, unsigned numBytesRead, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned numBytesRead, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); private: unsigned fOutBitrate; // in kbps unsigned fAvailableBytesForBackpointer; unsigned char* fOrigADU; // used to store incoming ADU prior to transcoding }; #endif live/liveMedia/include/MP3ADUinterleaving.hh000444 001751 000000 00000007401 12265042432 021145 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
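// (A brief illustrative sketch - not part of this header - of the classes declared below;
//  "env" and "aduSource" are assumed to exist, and the 4-entry cycle is hypothetical:
#if 0
unsigned char const cycle[] = {0, 2, 1, 3};
Interleaving interleaving(4, cycle);
FramedSource* interleavedADUs = MP3ADUinterleaver::createNew(env, interleaving, aduSource);
#endif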
// Interleaving of MP3 ADUs // C++ header #ifndef _MP3_ADU_INTERLEAVING_HH #define _MP3_ADU_INTERLEAVING_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif // A data structure used to represent an interleaving #define MAX_CYCLE_SIZE 256 class Interleaving { public: Interleaving(unsigned cycleSize, unsigned char const* cycleArray); virtual ~Interleaving(); unsigned cycleSize() const {return fCycleSize;} unsigned char lookupInverseCycle(unsigned char index) const { return fInverseCycle[index]; } private: unsigned fCycleSize; unsigned char fInverseCycle[MAX_CYCLE_SIZE]; }; // This class is used only as a base for the following two: class MP3ADUinterleaverBase: public FramedFilter { protected: MP3ADUinterleaverBase(UsageEnvironment& env, FramedSource* inputSource); // abstract base class virtual ~MP3ADUinterleaverBase(); static FramedSource* getInputSource(UsageEnvironment& env, char const* inputSourceName); static void afterGettingFrame(void* clientData, unsigned numBytesRead, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); virtual void afterGettingFrame(unsigned numBytesRead, struct timeval presentationTime, unsigned durationInMicroseconds) = 0; }; // This class is used to convert an ADU sequence from non-interleaved // to interleaved form: class MP3ADUinterleaver: public MP3ADUinterleaverBase { public: static MP3ADUinterleaver* createNew(UsageEnvironment& env, Interleaving const& interleaving, FramedSource* inputSource); protected: MP3ADUinterleaver(UsageEnvironment& env, Interleaving const& interleaving, FramedSource* inputSource); // called only by createNew() virtual ~MP3ADUinterleaver(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void afterGettingFrame(unsigned numBytesRead, struct timeval presentationTime, unsigned durationInMicroseconds); private: void releaseOutgoingFrame(); private: Interleaving const fInterleaving; class InterleavingFrames* fFrames; unsigned char fPositionOfNextIncomingFrame; unsigned fII, fICC; }; // This class is used to convert an ADU sequence from interleaved // to non-interleaved form: class MP3ADUdeinterleaver: public MP3ADUinterleaverBase { public: static MP3ADUdeinterleaver* createNew(UsageEnvironment& env, FramedSource* inputSource); protected: MP3ADUdeinterleaver(UsageEnvironment& env, FramedSource* inputSource); // called only by createNew() virtual ~MP3ADUdeinterleaver(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void afterGettingFrame(unsigned numBytesRead, struct timeval presentationTime, unsigned durationInMicroseconds); private: void releaseOutgoingFrame(); private: class DeinterleavingFrames* fFrames; unsigned fIIlastSeen, fICClastSeen; }; #endif live/liveMedia/include/MP3FileSource.hh000444 001751 000000 00000004235 12265042432 020166 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // MP3 File Sources // C++ header #ifndef _MP3_FILE_SOURCE_HH #define _MP3_FILE_SOURCE_HH #ifndef _FRAMED_FILE_SOURCE_HH #include "FramedFileSource.hh" #endif class MP3StreamState; // forward class MP3FileSource: public FramedFileSource { public: static MP3FileSource* createNew(UsageEnvironment& env, char const* fileName); float filePlayTime() const; unsigned fileSize() const; void setPresentationTimeScale(unsigned scale); void seekWithinFile(double seekNPT, double streamDuration); // if "streamDuration" is >0.0, then we limit the stream to that duration, before treating it as EOF protected: MP3FileSource(UsageEnvironment& env, FILE* fid); // called only by createNew() virtual ~MP3FileSource(); protected: void assignStream(FILE* fid, unsigned filesize); Boolean initializeStream(); MP3StreamState* streamState() {return fStreamState;} private: // redefined virtual functions: virtual void doGetNextFrame(); virtual char const* MIMEtype() const; virtual void getAttributes() const; private: virtual Boolean doGetNextFrame1(); private: MP3StreamState* fStreamState; Boolean fHaveJustInitialized; struct timeval fFirstFramePresentationTime; // set on stream init Boolean fLimitNumBytesToStream; unsigned fNumBytesToStream; // used iff "fLimitNumBytesToStream" is True }; #endif live/liveMedia/include/DeviceSource.hh000444 001751 000000 00000004430 12265042432 020163 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A template for a MediaSource encapsulating an audio/video input device // // NOTE: Sections of this code labeled "%%% TO BE WRITTEN %%%" are incomplete, and need to be written by the programmer // (depending on the features of the particular device). // C++ header #ifndef _DEVICE_SOURCE_HH #define _DEVICE_SOURCE_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif // The following class can be used to define specific encoder parameters class DeviceParameters { //%%% TO BE WRITTEN %%% }; class DeviceSource: public FramedSource { public: static DeviceSource* createNew(UsageEnvironment& env, DeviceParameters params); public: static EventTriggerId eventTriggerId; // Note that this is defined here to be a static class variable, because this code is intended to illustrate how to // encapsulate a *single* device - not a set of devices. // You can, however, redefine this to be a non-static member variable.
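// (For illustration - not part of this header - a separate thread that discovers that new
//  device data is available might signal the event loop as follows, where "ourScheduler"
//  and "ourDevice" are hypothetical pointers to the "TaskScheduler" and to this source:
#if 0
ourScheduler->triggerEvent(DeviceSource::eventTriggerId, ourDevice);
#endif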
protected: DeviceSource(UsageEnvironment& env, DeviceParameters params); // called only by createNew(), or by subclass constructors virtual ~DeviceSource(); private: // redefined virtual functions: virtual void doGetNextFrame(); //virtual void doStopGettingFrames(); // optional private: static void deliverFrame0(void* clientData); void deliverFrame(); private: static unsigned referenceCount; // used to count how many instances of this class currently exist DeviceParameters fParams; }; #endif live/liveMedia/include/RTSPClient.hh000444 001751 000000 00000051610 12265042432 017534 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A generic RTSP client - for a single "rtsp://" URL // C++ header #ifndef _RTSP_CLIENT_HH #define _RTSP_CLIENT_HH #ifndef _MEDIA_SESSION_HH #include "MediaSession.hh" #endif #ifndef _NET_ADDRESS_HH #include "NetAddress.hh" #endif #ifndef _DIGEST_AUTHENTICATION_HH #include "DigestAuthentication.hh" #endif #ifndef _RTSP_SERVER_HH #include "RTSPServer.hh" // For the optional "HandlerForREGISTERCommand" mini-server #endif class RTSPClient: public Medium { public: static RTSPClient* createNew(UsageEnvironment& env, char const* rtspURL, int verbosityLevel = 0, char const* applicationName = NULL, portNumBits tunnelOverHTTPPortNum = 0, int socketNumToServer = -1); // If "tunnelOverHTTPPortNum" is non-zero, we tunnel RTSP (and RTP) // over a HTTP connection with the given port number, using the technique // described in Apple's document // If "socketNumToServer" is >= 0, then it is the socket number of an already-existing TCP connection to the server. // (In this case, "rtspURL" must point to the socket's endpoint, so that it can be accessed via the socket.) typedef void (responseHandler)(RTSPClient* rtspClient, int resultCode, char* resultString); // A function that is called in response to a RTSP command. The parameters are as follows: // "rtspClient": The "RTSPClient" object on which the original command was issued. // "resultCode": If zero, then the command completed successfully. If non-zero, then the command did not complete // successfully, and "resultCode" indicates the error, as follows: // A positive "resultCode" is a RTSP error code (for example, 404 means "not found") // A negative "resultCode" indicates a socket/network error; 0-"resultCode" is the standard "errno" code. // "resultString": A ('\0'-terminated) string returned along with the response, or else NULL. // In particular: // "resultString" for a successful "DESCRIBE" command will be the media session's SDP description. // "resultString" for a successful "OPTIONS" command will be a list of allowed commands. // Note that this string can be present (i.e., not NULL) even if "resultCode" is non-zero - i.e., an error message. 
// Also, "resultString" can be NULL, even if "resultCode" is zero (e.g., if the RTSP command succeeded, but without // including an appropriate result header). // Note also that this string is dynamically allocated, and must be freed by the handler (or the caller) // - using "delete[]". unsigned sendDescribeCommand(responseHandler* responseHandler, Authenticator* authenticator = NULL); // Issues a RTSP "DESCRIBE" command, then returns the "CSeq" sequence number that was used in the command. // The (programmer-supplied) "responseHandler" function is called later to handle the response // (or is called immediately - with an error code - if the command cannot be sent). // "authenticator" (optional) is used for access control. If you have username and password strings, you can use this by // passing an actual parameter that you created by creating an "Authenticator(username, password) object". // (Note that if you supply a non-NULL "authenticator" parameter, you need do this only for the first command you send.) unsigned sendOptionsCommand(responseHandler* responseHandler, Authenticator* authenticator = NULL); // Issues a RTSP "OPTIONS" command, then returns the "CSeq" sequence number that was used in the command. // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendAnnounceCommand(char const* sdpDescription, responseHandler* responseHandler, Authenticator* authenticator = NULL); // Issues a RTSP "ANNOUNCE" command (with "sdpDescription" as parameter), // then returns the "CSeq" sequence number that was used in the command. // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendSetupCommand(MediaSubsession& subsession, responseHandler* responseHandler, Boolean streamOutgoing = False, Boolean streamUsingTCP = False, Boolean forceMulticastOnUnspecified = False, Authenticator* authenticator = NULL); // Issues a RTSP "SETUP" command, then returns the "CSeq" sequence number that was used in the command. // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendPlayCommand(MediaSession& session, responseHandler* responseHandler, double start = 0.0f, double end = -1.0f, float scale = 1.0f, Authenticator* authenticator = NULL); // Issues an aggregate RTSP "PLAY" command on "session", then returns the "CSeq" sequence number that was used in the command. // (Note: start=-1 means 'resume'; end=-1 means 'play to end') // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendPlayCommand(MediaSubsession& subsession, responseHandler* responseHandler, double start = 0.0f, double end = -1.0f, float scale = 1.0f, Authenticator* authenticator = NULL); // Issues a RTSP "PLAY" command on "subsession", then returns the "CSeq" sequence number that was used in the command. // (Note: start=-1 means 'resume'; end=-1 means 'play to end') // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) 
// Alternative forms of "sendPlayCommand()", used to send "PLAY" commands that include an 'absolute' time range: // (The "absStartTime" string (and "absEndTime" string, if present) *must* be of the form // "YYYYMMDDTHHMMSSZ" or "YYYYMMDDTHHMMSS.<frac>Z") unsigned sendPlayCommand(MediaSession& session, responseHandler* responseHandler, char const* absStartTime, char const* absEndTime = NULL, float scale = 1.0f, Authenticator* authenticator = NULL); unsigned sendPlayCommand(MediaSubsession& subsession, responseHandler* responseHandler, char const* absStartTime, char const* absEndTime = NULL, float scale = 1.0f, Authenticator* authenticator = NULL); unsigned sendPauseCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator = NULL); // Issues an aggregate RTSP "PAUSE" command on "session", then returns the "CSeq" sequence number that was used in the command. // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendPauseCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator = NULL); // Issues a RTSP "PAUSE" command on "subsession", then returns the "CSeq" sequence number that was used in the command. // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendRecordCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator = NULL); // Issues an aggregate RTSP "RECORD" command on "session", then returns the "CSeq" sequence number that was used in the command. // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendRecordCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator = NULL); // Issues a RTSP "RECORD" command on "subsession", then returns the "CSeq" sequence number that was used in the command. // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendTeardownCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator = NULL); // Issues an aggregate RTSP "TEARDOWN" command on "session", then returns the "CSeq" sequence number that was used in the command. // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendTeardownCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator = NULL); // Issues a RTSP "TEARDOWN" command on "subsession", then returns the "CSeq" sequence number that was used in the command. // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendSetParameterCommand(MediaSession& session, responseHandler* responseHandler, char const* parameterName, char const* parameterValue, Authenticator* authenticator = NULL); // Issues an aggregate RTSP "SET_PARAMETER" command on "session", then returns the "CSeq" sequence number that was used in the command. // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) unsigned sendGetParameterCommand(MediaSession& session, responseHandler* responseHandler, char const* parameterName, Authenticator* authenticator = NULL); // Issues an aggregate RTSP "GET_PARAMETER" command on "session", then returns the "CSeq" sequence number that was used in the command.
// (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".) void sendDummyUDPPackets(MediaSession& session, unsigned numDummyPackets = 2); void sendDummyUDPPackets(MediaSubsession& subsession, unsigned numDummyPackets = 2); // Sends short 'dummy' (i.e., non-RTP or RTCP) UDP packets towards the server, to increase // the likelihood of RTP/RTCP packets from the server reaching us if we're behind a NAT. // (If we requested RTP-over-TCP streaming, then these functions have no effect.) // Our implementation automatically does this just prior to sending each "PLAY" command; // You should not call these functions yourself unless you know what you're doing. Boolean changeResponseHandler(unsigned cseq, responseHandler* newResponseHandler); // Changes the response handler for the previously-performed command (whose operation returned "cseq"). // (To turn off any response handling for the command, use a "newResponseHandler" value of NULL. This might be done as part // of an implementation of a 'timeout handler' on the command, for example.) // This function returns True iff "cseq" was for a valid previously-performed command (whose response is still unhandled). int socketNum() const { return fInputSocketNum; } static Boolean lookupByName(UsageEnvironment& env, char const* sourceName, RTSPClient*& resultClient); static Boolean parseRTSPURL(UsageEnvironment& env, char const* url, char*& username, char*& password, NetAddress& address, portNumBits& portNum, char const** urlSuffix = NULL); // Parses "url" as "rtsp://[[:]@][:][/]" // (Note that the returned "username" and "password" are either NULL, or heap-allocated strings that the caller must later delete[].) void setUserAgentString(char const* userAgentName); // sets an alternative string to be used in RTSP "User-Agent:" headers unsigned sessionTimeoutParameter() const { return fSessionTimeoutParameter; } char const* url() const { return fBaseURL; } static unsigned responseBufferSize; public: // Some compilers complain if this is "private:" // The state of a request-in-progress: class RequestRecord { public: RequestRecord(unsigned cseq, char const* commandName, responseHandler* handler, MediaSession* session = NULL, MediaSubsession* subsession = NULL, u_int32_t booleanFlags = 0, double start = 0.0f, double end = -1.0f, float scale = 1.0f, char const* contentStr = NULL); RequestRecord(unsigned cseq, responseHandler* handler, char const* absStartTime, char const* absEndTime = NULL, float scale = 1.0f, MediaSession* session = NULL, MediaSubsession* subsession = NULL); // alternative constructor for creating "PLAY" requests that include 'absolute' time values virtual ~RequestRecord(); RequestRecord*& next() { return fNext; } unsigned& cseq() { return fCSeq; } char const* commandName() const { return fCommandName; } MediaSession* session() const { return fSession; } MediaSubsession* subsession() const { return fSubsession; } u_int32_t booleanFlags() const { return fBooleanFlags; } double start() const { return fStart; } double end() const { return fEnd; } char const* absStartTime() const { return fAbsStartTime; } char const* absEndTime() const { return fAbsEndTime; } float scale() const { return fScale; } char* contentStr() const { return fContentStr; } responseHandler*& handler() { return fHandler; } private: RequestRecord* fNext; unsigned fCSeq; char const* fCommandName; MediaSession* fSession; MediaSubsession* fSubsession; u_int32_t fBooleanFlags; double fStart, fEnd; char *fAbsStartTime, *fAbsEndTime; // used 
for optional 'absolute' (i.e., "time=") range specifications float fScale; char* fContentStr; responseHandler* fHandler; }; protected: RTSPClient(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum, int socketNumToServer); // called only by createNew(); virtual ~RTSPClient(); void reset(); void setBaseURL(char const* url); int grabSocket(); // allows a subclass to reuse our input socket, so that it won't get closed when we're deleted virtual unsigned sendRequest(RequestRecord* request); virtual Boolean setRequestFields(RequestRecord* request, char*& cmdURL, Boolean& cmdURLWasAllocated, char const*& protocolStr, char*& extraHeaders, Boolean& extraHeadersWereAllocated); // used to implement "sendRequest()"; subclasses may reimplement this (e.g., when implementing a new command name) private: // redefined virtual functions virtual Boolean isRTSPClient() const; private: class RequestQueue { public: RequestQueue(); RequestQueue(RequestQueue& origQueue); // moves the queue contents to the new queue virtual ~RequestQueue(); void enqueue(RequestRecord* request); // "request" must not be NULL RequestRecord* dequeue(); void putAtHead(RequestRecord* request); // "request" must not be NULL RequestRecord* findByCSeq(unsigned cseq); Boolean isEmpty() const { return fHead == NULL; } private: RequestRecord* fHead; RequestRecord* fTail; }; void resetTCPSockets(); void resetResponseBuffer(); int openConnection(); // -1: failure; 0: pending; 1: success int connectToServer(int socketNum, portNumBits remotePortNum); // used to implement "openConnection()"; result values are the same char* createAuthenticatorString(char const* cmd, char const* url); void handleRequestError(RequestRecord* request); Boolean parseResponseCode(char const* line, unsigned& responseCode, char const*& responseString); void handleIncomingRequest(); static Boolean checkForHeader(char const* line, char const* headerName, unsigned headerNameLength, char const*& headerParams); Boolean parseTransportParams(char const* paramsStr, char*& serverAddressStr, portNumBits& serverPortNum, unsigned char& rtpChannelId, unsigned char& rtcpChannelId); Boolean parseScaleParam(char const* paramStr, float& scale); Boolean parseRTPInfoParams(char const*& paramStr, u_int16_t& seqNum, u_int32_t& timestamp); Boolean handleSETUPResponse(MediaSubsession& subsession, char const* sessionParamsStr, char const* transportParamsStr, Boolean streamUsingTCP); Boolean handlePLAYResponse(MediaSession& session, MediaSubsession& subsession, char const* scaleParamsStr, char const* rangeParamsStr, char const* rtpInfoParamsStr); Boolean handleTEARDOWNResponse(MediaSession& session, MediaSubsession& subsession); Boolean handleGET_PARAMETERResponse(char const* parameterName, char*& resultValueString); Boolean handleAuthenticationFailure(char const* wwwAuthenticateParamsStr); Boolean resendCommand(RequestRecord* request); char const* sessionURL(MediaSession const& session) const; static void handleAlternativeRequestByte(void*, u_int8_t requestByte); void handleAlternativeRequestByte1(u_int8_t requestByte); void constructSubsessionURL(MediaSubsession const& subsession, char const*& prefix, char const*& separator, char const*& suffix); // Support for tunneling RTSP-over-HTTP: Boolean setupHTTPTunneling1(); // send the HTTP "GET" static void responseHandlerForHTTP_GET(RTSPClient* rtspClient, int responseCode, char* responseString); void responseHandlerForHTTP_GET1(int responseCode, char* responseString); 
Boolean setupHTTPTunneling2(); // send the HTTP "POST" // Support for asynchronous connections to the server: static void connectionHandler(void*, int /*mask*/); void connectionHandler1(); // Support for handling data sent back by a server: static void incomingDataHandler(void*, int /*mask*/); void incomingDataHandler1(); void handleResponseBytes(int newBytesRead); protected: int fVerbosityLevel; unsigned fCSeq; // sequence number, used in consecutive requests Authenticator fCurrentAuthenticator; netAddressBits fServerAddress; private: portNumBits fTunnelOverHTTPPortNum; char* fUserAgentHeaderStr; unsigned fUserAgentHeaderStrLen; int fInputSocketNum, fOutputSocketNum; char* fBaseURL; unsigned char fTCPStreamIdCount; // used for (optional) RTP/TCP char* fLastSessionId; unsigned fSessionTimeoutParameter; // optionally set in response "Session:" headers char* fResponseBuffer; unsigned fResponseBytesAlreadySeen, fResponseBufferBytesLeft; RequestQueue fRequestsAwaitingConnection, fRequestsAwaitingHTTPTunneling, fRequestsAwaitingResponse; // Support for tunneling RTSP-over-HTTP: char fSessionCookie[33]; unsigned fSessionCookieCounter; Boolean fHTTPTunnelingConnectionIsPending; }; ////////// HandlerServerForREGISTERCommand ///////// // A simple server that creates a new "RTSPClient" object whenever a "REGISTER" request arrives (specifying the "rtsp://" URL // of a stream). The new "RTSPClient" object will be created with the specified URL, and passed to the provided handler function. typedef void onRTSPClientCreationFunc(RTSPClient* newRTSPClient, Boolean requestStreamingOverTCP); class HandlerServerForREGISTERCommand: public RTSPServer { public: static HandlerServerForREGISTERCommand* createNew(UsageEnvironment& env, onRTSPClientCreationFunc* creationFunc, Port ourPort = 0, UserAuthenticationDatabase* authDatabase = NULL, int verbosityLevel = 0, char const* applicationName = NULL); // If ourPort.num() == 0, we'll choose the port number ourself. (Use the following function to get it.) portNumBits serverPortNum() const { return ntohs(fRTSPServerPort.num()); } protected: HandlerServerForREGISTERCommand(UsageEnvironment& env, onRTSPClientCreationFunc* creationFunc, int ourSocket, Port ourPort, UserAuthenticationDatabase* authDatabase, int verbosityLevel, char const* applicationName); // called only by createNew(); virtual ~HandlerServerForREGISTERCommand(); virtual RTSPClient* createNewRTSPClient(char const* rtspURL, int verbosityLevel, char const* applicationName, int socketNumToServer); // This function - by default - creates a (base) "RTSPClient" object. If you want to create a subclass // of "RTSPClient" instead, then subclass this class, and redefine this virtual function. 
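// (For illustration - not part of this header - creating such a server, with a
//  hypothetical creation handler:
#if 0
static void myRTSPClientCreationFunc(RTSPClient* newRTSPClient, Boolean requestStreamingOverTCP) {
  // begin streaming from "newRTSPClient", e.g., by sending RTSP commands on it
}
HandlerServerForREGISTERCommand* registerServer
  = HandlerServerForREGISTERCommand::createNew(env, myRTSPClientCreationFunc);
#endif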
protected: // redefined virtual functions virtual char const* allowedCommandNames(); // we support "OPTIONS" and "REGISTER" only virtual Boolean weImplementREGISTER(char const* proxyURLSuffix, char*& responseStr); // redefined to return True virtual void implementCmd_REGISTER(char const* url, char const* urlSuffix, int socketToRemoteServer, Boolean deliverViaTCP, char const* proxyURLSuffix); private: onRTSPClientCreationFunc* fCreationFunc; int fVerbosityLevel; char* fApplicationName; }; #endif live/liveMedia/include/MP3Transcoder.hh000444 001751 000000 00000002607 12265042432 020233 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // MP3 Transcoder // C++ header #ifndef _MP3_TRANSCODER_HH #define _MP3_TRANSCODER_HH #ifndef _MP3_ADU_HH #include "MP3ADU.hh" #endif #ifndef _MP3_ADU_TRANSCODER_HH #include "MP3ADUTranscoder.hh" #endif class MP3Transcoder: public MP3FromADUSource { public: static MP3Transcoder* createNew(UsageEnvironment& env, unsigned outBitrate /* in kbps */, FramedSource* inputSource); protected: MP3Transcoder(UsageEnvironment& env, MP3ADUTranscoder* aduTranscoder); // called only by createNew() virtual ~MP3Transcoder(); }; #endif live/liveMedia/include/MPEG1or2AudioStreamFramer.hh000444 001751 000000 00000004577 12265042432 022346 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up an MPEG (1,2) audio elementary stream into frames // C++ header #ifndef _MPEG_1OR2_AUDIO_STREAM_FRAMER_HH #define _MPEG_1OR2_AUDIO_STREAM_FRAMER_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif class MPEG1or2AudioStreamFramer: public FramedFilter { public: static MPEG1or2AudioStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean syncWithInputSource = False); // If "syncWithInputSource" is True, the stream's presentation time // will be reset to that of the input source, whenever new data // is read from it. 
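  /* Editor's usage sketch (illustrative only): framing an MPEG audio elementary stream
     that is read from a file; assumes an existing "UsageEnvironment& env", and a file
     name chosen purely for illustration:

       FramedSource* fileSource = ByteStreamFileSource::createNew(env, "test.mp3");
       MPEG1or2AudioStreamFramer* framer
         = MPEG1or2AudioStreamFramer::createNew(env, fileSource);

     "framer" can then be played into a sink (e.g., a "MPEG1or2AudioRTPSink"). */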
void flushInput(); // called if there is a discontinuity (seeking) in the input private: MPEG1or2AudioStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean syncWithInputSource); // called only by createNew() virtual ~MPEG1or2AudioStreamFramer(); static void continueReadProcessing(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime); void continueReadProcessing(); void resetPresentationTime(struct timeval newPresentationTime); // useful if we're being synced with a separate (e.g., video) stream private: // redefined virtual functions: virtual void doGetNextFrame(); private: void reset(); struct timeval currentFramePlayTime() const; private: Boolean fSyncWithInputSource; struct timeval fNextFramePresentationTime; private: // parsing state class MPEG1or2AudioStreamParser* fParser; friend class MPEG1or2AudioStreamParser; // hack }; #endif live/liveMedia/include/MPEG1or2VideoRTPSink.hh000444 001751 000000 00000005225 12265042432 021244 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for MPEG video (RFC 2250) // C++ header #ifndef _MPEG_1OR2_VIDEO_RTP_SINK_HH #define _MPEG_1OR2_VIDEO_RTP_SINK_HH #ifndef _VIDEO_RTP_SINK_HH #include "VideoRTPSink.hh" #endif class MPEG1or2VideoRTPSink: public VideoRTPSink { public: static MPEG1or2VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs); protected: MPEG1or2VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs); // called only by createNew() virtual ~MPEG1or2VideoRTPSink(); private: // redefined virtual functions: virtual Boolean sourceIsCompatibleWithUs(MediaSource& source); virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual Boolean allowFragmentationAfterStart() const; virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; virtual unsigned specialHeaderSize() const; private: // MPEG video-specific state, used to decide how to fill out the // video-specific header, and when to include multiple 'frames' in a // single outgoing RTP packet. Eventually we should somehow get this // state from the source (MPEG1or2VideoStreamFramer) instead, as the source // already has this info itself. struct { unsigned temporal_reference; unsigned char picture_coding_type; unsigned char vector_code_bits; // FBV,BFC,FFV,FFC from RFC 2250, sec. 
3.4 } fPictureState; Boolean fPreviousFrameWasSlice; // used to implement frameCanAppearAfterPacketStart() Boolean fSequenceHeaderPresent; Boolean fPacketBeginsSlice, fPacketEndsSlice; }; #endif live/liveMedia/include/MPEG1or2DemuxedElementaryStream.hh000444 001751 000000 00000004451 12265042432 023560 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A MPEG 1 or 2 Elementary Stream, demultiplexed from a Program Stream // C++ header #ifndef _MPEG_1OR2_DEMUXED_ELEMENTARY_STREAM_HH #define _MPEG_1OR2_DEMUXED_ELEMENTARY_STREAM_HH #ifndef _MPEG_1OR2_DEMUX_HH #include "MPEG1or2Demux.hh" #endif class MPEG1or2DemuxedElementaryStream: public FramedSource { public: MPEG1or2Demux::SCR lastSeenSCR() const { return fLastSeenSCR; } unsigned char mpegVersion() const { return fMPEGversion; } MPEG1or2Demux& sourceDemux() const { return fOurSourceDemux; } private: // We are created only by a MPEG1or2Demux (a friend) MPEG1or2DemuxedElementaryStream(UsageEnvironment& env, u_int8_t streamIdTag, MPEG1or2Demux& sourceDemux); virtual ~MPEG1or2DemuxedElementaryStream(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void doStopGettingFrames(); virtual char const* MIMEtype() const; virtual unsigned maxFrameSize() const; private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); private: u_int8_t fOurStreamIdTag; MPEG1or2Demux& fOurSourceDemux; char const* fMIMEtype; MPEG1or2Demux::SCR fLastSeenSCR; unsigned char fMPEGversion; friend class MPEG1or2Demux; }; #endif live/liveMedia/include/MPEG1or2VideoStreamFramer.hh000444 001751 000000 00000003736 12265042432 022347 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A filter that breaks up an MPEG 1 or 2 video elementary stream into // frames for: Video_Sequence_Header, GOP_Header, Picture_Header // C++ header #ifndef _MPEG_1OR2_VIDEO_STREAM_FRAMER_HH #define _MPEG_1OR2_VIDEO_STREAM_FRAMER_HH #ifndef _MPEG_VIDEO_STREAM_FRAMER_HH #include "MPEGVideoStreamFramer.hh" #endif class MPEG1or2VideoStreamFramer: public MPEGVideoStreamFramer { public: static MPEG1or2VideoStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean iFramesOnly = False, double vshPeriod = 5.0 /* how often (in seconds) to inject a Video_Sequence_Header, if one doesn't already appear in the stream */); protected: MPEG1or2VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean iFramesOnly, double vshPeriod, Boolean createParser = True); // called only by createNew(), or by subclass constructors virtual ~MPEG1or2VideoStreamFramer(); private: // redefined virtual functions: virtual Boolean isMPEG1or2VideoStreamFramer() const; private: double getCurrentPTS() const; friend class MPEG1or2VideoStreamParser; // hack }; #endif live/liveMedia/include/MPEGVideoStreamFramer.hh000444 001751 000000 00000005173 12265042432 021640 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A filter that breaks up an MPEG video elementary stream into // headers and frames // C++ header #ifndef _MPEG_VIDEO_STREAM_FRAMER_HH #define _MPEG_VIDEO_STREAM_FRAMER_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif class TimeCode { public: TimeCode(); virtual ~TimeCode(); int operator==(TimeCode const& arg2); unsigned days, hours, minutes, seconds, pictures; }; class MPEGVideoStreamFramer: public FramedFilter { public: Boolean& pictureEndMarker() { return fPictureEndMarker; } // a hack for implementing the RTP 'M' bit void flushInput(); // called if there is a discontinuity (seeking) in the input protected: MPEGVideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource); // we're an abstract base class virtual ~MPEGVideoStreamFramer(); void computePresentationTime(unsigned numAdditionalPictures); // sets "fPresentationTime" void setTimeCode(unsigned hours, unsigned minutes, unsigned seconds, unsigned pictures, unsigned picturesSinceLastGOP); private: // redefined virtual functions virtual void doGetNextFrame(); private: void reset(); static void continueReadProcessing(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime); void continueReadProcessing(); protected: double fFrameRate; // Note: For MPEG-4, this is really a 'tick rate' unsigned fPictureCount; // hack used to implement doGetNextFrame() Boolean fPictureEndMarker; struct timeval fPresentationTimeBase; // parsing state class MPEGVideoStreamParser* fParser; friend class MPEGVideoStreamParser; // hack private: TimeCode fCurGOPTimeCode, fPrevGOPTimeCode; unsigned fPicturesAdjustment; double fPictureTimeBase; unsigned fTcSecsBase; Boolean fHaveSeenFirstTimeCode; }; #endif live/liveMedia/include/MediaSink.hh000444 001751 000000 00000010717 12265042432 017454 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// Media Sinks // C++ header #ifndef _MEDIA_SINK_HH #define _MEDIA_SINK_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif class MediaSink: public Medium { public: static Boolean lookupByName(UsageEnvironment& env, char const* sinkName, MediaSink*& resultSink); typedef void (afterPlayingFunc)(void* clientData); Boolean startPlaying(MediaSource& source, afterPlayingFunc* afterFunc, void* afterClientData); virtual void stopPlaying(); // Test for specific types of sink: virtual Boolean isRTPSink() const; FramedSource* source() const {return fSource;} protected: MediaSink(UsageEnvironment& env); // abstract base class virtual ~MediaSink(); virtual Boolean sourceIsCompatibleWithUs(MediaSource& source); // called by startPlaying() virtual Boolean continuePlaying() = 0; // called by startPlaying() static void onSourceClosure(void* clientData); // can be used in "getNextFrame()" calls void onSourceClosure(); // should be called (on ourselves) by continuePlaying() when it // discovers that the source we're playing from has closed. FramedSource* fSource; private: // redefined virtual functions: virtual Boolean isSink() const; private: // The following fields are used when we're being played: afterPlayingFunc* fAfterFunc; void* fAfterClientData; }; // A data structure that a sink may use for an output packet: class OutPacketBuffer { public: OutPacketBuffer(unsigned preferredPacketSize, unsigned maxPacketSize); ~OutPacketBuffer(); static unsigned maxSize; unsigned char* curPtr() const {return &fBuf[fPacketStart + fCurOffset];} unsigned totalBytesAvailable() const { return fLimit - (fPacketStart + fCurOffset); } unsigned totalBufferSize() const { return fLimit; } unsigned char* packet() const {return &fBuf[fPacketStart];} unsigned curPacketSize() const {return fCurOffset;} void increment(unsigned numBytes) {fCurOffset += numBytes;} void enqueue(unsigned char const* from, unsigned numBytes); void enqueueWord(u_int32_t word); void insert(unsigned char const* from, unsigned numBytes, unsigned toPosition); void insertWord(u_int32_t word, unsigned toPosition); void extract(unsigned char* to, unsigned numBytes, unsigned fromPosition); u_int32_t extractWord(unsigned fromPosition); void skipBytes(unsigned numBytes); Boolean isPreferredSize() const {return fCurOffset >= fPreferred;} Boolean wouldOverflow(unsigned numBytes) const { return (fCurOffset+numBytes) > fMax; } unsigned numOverflowBytes(unsigned numBytes) const { return (fCurOffset+numBytes) - fMax; } Boolean isTooBigForAPacket(unsigned numBytes) const { return numBytes > fMax; } void setOverflowData(unsigned overflowDataOffset, unsigned overflowDataSize, struct timeval const& presentationTime, unsigned durationInMicroseconds); unsigned overflowDataSize() const {return fOverflowDataSize;} struct timeval overflowPresentationTime() const {return fOverflowPresentationTime;} unsigned overflowDurationInMicroseconds() const {return fOverflowDurationInMicroseconds;} Boolean haveOverflowData() const {return fOverflowDataSize > 0;} void useOverflowData(); void adjustPacketStart(unsigned numBytes); void resetPacketStart(); void resetOffset() { fCurOffset = 0; } void resetOverflowData() { fOverflowDataOffset = fOverflowDataSize = 0; } private: unsigned fPacketStart, fCurOffset, fPreferred, fMax, fLimit; unsigned char* fBuf; unsigned fOverflowDataOffset, fOverflowDataSize; struct timeval fOverflowPresentationTime; unsigned fOverflowDurationInMicroseconds; }; #endif live/liveMedia/include/MediaSource.hh000444 001751 000000 00000003675 12265042432 020015 
0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Media Sources // C++ header #ifndef _MEDIA_SOURCE_HH #define _MEDIA_SOURCE_HH #ifndef _MEDIA_HH #include "Media.hh" #endif class MediaSource: public Medium { public: static Boolean lookupByName(UsageEnvironment& env, char const* sourceName, MediaSource*& resultSource); virtual void getAttributes() const; // attributes are returned in "env's" 'result message' // The MIME type of this source: virtual char const* MIMEtype() const; // Test for specific types of source: virtual Boolean isFramedSource() const; virtual Boolean isRTPSource() const; virtual Boolean isMPEG1or2VideoStreamFramer() const; virtual Boolean isMPEG4VideoStreamFramer() const; virtual Boolean isH264VideoStreamFramer() const; virtual Boolean isH265VideoStreamFramer() const; virtual Boolean isDVVideoStreamFramer() const; virtual Boolean isJPEGVideoSource() const; virtual Boolean isAMRAudioSource() const; protected: MediaSource(UsageEnvironment& env); // abstract base class virtual ~MediaSource(); private: // redefined virtual functions: virtual Boolean isSource() const; }; #endif live/liveMedia/include/MultiFramedRTPSink.hh000444 001751 000000 00000013551 12265042432 021233 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for a common kind of payload format: Those which pack multiple, // complete codec frames (as many as possible) into each RTP packet. // C++ header #ifndef _MULTI_FRAMED_RTP_SINK_HH #define _MULTI_FRAMED_RTP_SINK_HH #ifndef _RTP_SINK_HH #include "RTPSink.hh" #endif class MultiFramedRTPSink: public RTPSink { public: void setPacketSizes(unsigned preferredPacketSize, unsigned maxPacketSize); typedef void (onSendErrorFunc)(void* clientData); void setOnSendErrorFunc(onSendErrorFunc* onSendErrorFunc, void* onSendErrorFuncData) { // Can be used to set a callback function to be called if there's an error sending RTP packets on our socket. 
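    // (Editor's note, illustrative: a caller might use this, e.g., to schedule a stream
    // teardown if sending over a RTP-over-TCP connection fails; when invoked, the
    // callback is passed the "onSendErrorFuncData" pointer that was registered here.)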
fOnSendErrorFunc = onSendErrorFunc; fOnSendErrorData = onSendErrorFuncData; } protected: MultiFramedRTPSink(UsageEnvironment& env, Groupsock* rtpgs, unsigned char rtpPayloadType, unsigned rtpTimestampFrequency, char const* rtpPayloadFormatName, unsigned numChannels = 1); // we're a virtual base class virtual ~MultiFramedRTPSink(); virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); // perform any processing specific to the particular payload format virtual Boolean allowFragmentationAfterStart() const; // whether a frame can be fragmented if other frame(s) appear earlier // in the packet (by default: False) virtual Boolean allowOtherFramesAfterLastFragment() const; // whether other frames can be packed into a packet following the // final fragment of a previous, fragmented frame (by default: False) virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; // whether this frame can appear in position >1 in a pkt (default: True) virtual unsigned specialHeaderSize() const; // returns the size of any special header used (following the RTP header) (default: 0) virtual unsigned frameSpecificHeaderSize() const; // returns the size of any frame-specific header used (before each frame // within the packet) (default: 0) virtual unsigned computeOverflowForNewFrame(unsigned newFrameSize) const; // returns the number of overflow bytes that would be produced by adding a new // frame of size "newFrameSize" to the current RTP packet. // (By default, this just calls "numOverflowBytes()", but subclasses can redefine // this to (e.g.) impose a granularity upon RTP payload fragments.) 
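  /* Editor's sketch (illustrative only): a hypothetical subclass "MyRTPSink", for a
     payload format with a 4-byte special header, might redefine the virtual functions
     above roughly as follows (using helper functions declared just below):

       unsigned MyRTPSink::specialHeaderSize() const { return 4; }

       void MyRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
                                              unsigned char* frameStart,
                                              unsigned numBytesInFrame,
                                              struct timeval framePresentationTime,
                                              unsigned numRemainingBytes) {
         setSpecialHeaderWord(fragmentationOffset); // e.g., record the fragment offset
         // then let the base class handle the rest (RTP timestamp etc.):
         MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset, frameStart,
                                                    numBytesInFrame,
                                                    framePresentationTime,
                                                    numRemainingBytes);
       }
  */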
// Functions that might be called by doSpecialFrameHandling(), or other subclass virtual functions: Boolean isFirstPacket() const { return fIsFirstPacket; } Boolean isFirstFrameInPacket() const { return fNumFramesUsedSoFar == 0; } unsigned curFragmentationOffset() const { return fCurFragmentationOffset; } void setMarkerBit(); void setTimestamp(struct timeval framePresentationTime); void setSpecialHeaderWord(unsigned word, /* 32 bits, in host order */ unsigned wordPosition = 0); void setSpecialHeaderBytes(unsigned char const* bytes, unsigned numBytes, unsigned bytePosition = 0); void setFrameSpecificHeaderWord(unsigned word, /* 32 bits, in host order */ unsigned wordPosition = 0); void setFrameSpecificHeaderBytes(unsigned char const* bytes, unsigned numBytes, unsigned bytePosition = 0); void setFramePadding(unsigned numPaddingBytes); unsigned numFramesUsedSoFar() const { return fNumFramesUsedSoFar; } unsigned ourMaxPacketSize() const { return fOurMaxPacketSize; } public: // redefined virtual functions: virtual void stopPlaying(); protected: // redefined virtual functions: virtual Boolean continuePlaying(); private: void buildAndSendPacket(Boolean isFirstPacket); void packFrame(); void sendPacketIfNecessary(); static void sendNext(void* firstArg); friend void sendNext(void*); static void afterGettingFrame(void* clientData, unsigned numBytesRead, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned numBytesRead, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); Boolean isTooBigForAPacket(unsigned numBytes) const; static void ourHandleClosure(void* clientData); private: OutPacketBuffer* fOutBuf; Boolean fNoFramesLeft; unsigned fNumFramesUsedSoFar; unsigned fCurFragmentationOffset; Boolean fPreviousFrameEndedFragmentation; Boolean fIsFirstPacket; struct timeval fNextSendTime; unsigned fTimestampPosition; unsigned fSpecialHeaderPosition; unsigned fSpecialHeaderSize; // size in bytes of any special header used unsigned fCurFrameSpecificHeaderPosition; unsigned fCurFrameSpecificHeaderSize; // size in bytes of cur frame-specific header unsigned fTotalFrameSpecificHeaderSizes; // size of all frame-specific hdrs in pkt unsigned fOurMaxPacketSize; onSendErrorFunc* fOnSendErrorFunc; void* fOnSendErrorData; }; #endif live/liveMedia/include/MultiFramedRTPSource.hh000444 001751 000000 00000012624 12265042432 021567 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP source for a common kind of payload format: Those which pack multiple, // complete codec frames (as many as possible) into each RTP packet. 
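// (Editor's note, illustrative: concrete payload formats - e.g. "MPEG4GenericRTPSource" -
// specialize this class chiefly by redefining "processSpecialHeader()", declared below,
// and - when the packet buffering itself must change - by supplying their own
// "BufferedPacketFactory" subclass.)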
// C++ header

#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
#define _MULTI_FRAMED_RTP_SOURCE_HH

#ifndef _RTP_SOURCE_HH
#include "RTPSource.hh"
#endif

class BufferedPacket; // forward
class BufferedPacketFactory; // forward

class MultiFramedRTPSource: public RTPSource {
protected:
  MultiFramedRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
                       unsigned char rtpPayloadFormat,
                       unsigned rtpTimestampFrequency,
                       BufferedPacketFactory* packetFactory = NULL);
      // virtual base class
  virtual ~MultiFramedRTPSource();

  virtual Boolean processSpecialHeader(BufferedPacket* packet,
                                       unsigned& resultSpecialHeaderSize);
      // Subclasses redefine this to handle any special, payload format
      // specific header that follows the RTP header.

  virtual Boolean packetIsUsableInJitterCalculation(unsigned char* packet,
                                                    unsigned packetSize);
      // The default implementation returns True, but this can be redefined

protected:
  Boolean fCurrentPacketBeginsFrame;
  Boolean fCurrentPacketCompletesFrame;

protected:
  // redefined virtual functions:
  virtual void doStopGettingFrames();

private:
  // redefined virtual functions:
  virtual void doGetNextFrame();
  virtual void setPacketReorderingThresholdTime(unsigned uSeconds);

private:
  void reset();
  void doGetNextFrame1();

  static void networkReadHandler(MultiFramedRTPSource* source, int /*mask*/);
  void networkReadHandler1();

  Boolean fAreDoingNetworkReads;
  BufferedPacket* fPacketReadInProgress;
  Boolean fNeedDelivery;
  Boolean fPacketLossInFragmentedFrame;
  unsigned char* fSavedTo;
  unsigned fSavedMaxSize;

  // A buffer to (optionally) hold incoming pkts that have been reordered
  class ReorderingPacketBuffer* fReorderingBuffer;
};


// A 'packet data' class that's used to implement the above.
// Note that this can be subclassed - if desired - to redefine
// "nextEnclosedFrameSize()".

class BufferedPacket {
public:
  BufferedPacket();
  virtual ~BufferedPacket();

  Boolean hasUsableData() const { return fTail > fHead; }
  unsigned useCount() const { return fUseCount; }

  Boolean fillInData(RTPInterface& rtpInterface, Boolean& packetReadWasIncomplete);
  void assignMiscParams(unsigned short rtpSeqNo, unsigned rtpTimestamp,
                        struct timeval presentationTime,
                        Boolean hasBeenSyncedUsingRTCP, Boolean rtpMarkerBit,
                        struct timeval timeReceived);
  void skip(unsigned numBytes); // used to skip over an initial header
  void removePadding(unsigned numBytes); // used to remove trailing bytes
  void appendData(unsigned char* newData, unsigned numBytes);
  void use(unsigned char* to, unsigned toSize,
           unsigned& bytesUsed, unsigned& bytesTruncated,
           unsigned short& rtpSeqNo, unsigned& rtpTimestamp,
           struct timeval& presentationTime,
           Boolean& hasBeenSyncedUsingRTCP, Boolean& rtpMarkerBit);

  BufferedPacket*& nextPacket() { return fNextPacket; }

  unsigned short rtpSeqNo() const { return fRTPSeqNo; }
  struct timeval const& timeReceived() const { return fTimeReceived; }

  unsigned char* data() const { return &fBuf[fHead]; }
  unsigned dataSize() const { return fTail-fHead; }
  Boolean rtpMarkerBit() const { return fRTPMarkerBit; }
  Boolean& isFirstPacket() { return fIsFirstPacket; }
  unsigned bytesAvailable() const { return fPacketSize - fTail; }

protected:
  virtual void reset();
  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize);
      // The above function has been deprecated.
Instead, new subclasses should use: virtual void getNextEnclosedFrameParameters(unsigned char*& framePtr, unsigned dataSize, unsigned& frameSize, unsigned& frameDurationInMicroseconds); unsigned fPacketSize; unsigned char* fBuf; unsigned fHead; unsigned fTail; private: BufferedPacket* fNextPacket; // used to link together packets unsigned fUseCount; unsigned short fRTPSeqNo; unsigned fRTPTimestamp; struct timeval fPresentationTime; // corresponding to "fRTPTimestamp" Boolean fHasBeenSyncedUsingRTCP; Boolean fRTPMarkerBit; Boolean fIsFirstPacket; struct timeval fTimeReceived; }; // A 'factory' class for creating "BufferedPacket" objects. // If you want to subclass "BufferedPacket", then you'll also // want to subclass this, to redefine createNewPacket() class BufferedPacketFactory { public: BufferedPacketFactory(); virtual ~BufferedPacketFactory(); virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource); }; #endif live/liveMedia/include/MPEG2IndexFromTransportStream.hh000444 001751 000000 00000006246 12265042432 023331 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A filter that produces a sequence of I-frame indices from a MPEG-2 Transport Stream // C++ header #ifndef _MPEG2_IFRAME_INDEX_FROM_TRANSPORT_STREAM_HH #define _MPEG2_IFRAME_INDEX_FROM_TRANSPORT_STREAM_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif #ifndef TRANSPORT_PACKET_SIZE #define TRANSPORT_PACKET_SIZE 188 #endif #ifndef MAX_PES_PACKET_SIZE #define MAX_PES_PACKET_SIZE 65536 #endif class IndexRecord; // forward class MPEG2IFrameIndexFromTransportStream: public FramedFilter { public: static MPEG2IFrameIndexFromTransportStream* createNew(UsageEnvironment& env, FramedSource* inputSource); protected: MPEG2IFrameIndexFromTransportStream(UsageEnvironment& env, FramedSource* inputSource); // called only by createNew() virtual ~MPEG2IFrameIndexFromTransportStream(); private: // Redefined virtual functions: virtual void doGetNextFrame(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); static void handleInputClosure(void* clientData); void handleInputClosure1(); void analyzePAT(unsigned char* pkt, unsigned size); void analyzePMT(unsigned char* pkt, unsigned size); Boolean deliverIndexRecord(); Boolean parseFrame(); Boolean parseToNextCode(unsigned char& nextCode); void compactParseBuffer(); void addToTail(IndexRecord* newIndexRecord); private: Boolean fIsH264; // True iff the video is H.264 (encapsulated in a Transport Stream) Boolean fIsH265; // True iff the video is H.265 (encapsulated in a Transport Stream) unsigned long fInputTransportPacketCounter; unsigned fClosureNumber; u_int8_t fLastContinuityCounter; float fFirstPCR, fLastPCR; Boolean fHaveSeenFirstPCR; u_int16_t fPMT_PID, fVideo_PID; // Note: We assume: 1 program per Transport Stream; 1 video stream per program unsigned char fInputBuffer[TRANSPORT_PACKET_SIZE]; unsigned char* fParseBuffer; unsigned fParseBufferSize; unsigned fParseBufferFrameStart; unsigned fParseBufferParseEnd; unsigned fParseBufferDataEnd; IndexRecord* fHeadIndexRecord; IndexRecord* fTailIndexRecord; }; #endif live/liveMedia/include/QCELPAudioRTPSource.hh000444 001751 000000 00000002606 12265042432 021203 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Qualcomm "PureVoice" (aka. 
"QCELP") Audio RTP Sources // C++ header #ifndef _QCELP_AUDIO_RTP_SOURCE_HH #define _QCELP_AUDIO_RTP_SOURCE_HH #ifndef _RTP_SOURCE_HH #include "RTPSource.hh" #endif class QCELPAudioRTPSource { public: static FramedSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, RTPSource*& resultRTPSource, unsigned char rtpPayloadFormat = 12, unsigned rtpTimestampFrequency = 8000); // This returns a source to read from, but "resultRTPSource" will // point to RTP-related state. }; #endif live/liveMedia/include/QuickTimeFileSink.hh000444 001751 000000 00000014672 12265042432 021134 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A sink that generates a QuickTime file from a composite media session // C++ header #ifndef _QUICKTIME_FILE_SINK_HH #define _QUICKTIME_FILE_SINK_HH #ifndef _MEDIA_SESSION_HH #include "MediaSession.hh" #endif class QuickTimeFileSink: public Medium { public: static QuickTimeFileSink* createNew(UsageEnvironment& env, MediaSession& inputSession, char const* outputFileName, unsigned bufferSize = 20000, unsigned short movieWidth = 240, unsigned short movieHeight = 180, unsigned movieFPS = 15, Boolean packetLossCompensate = False, Boolean syncStreams = False, Boolean generateHintTracks = False, Boolean generateMP4Format = False); typedef void (afterPlayingFunc)(void* clientData); Boolean startPlaying(afterPlayingFunc* afterFunc, void* afterClientData); unsigned numActiveSubsessions() const { return fNumSubsessions; } private: QuickTimeFileSink(UsageEnvironment& env, MediaSession& inputSession, char const* outputFileName, unsigned bufferSize, unsigned short movieWidth, unsigned short movieHeight, unsigned movieFPS, Boolean packetLossCompensate, Boolean syncStreams, Boolean generateHintTracks, Boolean generateMP4Format); // called only by createNew() virtual ~QuickTimeFileSink(); Boolean continuePlaying(); static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); static void onSourceClosure(void* clientData); void onSourceClosure1(); static void onRTCPBye(void* clientData); void completeOutputFile(); private: friend class SubsessionIOState; MediaSession& fInputSession; FILE* fOutFid; unsigned fBufferSize; Boolean fPacketLossCompensate; Boolean fSyncStreams, fGenerateMP4Format; struct timeval fNewestSyncTime, fFirstDataTime; Boolean fAreCurrentlyBeingPlayed; afterPlayingFunc* fAfterFunc; void* fAfterClientData; unsigned fAppleCreationTime; unsigned fLargestRTPtimestampFrequency; unsigned fNumSubsessions, fNumSyncedSubsessions; struct timeval fStartTime; Boolean fHaveCompletedOutputFile; private: ///// Definitions specific to the QuickTime file format: unsigned addWord64(u_int64_t word); unsigned 
addWord(unsigned word); unsigned addHalfWord(unsigned short halfWord); unsigned addByte(unsigned char byte) { putc(byte, fOutFid); return 1; } unsigned addZeroWords(unsigned numWords); unsigned add4ByteString(char const* str); unsigned addArbitraryString(char const* str, Boolean oneByteLength = True); unsigned addAtomHeader(char const* atomName); unsigned addAtomHeader64(char const* atomName); // strlen(atomName) must be 4 void setWord(int64_t filePosn, unsigned size); void setWord64(int64_t filePosn, u_int64_t size); unsigned movieTimeScale() const {return fLargestRTPtimestampFrequency;} // Define member functions for outputting various types of atom: #define _atom(name) unsigned addAtom_##name() _atom(ftyp); // for MP4 format files _atom(moov); _atom(mvhd); _atom(iods); // for MP4 format files _atom(trak); _atom(tkhd); _atom(edts); _atom(elst); _atom(tref); _atom(hint); _atom(mdia); _atom(mdhd); _atom(hdlr); _atom(minf); _atom(smhd); _atom(vmhd); _atom(gmhd); _atom(gmin); unsigned addAtom_hdlr2(); _atom(dinf); _atom(dref); _atom(alis); _atom(stbl); _atom(stsd); unsigned addAtom_genericMedia(); unsigned addAtom_soundMediaGeneral(); _atom(ulaw); _atom(alaw); _atom(Qclp); _atom(wave); _atom(frma); _atom(Fclp); _atom(Hclp); _atom(mp4a); // _atom(wave); // _atom(frma); _atom(esds); _atom(srcq); _atom(h263); _atom(avc1); _atom(avcC); _atom(mp4v); _atom(rtp); _atom(tims); _atom(stts); _atom(stss); _atom(stsc); _atom(stsz); _atom(co64); _atom(udta); _atom(name); _atom(hnti); _atom(sdp); _atom(hinf); _atom(totl); _atom(npck); _atom(tpay); _atom(trpy); _atom(nump); _atom(tpyl); _atom(dmed); _atom(dimm); _atom(drep); _atom(tmin); _atom(tmax); _atom(pmax); _atom(dmax); _atom(payt); unsigned addAtom_dummy(); private: unsigned short fMovieWidth, fMovieHeight; unsigned fMovieFPS; int64_t fMDATposition; int64_t fMVHD_durationPosn; unsigned fMaxTrackDurationM; // in movie time units class SubsessionIOState* fCurrentIOState; }; #endif live/liveMedia/include/QuickTimeGenericRTPSource.hh000444 001751 000000 00000004462 12265042432 022547 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP Sources containing generic QuickTime stream data, as defined in // // C++ header #ifndef _QUICKTIME_GENERIC_RTP_SOURCE_HH #define _QUICKTIME_GENERIC_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class QuickTimeGenericRTPSource: public MultiFramedRTPSource { public: static QuickTimeGenericRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mimeTypeString); // QuickTime-specific information, set from the QuickTime header // in each packet. This, along with the data following the header, // is used by receivers. 
struct QTState { char PCK; unsigned timescale; char* sdAtom; unsigned sdAtomSize; unsigned short width, height; // later add other state as needed ##### } qtState; protected: virtual ~QuickTimeGenericRTPSource(); private: QuickTimeGenericRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mimeTypeString); // called only by createNew() private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; private: char const* fMIMEtypeString; }; #endif live/liveMedia/include/liveMedia_version.hh000444 001751 000000 00000000446 12265042432 021252 0ustar00rsfwheel000000 000000 // Version information for the "liveMedia" library // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. #ifndef _LIVEMEDIA_VERSION_HH #define _LIVEMEDIA_VERSION_HH #define LIVEMEDIA_LIBRARY_VERSION_STRING "2014.01.13" #define LIVEMEDIA_LIBRARY_VERSION_INT 1389571200 #endif live/liveMedia/include/MPEG1or2FileServerDemux.hh000444 001751 000000 00000004645 12265042432 022041 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A server demultiplexer for a MPEG 1 or 2 Program Stream
// C++ header

#ifndef _MPEG_1OR2_FILE_SERVER_DEMUX_HH
#define _MPEG_1OR2_FILE_SERVER_DEMUX_HH

#ifndef _SERVER_MEDIA_SESSION_HH
#include "ServerMediaSession.hh"
#endif
#ifndef _MPEG_1OR2_DEMUXED_ELEMENTARY_STREAM_HH
#include "MPEG1or2DemuxedElementaryStream.hh"
#endif

class MPEG1or2FileServerDemux: public Medium {
public:
  static MPEG1or2FileServerDemux* createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);

  ServerMediaSubsession* newAudioServerMediaSubsession(); // MPEG-1 or 2 audio
  ServerMediaSubsession* newVideoServerMediaSubsession(Boolean iFramesOnly = False,
                                                       double vshPeriod = 5.0
                  /* how often (in seconds) to inject a Video_Sequence_Header,
                     if one doesn't already appear in the stream */);
  ServerMediaSubsession* newAC3AudioServerMediaSubsession(); // AC-3 audio (from VOB)

  unsigned fileSize() const { return fFileSize; }
  float fileDuration() const { return fFileDuration; }

private:
  MPEG1or2FileServerDemux(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
      // called only by createNew();
  virtual ~MPEG1or2FileServerDemux();

private:
  friend class MPEG1or2DemuxedServerMediaSubsession;
  MPEG1or2DemuxedElementaryStream* newElementaryStream(unsigned clientSessionId, u_int8_t streamIdTag);

private:
  char const* fFileName;
  unsigned fFileSize;
  float fFileDuration;
  Boolean fReuseFirstSource;
  MPEG1or2Demux* fSession0Demux;
  MPEG1or2Demux* fLastCreatedDemux;
  unsigned fLastClientSessionId;
};

#endif
live/liveMedia/include/StreamReplicator.hh000444 001751 000000 00000007573 12265042432 021076 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See .)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A class that can be used to create (possibly multiple) 'replicas' of an incoming stream.
// C++ header

#ifndef _STREAM_REPLICATOR_HH
#define _STREAM_REPLICATOR_HH

#ifndef _FRAMED_SOURCE_HH
#include "FramedSource.hh"
#endif

class StreamReplica; // forward

class StreamReplicator: public Medium {
public:
  static StreamReplicator* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean deleteWhenLastReplicaDies = True);
    // If "deleteWhenLastReplicaDies" is True (the default), then the "StreamReplicator" object is deleted when (and only when)
    //   all replicas have been deleted.  (In this case, you must *not* call "Medium::close()" on the "StreamReplicator" object,
    //   unless you never created any replicas from it to begin with.)
    // If "deleteWhenLastReplicaDies" is False, then the "StreamReplicator" object remains in existence, even when all replicas
    //   have been deleted.  (This allows you to create new replicas later, if you wish.)  In this case, you delete the
    //   "StreamReplicator" object by calling "Medium::close()" on it - but you must do so only when "numReplicas()" returns 0.
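  /* Editor's usage sketch (illustrative only): feeding one input source to two
     independent consumers; assumes existing "env" and "inputSource" objects:

       StreamReplicator* replicator = StreamReplicator::createNew(env, inputSource);
       FramedSource* replica1 = replicator->createStreamReplica();
       FramedSource* replica2 = replicator->createStreamReplica();

     Each replica is then played into its own sink (e.g., one "FileSink" and one
     "RTPSink"); each input frame is read once, then copied to every replica. */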
FramedSource* createStreamReplica(); unsigned numReplicas() const { return fNumReplicas; } FramedSource* inputSource() const { return fInputSource; } // Call before destruction if you want to prevent the destructor from closing the input source void detachInputSource() { fInputSource = NULL; } protected: StreamReplicator(UsageEnvironment& env, FramedSource* inputSource, Boolean deleteWhenLastReplicaDies); // called only by "createNew()" virtual ~StreamReplicator(); private: // Routines called by replicas to implement frame delivery, and the stopping/restarting/deletion of replicas: friend class StreamReplica; void getNextFrame(StreamReplica* replica); void deactivateStreamReplica(StreamReplica* replica); void removeStreamReplica(StreamReplica* replica); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); static void onSourceClosure(void* clientData); void onSourceClosure(); void deliverReceivedFrame(); private: FramedSource* fInputSource; Boolean fDeleteWhenLastReplicaDies, fInputSourceHasClosed; unsigned fNumReplicas, fNumActiveReplicas, fNumDeliveriesMadeSoFar; int fFrameIndex; // 0 or 1; used to figure out if a replica is requesting the current frame, or the next frame StreamReplica* fMasterReplica; // the first replica that requests each frame. We use its buffer when copying to the others. StreamReplica* fReplicasAwaitingCurrentFrame; // other than the 'master' replica StreamReplica* fReplicasAwaitingNextFrame; // replicas that have already received the current frame, and have asked for the next }; #endif live/liveMedia/include/MediaSession.hh000444 001751 000000 00000036523 12265042432 020176 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A data structure that represents a session that consists of // potentially multiple (audio and/or video) sub-sessions // (This data structure is used for media *receivers* - i.e., clients. // For media streamers, use "ServerMediaSession" instead.) // C++ header /* NOTE: To support receiving your own custom RTP payload format, you must first define a new subclass of "MultiFramedRTPSource" (or "BasicUDPSource") that implements it. 
Then define your own subclass of "MediaSession" and "MediaSubsession", as follows: - In your subclass of "MediaSession" (named, for example, "myMediaSession"): - Define and implement your own static member function static myMediaSession* createNew(UsageEnvironment& env, char const* sdpDescription); and call this - instead of "MediaSession::createNew()" - in your application, when you create a new "MediaSession" object. - Reimplement the "createNewMediaSubsession()" virtual function, as follows: MediaSubsession* myMediaSession::createNewMediaSubsession() { return new myMediaSubsession(*this); } - In your subclass of "MediaSubsession" (named, for example, "myMediaSubsession"): - Reimplement the "createSourceObjects()" virtual function, perhaps similar to this: Boolean myMediaSubsession::createSourceObjects(int useSpecialRTPoffset) { if (strcmp(fCodecName, "X-MY-RTP-PAYLOAD-FORMAT") == 0) { // This subsession uses our custom RTP payload format: fReadSource = fRTPSource = myRTPPayloadFormatRTPSource::createNew( ); return True; } else { // This subsession uses some other RTP payload format - perhaps one that we already implement: return ::createSourceObjects(useSpecialRTPoffset); } } */ #ifndef _MEDIA_SESSION_HH #define _MEDIA_SESSION_HH #ifndef _RTCP_HH #include "RTCP.hh" #endif #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif class MediaSubsession; // forward class MediaSession: public Medium { public: static MediaSession* createNew(UsageEnvironment& env, char const* sdpDescription); static Boolean lookupByName(UsageEnvironment& env, char const* sourceName, MediaSession*& resultSession); Boolean hasSubsessions() const { return fSubsessionsHead != NULL; } char* connectionEndpointName() const { return fConnectionEndpointName; } char const* CNAME() const { return fCNAME; } struct in_addr const& sourceFilterAddr() const { return fSourceFilterAddr; } float& scale() { return fScale; } char* mediaSessionType() const { return fMediaSessionType; } char* sessionName() const { return fSessionName; } char* sessionDescription() const { return fSessionDescription; } char const* controlPath() const { return fControlPath; } double& playStartTime() { return fMaxPlayStartTime; } double& playEndTime() { return fMaxPlayEndTime; } char* absStartTime() const; char* absEndTime() const; // Used only to set the local fields: char*& _absStartTime() { return fAbsStartTime; } char*& _absEndTime() { return fAbsEndTime; } Boolean initiateByMediaType(char const* mimeType, MediaSubsession*& resultSubsession, int useSpecialRTPoffset = -1); // Initiates the first subsession with the specified MIME type // Returns the resulting subsession, or 'multi source' (not both) protected: // redefined virtual functions virtual Boolean isMediaSession() const; protected: MediaSession(UsageEnvironment& env); // called only by createNew(); virtual ~MediaSession(); virtual MediaSubsession* createNewMediaSubsession(); Boolean initializeWithSDP(char const* sdpDescription); Boolean parseSDPLine(char const* input, char const*& nextLine); Boolean parseSDPLine_s(char const* sdpLine); Boolean parseSDPLine_i(char const* sdpLine); Boolean parseSDPLine_c(char const* sdpLine); Boolean parseSDPAttribute_type(char const* sdpLine); Boolean parseSDPAttribute_control(char const* sdpLine); Boolean parseSDPAttribute_range(char const* sdpLine); Boolean parseSDPAttribute_source_filter(char const* sdpLine); static char* lookupPayloadFormat(unsigned char rtpPayloadType, unsigned& rtpTimestampFrequency, unsigned& numChannels); static unsigned 
guessRTPTimestampFrequency(char const* mediumName, char const* codecName);

protected:
  friend class MediaSubsessionIterator;
  char* fCNAME; // used for RTCP

  // Linkage fields:
  MediaSubsession* fSubsessionsHead;
  MediaSubsession* fSubsessionsTail;

  // Fields set from a SDP description:
  char* fConnectionEndpointName;
  double fMaxPlayStartTime;
  double fMaxPlayEndTime;
  char* fAbsStartTime;
  char* fAbsEndTime;
  struct in_addr fSourceFilterAddr; // used for SSM
  float fScale; // set from a RTSP "Scale:" header
  char* fMediaSessionType; // holds a=type value
  char* fSessionName; // holds s= value
  char* fSessionDescription; // holds i= value
  char* fControlPath; // holds optional a=control: string
};

class MediaSubsessionIterator {
public:
  MediaSubsessionIterator(MediaSession const& session);
  virtual ~MediaSubsessionIterator();

  MediaSubsession* next(); // NULL if none
  void reset();

private:
  MediaSession const& fOurSession;
  MediaSubsession* fNextPtr;
};

class MediaSubsession {
public:
  MediaSession& parentSession() { return fParent; }
  MediaSession const& parentSession() const { return fParent; }

  unsigned short clientPortNum() const { return fClientPortNum; }
  unsigned char rtpPayloadFormat() const { return fRTPPayloadFormat; }
  char const* savedSDPLines() const { return fSavedSDPLines; }
  char const* mediumName() const { return fMediumName; }
  char const* codecName() const { return fCodecName; }
  char const* protocolName() const { return fProtocolName; }
  char const* controlPath() const { return fControlPath; }
  Boolean isSSM() const { return fSourceFilterAddr.s_addr != 0; }

  unsigned short videoWidth() const { return fVideoWidth; }
  unsigned short videoHeight() const { return fVideoHeight; }
  unsigned videoFPS() const { return fVideoFPS; }
  unsigned numChannels() const { return fNumChannels; }
  float& scale() { return fScale; }

  RTPSource* rtpSource() { return fRTPSource; }
  RTCPInstance* rtcpInstance() { return fRTCPInstance; }
  unsigned rtpTimestampFrequency() const { return fRTPTimestampFrequency; }
  FramedSource* readSource() { return fReadSource; }
    // This is the source that client sinks read from.  It is usually
    // (but not necessarily) the same as "rtpSource()"
  void addFilter(FramedFilter* filter);
    // Changes "readSource()" to "filter" (which must have just been created with "readSource()" as its input)

  double playStartTime() const;
  double playEndTime() const;
  char* absStartTime() const;
  char* absEndTime() const;
  // Used only to set the local fields:
  double& _playStartTime() { return fPlayStartTime; }
  double& _playEndTime() { return fPlayEndTime; }
  char*& _absStartTime() { return fAbsStartTime; }
  char*& _absEndTime() { return fAbsEndTime; }

  Boolean initiate(int useSpecialRTPoffset = -1);
    // Creates a "RTPSource" for this subsession. (Has no effect if it's
    // already been created.)  Returns True iff this succeeds.
  void deInitiate(); // Destroys any previously created RTPSource
  Boolean setClientPortNum(unsigned short portNum);
    // Sets the preferred client port number that any "RTPSource" for
    // this subsession would use.  (By default, the client port number
    // is gotten from the original SDP description, or - if the SDP
    // description does not specify a client port number - an ephemeral
    // (even) port number is chosen.)  This routine must *not* be
    // called after initiate().
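  /* Editor's usage sketch (illustrative only): the typical receiver-side flow, given a
     SDP description (e.g., from a RTSP "DESCRIBE" response) and an existing "env":

       MediaSession* session = MediaSession::createNew(env, sdpDescription);
       MediaSubsessionIterator iter(*session);
       MediaSubsession* subsession;
       while ((subsession = iter.next()) != NULL) {
         if (subsession->initiate()) {
           // "subsession->readSource()" is now ready to be played into a sink
         }
       }
  */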
void receiveRawMP3ADUs() { fReceiveRawMP3ADUs = True; } // optional hack for audio/MPA-ROBUST; must not be called after initiate() void receiveRawJPEGFrames() { fReceiveRawJPEGFrames = True; } // optional hack for video/JPEG; must not be called after initiate() char*& connectionEndpointName() { return fConnectionEndpointName; } char const* connectionEndpointName() const { return fConnectionEndpointName; } // 'Bandwidth' parameter, set in the "b=" SDP line: unsigned bandwidth() const { return fBandwidth; } // Various parameters set in "a=fmtp:" SDP lines: unsigned fmtp_auxiliarydatasizelength() const { return fAuxiliarydatasizelength; } unsigned fmtp_constantduration() const { return fConstantduration; } unsigned fmtp_constantsize() const { return fConstantsize; } unsigned fmtp_crc() const { return fCRC; } unsigned fmtp_ctsdeltalength() const { return fCtsdeltalength; } unsigned fmtp_de_interleavebuffersize() const { return fDe_interleavebuffersize; } unsigned fmtp_dtsdeltalength() const { return fDtsdeltalength; } unsigned fmtp_indexdeltalength() const { return fIndexdeltalength; } unsigned fmtp_indexlength() const { return fIndexlength; } unsigned fmtp_interleaving() const { return fInterleaving; } unsigned fmtp_maxdisplacement() const { return fMaxdisplacement; } unsigned fmtp_objecttype() const { return fObjecttype; } unsigned fmtp_octetalign() const { return fOctetalign; } unsigned fmtp_profile_level_id() const { return fProfile_level_id; } unsigned fmtp_robustsorting() const { return fRobustsorting; } unsigned fmtp_sizelength() const { return fSizelength; } unsigned fmtp_streamstateindication() const { return fStreamstateindication; } unsigned fmtp_streamtype() const { return fStreamtype; } Boolean fmtp_cpresent() const { return fCpresent; } Boolean fmtp_randomaccessindication() const { return fRandomaccessindication; } char const* fmtp_config() const { return fConfig; } char const* fmtp_configuration() const { return fmtp_config(); } char const* fmtp_mode() const { return fMode; } char const* fmtp_spropparametersets() const { return fSpropParameterSets; } char const* fmtp_emphasis() const { return fEmphasis; } char const* fmtp_channelorder() const { return fChannelOrder; } netAddressBits connectionEndpointAddress() const; // Converts "fConnectionEndpointName" to an address (or 0 if unknown) void setDestinations(netAddressBits defaultDestAddress); // Uses "fConnectionEndpointName" and "serverPortNum" to set // the destination address and port of the RTP and RTCP objects. // This is typically called by RTSP clients after doing "SETUP". char const* sessionId() const { return fSessionId; } void setSessionId(char const* sessionId); // Public fields that external callers can use to keep state. // (They are responsible for all storage management on these fields) unsigned short serverPortNum; // in host byte order (used by RTSP) unsigned char rtpChannelId, rtcpChannelId; // used by RTSP (for RTP/TCP) MediaSink* sink; // callers can use this to keep track of who's playing us void* miscPtr; // callers can use this for whatever they want // Parameters set from a RTSP "RTP-Info:" header: struct { u_int16_t seqNum; u_int32_t timestamp; Boolean infoIsNew; // not part of the RTSP header; instead, set whenever this struct is filled in } rtpInfo; double getNormalPlayTime(struct timeval const& presentationTime); // Computes the stream's "Normal Play Time" (NPT) from the given "presentationTime". // (For the definition of "Normal Play Time", see RFC 2326, section 3.6.)
// This function is useful only if the "rtpInfo" structure was previously filled in // (e.g., by a "RTP-Info:" header in a RTSP response). // Also, for this function to work properly, the RTP stream's presentation times must (eventually) be // synchronized via RTCP. // (Note: If this function returns a negative number, then the result should be ignored by the caller.) protected: friend class MediaSession; friend class MediaSubsessionIterator; MediaSubsession(MediaSession& parent); virtual ~MediaSubsession(); UsageEnvironment& env() { return fParent.envir(); } void setNext(MediaSubsession* next) { fNext = next; } Boolean parseSDPLine_c(char const* sdpLine); Boolean parseSDPLine_b(char const* sdpLine); Boolean parseSDPAttribute_rtpmap(char const* sdpLine); Boolean parseSDPAttribute_control(char const* sdpLine); Boolean parseSDPAttribute_range(char const* sdpLine); Boolean parseSDPAttribute_fmtp(char const* sdpLine); Boolean parseSDPAttribute_source_filter(char const* sdpLine); Boolean parseSDPAttribute_x_dimensions(char const* sdpLine); Boolean parseSDPAttribute_framerate(char const* sdpLine); virtual Boolean createSourceObjects(int useSpecialRTPoffset); // create "fRTPSource" and "fReadSource" member objects, after we've been initialized via SDP protected: // Linkage fields: MediaSession& fParent; MediaSubsession* fNext; // Fields set from a SDP description: char* fConnectionEndpointName; // may also be set by RTSP SETUP response unsigned short fClientPortNum; // in host byte order // This field is also set by initiate() unsigned char fRTPPayloadFormat; char* fSavedSDPLines; char* fMediumName; char* fCodecName; char* fProtocolName; unsigned fRTPTimestampFrequency; char* fControlPath; // holds optional a=control: string struct in_addr fSourceFilterAddr; // used for SSM unsigned fBandwidth; // in kilobits-per-second, from b= line // Parameters set by "a=fmtp:" SDP lines: unsigned fAuxiliarydatasizelength, fConstantduration, fConstantsize; unsigned fCRC, fCtsdeltalength, fDe_interleavebuffersize, fDtsdeltalength; unsigned fIndexdeltalength, fIndexlength, fInterleaving; unsigned fMaxdisplacement, fObjecttype; unsigned fOctetalign, fProfile_level_id, fRobustsorting; unsigned fSizelength, fStreamstateindication, fStreamtype; Boolean fCpresent, fRandomaccessindication; char *fConfig, *fMode, *fSpropParameterSets, *fEmphasis, *fChannelOrder; double fPlayStartTime; double fPlayEndTime; char* fAbsStartTime; char* fAbsEndTime; unsigned short fVideoWidth, fVideoHeight; // screen dimensions (set by an optional a=x-dimensions: <w>,<h> line) unsigned fVideoFPS; // frame rate (set by an optional "a=framerate: <fps>" or "a=x-framerate: <fps>" line) unsigned fNumChannels; // optionally set by "a=rtpmap:" lines for audio sessions.
Default: 1 float fScale; // set from a RTSP "Scale:" header double fNPT_PTS_Offset; // set by "getNormalPlayTime()"; add this to a PTS to get NPT // Fields set or used by initiate(): Groupsock* fRTPSocket; Groupsock* fRTCPSocket; // works even for unicast RTPSource* fRTPSource; RTCPInstance* fRTCPInstance; FramedSource* fReadSource; Boolean fReceiveRawMP3ADUs, fReceiveRawJPEGFrames; // Other fields: char* fSessionId; // used by RTSP }; #endif live/liveMedia/include/TextRTPSink.hh000444 001751 000000 00000002655 12265042432 017751 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A generic RTP sink for text codecs (abstract base class) // C++ header #ifndef _TEXT_RTP_SINK_HH #define _TEXT_RTP_SINK_HH #ifndef _MULTI_FRAMED_RTP_SINK_HH #include "MultiFramedRTPSink.hh" #endif class TextRTPSink: public MultiFramedRTPSink { protected: TextRTPSink(UsageEnvironment& env, Groupsock* rtpgs, unsigned char rtpPayloadType, unsigned rtpTimestampFrequency, char const* rtpPayloadFormatName); // (we're an abstract base class) virtual ~TextRTPSink(); private: // redefined virtual functions: virtual char const* sdpMediaType() const; }; #endif live/liveMedia/include/RTPSource.hh000444 001751 000000 00000021172 12265042432 017433 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
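/* Editor's illustrative sketch (not part of the original header): typical
   client-side use of a "RTPSource", obtained via the
   "MediaSubsession::rtpSource()" accessor declared above.  The variable name
   "subsession" is hypothetical:

     RTPSource* rtpSource = subsession->rtpSource();
     if (rtpSource != NULL) {
       rtpSource->setPacketReorderingThresholdTime(100000); // 100 ms, in microseconds
       unsigned numPacketsReceived
         = rtpSource->receptionStatsDB().totNumPacketsReceived();
     }
*/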
// RTP Sources // C++ header #ifndef _RTP_SOURCE_HH #define _RTP_SOURCE_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif #ifndef _RTP_INTERFACE_HH #include "RTPInterface.hh" #endif class RTPReceptionStatsDB; // forward class RTPSource: public FramedSource { public: static Boolean lookupByName(UsageEnvironment& env, char const* sourceName, RTPSource*& resultSource); Boolean curPacketMarkerBit() const { return fCurPacketMarkerBit; } unsigned char rtpPayloadFormat() const { return fRTPPayloadFormat; } virtual Boolean hasBeenSynchronizedUsingRTCP(); Groupsock* RTPgs() const { return fRTPInterface.gs(); } virtual void setPacketReorderingThresholdTime(unsigned uSeconds) = 0; // used by RTCP: u_int32_t SSRC() const { return fSSRC; } // Note: This is *our* SSRC, not the SSRC in incoming RTP packets. // later need a means of changing the SSRC if there's a collision ##### unsigned timestampFrequency() const {return fTimestampFrequency;} RTPReceptionStatsDB& receptionStatsDB() const { return *fReceptionStatsDB; } u_int32_t lastReceivedSSRC() const { return fLastReceivedSSRC; } // Note: This is the SSRC in the most recently received RTP packet; not *our* SSRC Boolean& enableRTCPReports() { return fEnableRTCPReports; } Boolean const& enableRTCPReports() const { return fEnableRTCPReports; } void setStreamSocket(int sockNum, unsigned char streamChannelId) { // hack to allow sending RTP over TCP (RFC 2326, section 10.12) fRTPInterface.setStreamSocket(sockNum, streamChannelId); } void setAuxilliaryReadHandler(AuxHandlerFunc* handlerFunc, void* handlerClientData) { fRTPInterface.setAuxilliaryReadHandler(handlerFunc, handlerClientData); } // Note that RTP receivers will usually not need to call either of the following two functions, because // RTP sequence numbers and timestamps are usually not useful to receivers. // (Our implementation of RTP reception already does all needed handling of RTP sequence numbers and timestamps.) u_int16_t curPacketRTPSeqNum() const { return fCurPacketRTPSeqNum; } private: friend class MediaSubsession; // "MediaSubsession" is the only outside class that ever needs to see RTP timestamps!
u_int32_t curPacketRTPTimestamp() const { return fCurPacketRTPTimestamp; } protected: RTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency); // abstract base class virtual ~RTPSource(); protected: RTPInterface fRTPInterface; u_int16_t fCurPacketRTPSeqNum; u_int32_t fCurPacketRTPTimestamp; Boolean fCurPacketMarkerBit; Boolean fCurPacketHasBeenSynchronizedUsingRTCP; u_int32_t fLastReceivedSSRC; private: // redefined virtual functions: virtual Boolean isRTPSource() const; virtual void getAttributes() const; private: unsigned char fRTPPayloadFormat; unsigned fTimestampFrequency; u_int32_t fSSRC; Boolean fEnableRTCPReports; // whether RTCP "RR" reports should be sent for this source (default: True) RTPReceptionStatsDB* fReceptionStatsDB; }; class RTPReceptionStats; // forward class RTPReceptionStatsDB { public: unsigned totNumPacketsReceived() const { return fTotNumPacketsReceived; } unsigned numActiveSourcesSinceLastReset() const { return fNumActiveSourcesSinceLastReset; } void reset(); // resets periodic stats (called each time they're used to // generate a reception report) class Iterator { public: Iterator(RTPReceptionStatsDB& receptionStatsDB); virtual ~Iterator(); RTPReceptionStats* next(Boolean includeInactiveSources = False); // NULL if none private: HashTable::Iterator* fIter; }; // The following is called whenever a RTP packet is received: void noteIncomingPacket(u_int32_t SSRC, u_int16_t seqNum, u_int32_t rtpTimestamp, unsigned timestampFrequency, Boolean useForJitterCalculation, struct timeval& resultPresentationTime, Boolean& resultHasBeenSyncedUsingRTCP, unsigned packetSize /* payload only */); // The following is called whenever a RTCP SR packet is received: void noteIncomingSR(u_int32_t SSRC, u_int32_t ntpTimestampMSW, u_int32_t ntpTimestampLSW, u_int32_t rtpTimestamp); // The following is called when a RTCP BYE packet is received: void removeRecord(u_int32_t SSRC); RTPReceptionStats* lookup(u_int32_t SSRC) const; protected: // constructor and destructor, called only by RTPSource: friend class RTPSource; RTPReceptionStatsDB(); virtual ~RTPReceptionStatsDB(); protected: void add(u_int32_t SSRC, RTPReceptionStats* stats); protected: friend class Iterator; unsigned fNumActiveSourcesSinceLastReset; private: HashTable* fTable; unsigned fTotNumPacketsReceived; // for all SSRCs }; class RTPReceptionStats { public: u_int32_t SSRC() const { return fSSRC; } unsigned numPacketsReceivedSinceLastReset() const { return fNumPacketsReceivedSinceLastReset; } unsigned totNumPacketsReceived() const { return fTotNumPacketsReceived; } double totNumKBytesReceived() const; unsigned totNumPacketsExpected() const { return (fHighestExtSeqNumReceived - fBaseExtSeqNumReceived) + 1; } unsigned baseExtSeqNumReceived() const { return fBaseExtSeqNumReceived; } unsigned lastResetExtSeqNumReceived() const { return fLastResetExtSeqNumReceived; } unsigned highestExtSeqNumReceived() const { return fHighestExtSeqNumReceived; } unsigned jitter() const; unsigned lastReceivedSR_NTPmsw() const { return fLastReceivedSR_NTPmsw; } unsigned lastReceivedSR_NTPlsw() const { return fLastReceivedSR_NTPlsw; } struct timeval const& lastReceivedSR_time() const { return fLastReceivedSR_time; } unsigned minInterPacketGapUS() const { return fMinInterPacketGapUS; } unsigned maxInterPacketGapUS() const { return fMaxInterPacketGapUS; } struct timeval const& totalInterPacketGaps() const { return fTotalInterPacketGaps; } protected: // called only by RTPReceptionStatsDB: friend 
class RTPReceptionStatsDB; RTPReceptionStats(u_int32_t SSRC, u_int16_t initialSeqNum); RTPReceptionStats(u_int32_t SSRC); virtual ~RTPReceptionStats(); private: void noteIncomingPacket(u_int16_t seqNum, u_int32_t rtpTimestamp, unsigned timestampFrequency, Boolean useForJitterCalculation, struct timeval& resultPresentationTime, Boolean& resultHasBeenSyncedUsingRTCP, unsigned packetSize /* payload only */); void noteIncomingSR(u_int32_t ntpTimestampMSW, u_int32_t ntpTimestampLSW, u_int32_t rtpTimestamp); void init(u_int32_t SSRC); void initSeqNum(u_int16_t initialSeqNum); void reset(); // resets periodic stats (called each time they're used to // generate a reception report) protected: u_int32_t fSSRC; unsigned fNumPacketsReceivedSinceLastReset; unsigned fTotNumPacketsReceived; u_int32_t fTotBytesReceived_hi, fTotBytesReceived_lo; Boolean fHaveSeenInitialSequenceNumber; unsigned fBaseExtSeqNumReceived; unsigned fLastResetExtSeqNumReceived; unsigned fHighestExtSeqNumReceived; int fLastTransit; // used in the jitter calculation u_int32_t fPreviousPacketRTPTimestamp; double fJitter; // The following are recorded whenever we receive a RTCP SR for this SSRC: unsigned fLastReceivedSR_NTPmsw; // NTP timestamp (from SR), most-signif unsigned fLastReceivedSR_NTPlsw; // NTP timestamp (from SR), least-signif struct timeval fLastReceivedSR_time; struct timeval fLastPacketReceptionTime; unsigned fMinInterPacketGapUS, fMaxInterPacketGapUS; struct timeval fTotalInterPacketGaps; private: // Used to convert from RTP timestamp to 'wall clock' time: Boolean fHasBeenSynchronized; u_int32_t fSyncTimestamp; struct timeval fSyncTime; }; Boolean seqNumLT(u_int16_t s1, u_int16_t s2); // a 'less-than' on 16-bit sequence numbers #endif live/liveMedia/include/RTSPServer.hh000444 001751 000000 00000050602 12265042432 017564 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
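/* Editor's illustrative sketch (not part of the original header): creating a
   "RTSPServer" with optional digest authentication.  The user name and
   password values are hypothetical placeholders:

     TaskScheduler* scheduler = BasicTaskScheduler::createNew();
     UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

     UserAuthenticationDatabase* authDB = new UserAuthenticationDatabase;
     authDB->addUserRecord("username", "password"); // hypothetical credentials

     RTSPServer* rtspServer = RTSPServer::createNew(*env, 554, authDB);
     if (rtspServer == NULL) {
       *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
       exit(1);
     }
     // ... add "ServerMediaSession"s here, then:
     env->taskScheduler().doEventLoop(); // does not return
*/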
// A RTSP server // C++ header #ifndef _RTSP_SERVER_HH #define _RTSP_SERVER_HH #ifndef _SERVER_MEDIA_SESSION_HH #include "ServerMediaSession.hh" #endif #ifndef _NET_ADDRESS_HH #include <NetAddress.hh> #endif #ifndef _DIGEST_AUTHENTICATION_HH #include "DigestAuthentication.hh" #endif // A data structure used for optional user/password authentication: class UserAuthenticationDatabase { public: UserAuthenticationDatabase(char const* realm = NULL, Boolean passwordsAreMD5 = False); // If "passwordsAreMD5" is True, then each password stored into, or removed from, // the database is actually the value computed // by md5(<username>:<realm>:<password>) virtual ~UserAuthenticationDatabase(); virtual void addUserRecord(char const* username, char const* password); virtual void removeUserRecord(char const* username); virtual char const* lookupPassword(char const* username); // returns NULL if the user name was not present char const* realm() { return fRealm; } Boolean passwordsAreMD5() { return fPasswordsAreMD5; } protected: HashTable* fTable; char* fRealm; Boolean fPasswordsAreMD5; }; #ifndef RTSP_BUFFER_SIZE #define RTSP_BUFFER_SIZE 10000 // for incoming requests, and outgoing responses #endif class RTSPServer: public Medium { public: static RTSPServer* createNew(UsageEnvironment& env, Port ourPort = 554, UserAuthenticationDatabase* authDatabase = NULL, unsigned reclamationTestSeconds = 65); // If ourPort.num() == 0, we'll choose the port number // Note: The caller is responsible for reclaiming "authDatabase" // If "reclamationTestSeconds" > 0, then the "RTSPClientSession" state for // each client will get reclaimed (and the corresponding RTP stream(s) // torn down) if no RTSP commands - or RTCP "RR" packets - from the // client are received in at least "reclamationTestSeconds" seconds. static Boolean lookupByName(UsageEnvironment& env, char const* name, RTSPServer*& resultServer); void addServerMediaSession(ServerMediaSession* serverMediaSession); virtual ServerMediaSession* lookupServerMediaSession(char const* streamName); void removeServerMediaSession(ServerMediaSession* serverMediaSession); // Removes the "ServerMediaSession" object from our lookup table, so it will no longer be accessible by new RTSP clients. // (However, any *existing* RTSP client sessions that use this "ServerMediaSession" object will continue streaming. // The "ServerMediaSession" object will not get deleted until all of these RTSP client sessions have closed.) // (To both delete the "ServerMediaSession" object *and* close all RTSP client sessions that use it, // call "deleteServerMediaSession(serverMediaSession)" instead.) void removeServerMediaSession(char const* streamName); // ditto void closeAllClientSessionsForServerMediaSession(ServerMediaSession* serverMediaSession); // Closes (from the server) all RTSP client sessions that are currently using this "ServerMediaSession" object. // Note, however, that the "ServerMediaSession" object remains accessible by new RTSP clients.
void closeAllClientSessionsForServerMediaSession(char const* streamName); // ditto void deleteServerMediaSession(ServerMediaSession* serverMediaSession); // Equivalent to: // "closeAllClientSessionsForServerMediaSession(serverMediaSession); removeServerMediaSession(serverMediaSession);" void deleteServerMediaSession(char const* streamName); // Equivalent to: // "closeAllClientSessionsForServerMediaSession(streamName); removeServerMediaSession(streamName);" typedef void (responseHandlerForREGISTER)(RTSPServer* rtspServer, unsigned requestId, int resultCode, char* resultString); unsigned registerStream(ServerMediaSession* serverMediaSession, char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, responseHandlerForREGISTER* responseHandler, char const* username = NULL, char const* password = NULL, Boolean receiveOurStreamViaTCP = False, char const* proxyURLSuffix = NULL); // 'Register' the stream represented by "serverMediaSession" with the given remote client (specified by name and port number). // This is done using our custom "REGISTER" RTSP command. // The function returns a unique number that can be used to identify the request; this number is also passed to "responseHandler". // When a response is received from the remote client (or the "REGISTER" request fails), the specified response handler // (if non-NULL) is called. (Note that the "resultString" passed to the handler was dynamically allocated, // and should be delete[]d by the handler after use.) // If "receiveOurStreamViaTCP" is True, then we're requesting that the remote client access our stream using RTP/RTCP-over-TCP. // (Otherwise, the remote client may choose regular RTP/RTCP-over-UDP streaming.) // "proxyURLSuffix" (optional) is used only when the remote client is also a proxy server. // It tells the proxy server the suffix that it should use in its "rtsp://" URL (when front-end clients access the stream) char* rtspURL(ServerMediaSession const* serverMediaSession, int clientSocket = -1) const; // returns a "rtsp://" URL that could be used to access the // specified session (which must already have been added to // us using "addServerMediaSession()"). // This string is dynamically allocated; caller should delete[] // (If "clientSocket" is non-negative, then it is used (by calling "getsockname()") to determine // the IP address to be used in the URL.) char* rtspURLPrefix(int clientSocket = -1) const; // like "rtspURL()", except that it returns just the common prefix used by // each session's "rtsp://" URL. // This string is dynamically allocated; caller should delete[] UserAuthenticationDatabase* setAuthenticationDatabase(UserAuthenticationDatabase* newDB); // Changes the server's authentication database to "newDB", returning a pointer to the old database (if there was one). // "newDB" may be NULL (you can use this to disable authentication at runtime, if desired). Boolean setUpTunnelingOverHTTP(Port httpPort); // (Attempts to) enable RTSP-over-HTTP tunneling on the specified port. // Returns True iff the specified port can be used in this way (i.e., it's not already being used for a separate HTTP server). // Note: RTSP-over-HTTP tunneling is described in http://developer.apple.com/quicktime/icefloe/dispatch028.html portNumBits httpServerPortNum() const; // in host byte order. (Returns 0 if not present.)
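/* Editor's illustrative sketch (not part of the original header): also enabling
   RTSP-over-HTTP tunneling, trying a few conventional port numbers in turn,
   as the library's test programs do:

     if (rtspServer->setUpTunnelingOverHTTP(80) ||
         rtspServer->setUpTunnelingOverHTTP(8000) ||
         rtspServer->setUpTunnelingOverHTTP(8080)) {
       *env << "(We use port " << rtspServer->httpServerPortNum()
            << " for optional RTSP-over-HTTP tunneling.)\n";
     }
*/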
protected: RTSPServer(UsageEnvironment& env, int ourSocket, Port ourPort, UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds); // called only by createNew(); virtual ~RTSPServer(); static int setUpOurSocket(UsageEnvironment& env, Port& ourPort); virtual char const* allowedCommandNames(); // used to implement "RTSPClientConnection::handleCmd_OPTIONS()" virtual Boolean weImplementREGISTER(char const* proxyURLSuffix, char*& responseStr); // used to implement "RTSPClientConnection::handleCmd_REGISTER()" // Note: "responseStr" is dynamically allocated (or NULL), and should be delete[]d after the call virtual void implementCmd_REGISTER(char const* url, char const* urlSuffix, int socketToRemoteServer, Boolean deliverViaTCP, char const* proxyURLSuffix); // used to implement "RTSPClientConnection::handleCmd_REGISTER()" virtual UserAuthenticationDatabase* getAuthenticationDatabaseForCommand(char const* cmdName); virtual Boolean specialClientAccessCheck(int clientSocket, struct sockaddr_in& clientAddr, char const* urlSuffix); // a hook that allows subclassed servers to do server-specific access checking // on each client (e.g., based on client IP address), without using digest authentication. virtual Boolean specialClientUserAccessCheck(int clientSocket, struct sockaddr_in& clientAddr, char const* urlSuffix, char const *username); // another hook that allows subclassed servers to do server-specific access checking // - this time after normal digest authentication has already taken place (and would otherwise allow access). // (This test can only be used to further restrict access, not to grant additional access.) private: // redefined virtual functions virtual Boolean isRTSPServer() const; public: // should be protected, but some old compilers complain otherwise class RTSPClientSession; // forward // The state of a TCP connection used by a RTSP client: class RTSPClientConnection { public: RTSPClientConnection(RTSPServer& ourServer, int clientSocket, struct sockaddr_in clientAddr); virtual ~RTSPClientConnection(); // A data structure that's used to implement the "REGISTER" command: class ParamsForREGISTER { public: ParamsForREGISTER(RTSPClientConnection* ourConnection, char const* url, char const* urlSuffix, Boolean reuseConnection, Boolean deliverViaTCP, char const* proxyURLSuffix); virtual ~ParamsForREGISTER(); private: friend class RTSPClientConnection; RTSPClientConnection* fOurConnection; char* fURL; char* fURLSuffix; Boolean fReuseConnection, fDeliverViaTCP; char* fProxyURLSuffix; }; protected: friend class RTSPClientSession; // Make the handler functions for each command virtual, to allow subclasses to reimplement them, if necessary: virtual void handleCmd_OPTIONS(); // You probably won't need to subclass/reimplement this function; reimplement "RTSPServer::allowedCommandNames()" instead. virtual void handleCmd_GET_PARAMETER(char const* fullRequestStr); // when operating on the entire server virtual void handleCmd_SET_PARAMETER(char const* fullRequestStr); // when operating on the entire server virtual void handleCmd_DESCRIBE(char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr); virtual void handleCmd_REGISTER(char const* url, char const* urlSuffix, char const* fullRequestStr, Boolean reuseConnection, Boolean deliverViaTCP, char const* proxyURLSuffix); // You probably won't need to subclass/reimplement this function; // reimplement "RTSPServer::weImplementREGISTER()" and "RTSPServer::implementCmd_REGISTER()" instead. 
virtual void handleCmd_bad(); virtual void handleCmd_notSupported(); virtual void handleCmd_notFound(); virtual void handleCmd_sessionNotFound(); virtual void handleCmd_unsupportedTransport(); // Support for optional RTSP-over-HTTP tunneling: virtual Boolean parseHTTPRequestString(char* resultCmdName, unsigned resultCmdNameMaxSize, char* urlSuffix, unsigned urlSuffixMaxSize, char* sessionCookie, unsigned sessionCookieMaxSize, char* acceptStr, unsigned acceptStrMaxSize); virtual void handleHTTPCmd_notSupported(); virtual void handleHTTPCmd_notFound(); virtual void handleHTTPCmd_OPTIONS(); virtual void handleHTTPCmd_TunnelingGET(char const* sessionCookie); virtual Boolean handleHTTPCmd_TunnelingPOST(char const* sessionCookie, unsigned char const* extraData, unsigned extraDataSize); virtual void handleHTTPCmd_StreamingGET(char const* urlSuffix, char const* fullRequestStr); protected: UsageEnvironment& envir() { return fOurServer.envir(); } void resetRequestBuffer(); void closeSockets(); static void incomingRequestHandler(void*, int /*mask*/); void incomingRequestHandler1(); static void handleAlternativeRequestByte(void*, u_int8_t requestByte); void handleAlternativeRequestByte1(u_int8_t requestByte); void handleRequestBytes(int newBytesRead); Boolean authenticationOK(char const* cmdName, char const* urlSuffix, char const* fullRequestStr); void changeClientInputSocket(int newSocketNum, unsigned char const* extraData, unsigned extraDataSize); // used to implement RTSP-over-HTTP tunneling static void continueHandlingREGISTER(ParamsForREGISTER* params); virtual void continueHandlingREGISTER1(ParamsForREGISTER* params); // Shortcuts for setting up a RTSP response (prior to sending it): void setRTSPResponse(char const* responseStr); void setRTSPResponse(char const* responseStr, u_int32_t sessionId); void setRTSPResponse(char const* responseStr, char const* contentStr); void setRTSPResponse(char const* responseStr, u_int32_t sessionId, char const* contentStr); RTSPServer& fOurServer; Boolean fIsActive; int fClientInputSocket, fClientOutputSocket; struct sockaddr_in fClientAddr; unsigned char fRequestBuffer[RTSP_BUFFER_SIZE]; unsigned fRequestBytesAlreadySeen, fRequestBufferBytesLeft; unsigned char* fLastCRLF; unsigned char fResponseBuffer[RTSP_BUFFER_SIZE]; unsigned fRecursionCount; char const* fCurrentCSeq; Authenticator fCurrentAuthenticator; // used if access control is needed char* fOurSessionCookie; // used for optional RTSP-over-HTTP tunneling unsigned fBase64RemainderCount; // used for optional RTSP-over-HTTP tunneling (possible values: 0,1,2,3) }; // The state of an individual client session (using one or more sequential TCP connections) handled by a RTSP server: class RTSPClientSession { public: RTSPClientSession(RTSPServer& ourServer, u_int32_t sessionId); virtual ~RTSPClientSession(); protected: friend class RTSPServer; friend class RTSPClientConnection; // Make the handler functions for each command virtual, to allow subclasses to redefine them: virtual void handleCmd_SETUP(RTSPClientConnection* ourClientConnection, char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr); virtual void handleCmd_withinSession(RTSPClientConnection* ourClientConnection, char const* cmdName, char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr); virtual void handleCmd_TEARDOWN(RTSPClientConnection* ourClientConnection, ServerMediaSubsession* subsession); virtual void handleCmd_PLAY(RTSPClientConnection* ourClientConnection, ServerMediaSubsession* subsession, char 
const* fullRequestStr); virtual void handleCmd_PAUSE(RTSPClientConnection* ourClientConnection, ServerMediaSubsession* subsession); virtual void handleCmd_GET_PARAMETER(RTSPClientConnection* ourClientConnection, ServerMediaSubsession* subsession, char const* fullRequestStr); virtual void handleCmd_SET_PARAMETER(RTSPClientConnection* ourClientConnection, ServerMediaSubsession* subsession, char const* fullRequestStr); protected: UsageEnvironment& envir() { return fOurServer.envir(); } void reclaimStreamStates(); Boolean isMulticast() const { return fIsMulticast; } void noteLiveness(); static void noteClientLiveness(RTSPClientSession* clientSession); static void livenessTimeoutTask(RTSPClientSession* clientSession); // Shortcuts for setting up a RTSP response (prior to sending it): void setRTSPResponse(RTSPClientConnection* ourClientConnection, char const* responseStr) { ourClientConnection->setRTSPResponse(responseStr); } void setRTSPResponse(RTSPClientConnection* ourClientConnection, char const* responseStr, u_int32_t sessionId) { ourClientConnection->setRTSPResponse(responseStr, sessionId); } void setRTSPResponse(RTSPClientConnection* ourClientConnection, char const* responseStr, char const* contentStr) { ourClientConnection->setRTSPResponse(responseStr, contentStr); } void setRTSPResponse(RTSPClientConnection* ourClientConnection, char const* responseStr, u_int32_t sessionId, char const* contentStr) { ourClientConnection->setRTSPResponse(responseStr, sessionId, contentStr); } protected: RTSPServer& fOurServer; u_int32_t fOurSessionId; ServerMediaSession* fOurServerMediaSession; Boolean fIsMulticast, fStreamAfterSETUP; unsigned char fTCPStreamIdCount; // used for (optional) RTP/TCP Boolean usesTCPTransport() const { return fTCPStreamIdCount > 0; } TaskToken fLivenessCheckTask; unsigned fNumStreamStates; struct streamState { ServerMediaSubsession* subsession; void* streamToken; } * fStreamStates; }; protected: // If you subclass "RTSPClientConnection", then you must also redefine this virtual function in order // to create new objects of your subclass: virtual RTSPClientConnection* createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr); // If you subclass "RTSPClientSession", then you must also redefine this virtual function in order // to create new objects of your subclass: virtual RTSPClientSession* createNewClientSession(u_int32_t sessionId); // An iterator over our "ServerMediaSession" objects: class ServerMediaSessionIterator { public: ServerMediaSessionIterator(RTSPServer& server); virtual ~ServerMediaSessionIterator(); ServerMediaSession* next(); private: HashTable::Iterator* fOurIterator; }; private: static void incomingConnectionHandlerRTSP(void*, int /*mask*/); void incomingConnectionHandlerRTSP1(); static void incomingConnectionHandlerHTTP(void*, int /*mask*/); void incomingConnectionHandlerHTTP1(); void incomingConnectionHandler(int serverSocket); protected: Port fRTSPServerPort; private: friend class RTSPClientConnection; friend class RTSPClientSession; friend class ServerMediaSessionIterator; friend class RegisterRequestRecord; int fRTSPServerSocket; int fHTTPServerSocket; // for optional RTSP-over-HTTP tunneling Port fHTTPServerPort; // ditto HashTable* fServerMediaSessions; // maps 'stream name' strings to "ServerMediaSession" objects HashTable* fClientConnections; // the "ClientConnection" objects that we're using HashTable* fClientConnectionsForHTTPTunneling; // maps client-supplied 'session cookie' strings to "RTSPClientConnection"s // (used only for 
optional RTSP-over-HTTP tunneling) HashTable* fClientSessions; // maps 'session id' strings to "RTSPClientSession" objects HashTable* fPendingRegisterRequests; unsigned fRegisterRequestCounter; UserAuthenticationDatabase* fAuthDB; unsigned fReclamationTestSeconds; }; ////////// A subclass of "RTSPServer" that implements the "REGISTER" command to set up proxying on the specified URL ////////// class RTSPServerWithREGISTERProxying: public RTSPServer { public: static RTSPServerWithREGISTERProxying* createNew(UsageEnvironment& env, Port ourPort = 554, UserAuthenticationDatabase* authDatabase = NULL, UserAuthenticationDatabase* authDatabaseForREGISTER = NULL, unsigned reclamationTestSeconds = 65, Boolean streamRTPOverTCP = False, int verbosityLevelForProxying = 0); protected: RTSPServerWithREGISTERProxying(UsageEnvironment& env, int ourSocket, Port ourPort, UserAuthenticationDatabase* authDatabase, UserAuthenticationDatabase* authDatabaseForREGISTER, unsigned reclamationTestSeconds, Boolean streamRTPOverTCP, int verbosityLevelForProxying); // called only by createNew(); virtual ~RTSPServerWithREGISTERProxying(); protected: // redefined virtual functions virtual char const* allowedCommandNames(); virtual Boolean weImplementREGISTER(char const* proxyURLSuffix, char*& responseStr); virtual void implementCmd_REGISTER(char const* url, char const* urlSuffix, int socketToRemoteServer, Boolean deliverViaTCP, char const* proxyURLSuffix); virtual UserAuthenticationDatabase* getAuthenticationDatabaseForCommand(char const* cmdName); private: Boolean fStreamRTPOverTCP; int fVerbosityLevelForProxying; unsigned fRegisteredProxyCounter; char* fAllowedCommandNames; UserAuthenticationDatabase* fAuthDBForREGISTER; }; #endif live/liveMedia/include/SimpleRTPSource.hh000444 001751 000000 00000004765 12265042432 020616 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
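/* Editor's illustrative sketch (not part of the original header): creating a
   "SimpleRTPSource" to receive a MPEG Transport Stream over RTP.  Payload
   type 33 and timestamp frequency 90000 are the standard values for "MP2T";
   "rtpGroupsock" is assumed to have been created earlier:

     SimpleRTPSource* rtpSource
       = SimpleRTPSource::createNew(*env, &rtpGroupsock,
                                    33, 90000, "video/MP2T",
                                    0, False); // no special header; ignore the 'M' bit
*/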
// A RTP source for a simple RTP payload format that // - doesn't have any special headers following the RTP header // (if necessary, the "offset" parameter can be used to specify a // special header that we just skip over) // - doesn't have any special framing apart from the packet data itself // C++ header #ifndef _SIMPLE_RTP_SOURCE_HH #define _SIMPLE_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class SimpleRTPSource: public MultiFramedRTPSource { public: static SimpleRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mimeTypeString, unsigned offset = 0, Boolean doNormalMBitRule = True); // "doNormalMBitRule" means: If the medium is not audio, use the RTP "M" // bit on each incoming packet to indicate the last (or only) fragment // of a frame. Otherwise (i.e., if "doNormalMBitRule" is False, or the medium is "audio"), the "M" bit is ignored. protected: virtual ~SimpleRTPSource(); protected: SimpleRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mimeTypeString, unsigned offset, Boolean doNormalMBitRule); // called only by createNew() private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; private: char const* fMIMEtypeString; unsigned fOffset; Boolean fUseMBitForFrameEnd; }; #endif live/liveMedia/include/ByteStreamMultiFileSource.hh000444 001751 000000 00000004542 12265042432 022662 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
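/* Editor's illustrative sketch (not part of the original header): concatenating
   several files into a single byte stream.  The file names are hypothetical;
   note that the array must be terminated by a NULL entry:

     char const* fileNames[] = { "part1.ts", "part2.ts", "part3.ts", NULL };
     ByteStreamMultiFileSource* source
       = ByteStreamMultiFileSource::createNew(*env, fileNames);
     // "source" can then be fed to a framer or sink like any other "FramedSource".
*/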
// A source that consists of multiple byte-stream files, read sequentially // C++ header #ifndef _BYTE_STREAM_MULTI_FILE_SOURCE_HH #define _BYTE_STREAM_MULTI_FILE_SOURCE_HH #ifndef _BYTE_STREAM_FILE_SOURCE_HH #include "ByteStreamFileSource.hh" #endif class ByteStreamMultiFileSource: public FramedSource { public: static ByteStreamMultiFileSource* createNew(UsageEnvironment& env, char const** fileNameArray, unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0); // A 'filename' of NULL indicates the end of the array Boolean haveStartedNewFile() const { return fHaveStartedNewFile; } // True iff the most recently delivered frame was the first from a newly-opened file protected: ByteStreamMultiFileSource(UsageEnvironment& env, char const** fileNameArray, unsigned preferredFrameSize, unsigned playTimePerFrame); // called only by createNew() virtual ~ByteStreamMultiFileSource(); private: // redefined virtual functions: virtual void doGetNextFrame(); private: static void onSourceClosure(void* clientData); void onSourceClosure1(); static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); private: unsigned fPreferredFrameSize; unsigned fPlayTimePerFrame; unsigned fNumSources; unsigned fCurrentlyReadSourceNumber; Boolean fHaveStartedNewFile; char const** fFileNameArray; ByteStreamFileSource** fSourceArray; }; #endif live/liveMedia/include/BasicUDPSource.hh000444 001751 000000 00000003273 12265042432 020362 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
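/* Editor's illustrative sketch (not part of the original header): reading
   complete frames from a UDP port.  The group address and port number are
   hypothetical; a unicast "Groupsock" can be created the same way:

     struct in_addr groupAddr;
     groupAddr.s_addr = our_inet_addr("239.255.42.42"); // hypothetical group
     Groupsock udpGroupsock(*env, groupAddr, Port(1234), 255);
     BasicUDPSource* udpSource = BasicUDPSource::createNew(*env, &udpGroupsock);
     // Each delivered 'frame' is the payload of one received UDP packet.
*/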
// A simple UDP source, where every UDP payload is a complete frame // C++ header #ifndef _BASIC_UDP_SOURCE_HH #define _BASIC_UDP_SOURCE_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif #ifndef _GROUPSOCK_HH #include "Groupsock.hh" #endif class BasicUDPSource: public FramedSource { public: static BasicUDPSource* createNew(UsageEnvironment& env, Groupsock* inputGS); virtual ~BasicUDPSource(); Groupsock* gs() const { return fInputGS; } private: BasicUDPSource(UsageEnvironment& env, Groupsock* inputGS); // called only by createNew() static void incomingPacketHandler(BasicUDPSource* source, int mask); void incomingPacketHandler1(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void doStopGettingFrames(); private: Groupsock* fInputGS; Boolean fHaveStartedReading; }; #endif live/liveMedia/include/GSMAudioRTPSink.hh000444 001751 000000 00000002673 12265042432 020435 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for GSM audio // C++ header #ifndef _GSM_AUDIO_RTP_SINK_HH #define _GSM_AUDIO_RTP_SINK_HH #ifndef _AUDIO_RTP_SINK_HH #include "AudioRTPSink.hh" #endif class GSMAudioRTPSink: public AudioRTPSink { public: static GSMAudioRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs); protected: GSMAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs); // called only by createNew() virtual ~GSMAudioRTPSink(); private: // redefined virtual functions: virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; }; #endif live/liveMedia/include/H263plusVideoRTPSink.hh000444 001751 000000 00000004023 12265042432 021331 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
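/* Editor's illustrative sketch (not part of the original header): streaming
   H.263+ video.  The dynamic payload type 96 is an example value, and
   "videoSource" is assumed to be a framer object (e.g., a
   "H263plusVideoStreamFramer") created elsewhere:

     H263plusVideoRTPSink* videoSink
       = H263plusVideoRTPSink::createNew(*env, &rtpGroupsock, 96);
     videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
*/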
// RTP sink for H.263+ video (RFC 4629) // C++ header #ifndef _H263_PLUS_VIDEO_RTP_SINK_HH #define _H263_PLUS_VIDEO_RTP_SINK_HH #ifndef _VIDEO_RTP_SINK_HH #include "VideoRTPSink.hh" #endif class H263plusVideoRTPSink: public VideoRTPSink { public: static H263plusVideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency = 90000); protected: H263plusVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency); // called only by createNew() virtual ~H263plusVideoRTPSink(); private: // redefined virtual functions: virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; virtual unsigned specialHeaderSize() const; }; #endif live/liveMedia/include/SimpleRTPSink.hh000444 001751 000000 00000005730 12265042432 020253 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simple RTP sink that packs frames into each outgoing // packet, without any fragmentation or special headers. // C++ header #ifndef _SIMPLE_RTP_SINK_HH #define _SIMPLE_RTP_SINK_HH #ifndef _MULTI_FRAMED_RTP_SINK_HH #include "MultiFramedRTPSink.hh" #endif class SimpleRTPSink: public MultiFramedRTPSink { public: static SimpleRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* sdpMediaTypeString, char const* rtpPayloadFormatName, unsigned numChannels = 1, Boolean allowMultipleFramesPerPacket = True, Boolean doNormalMBitRule = True); // "doNormalMBitRule" means: If the medium (i.e., "sdpMediaTypeString") is other than "audio", set the RTP "M" bit // on each outgoing packet iff it contains the last (or only) fragment of a frame. // Otherwise (i.e., if "doNormalMBitRule" is False, or the medium is "audio"), leave the "M" bit unset. 
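/* Editor's illustrative sketch (not part of the original header): a
   "SimpleRTPSink" for a MPEG Transport Stream, as in the library's test
   programs; payload type 33 is the standard static value for "MP2T":

     SimpleRTPSink* videoSink
       = SimpleRTPSink::createNew(*env, &rtpGroupsock, 33, 90000, "video", "MP2T",
                                  1, True, False); // 1 channel; no 'M' bit
*/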
void setMBitOnNextPacket() { fSetMBitOnNextPacket = True; } // hack for optionally setting the RTP 'M' bit from outside the class protected: SimpleRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* sdpMediaTypeString, char const* rtpPayloadFormatName, unsigned numChannels, Boolean allowMultipleFramesPerPacket, Boolean doNormalMBitRule); // called only by createNew() virtual ~SimpleRTPSink(); protected: // redefined virtual functions virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; virtual char const* sdpMediaType() const; private: char const* fSDPMediaTypeString; Boolean fAllowMultipleFramesPerPacket; Boolean fSetMBitOnLastFrames, fSetMBitOnNextPacket; }; #endif live/liveMedia/include/MPEG1or2Demux.hh000444 001751 000000 00000011564 12265042432 020050 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Demultiplexer for a MPEG 1 or 2 Program Stream // C++ header #ifndef _MPEG_1OR2_DEMUX_HH #define _MPEG_1OR2_DEMUX_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif class MPEG1or2DemuxedElementaryStream; // forward class MPEG1or2Demux: public Medium { public: static MPEG1or2Demux* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean reclaimWhenLastESDies = False); // If "reclaimWhenLastESDies" is True, then the demux is deleted when // all "MPEG1or2DemuxedElementaryStream"s that we created get deleted. MPEG1or2DemuxedElementaryStream* newElementaryStream(u_int8_t streamIdTag); // Specialized versions of the above for audio and video: MPEG1or2DemuxedElementaryStream* newAudioStream(); MPEG1or2DemuxedElementaryStream* newVideoStream(); // A hack for getting raw, undemuxed PES packets from the Program Stream: MPEG1or2DemuxedElementaryStream* newRawPESStream(); void getNextFrame(u_int8_t streamIdTag, unsigned char* to, unsigned maxSize, FramedSource::afterGettingFunc* afterGettingFunc, void* afterGettingClientData, FramedSource::onCloseFunc* onCloseFunc, void* onCloseClientData); // similar to FramedSource::getNextFrame(), except that it also // takes a stream id tag as parameter. void stopGettingFrames(u_int8_t streamIdTag); // similar to FramedSource::stopGettingFrames(), except that it also // takes a stream id tag as parameter.
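/* Editor's illustrative sketch (not part of the original header): demultiplexing
   a MPEG Program Stream file into its audio and video elementary streams.
   The input file name is hypothetical:

     ByteStreamFileSource* input
       = ByteStreamFileSource::createNew(*env, "in.mpg");
     MPEG1or2Demux* demux = MPEG1or2Demux::createNew(*env, input);
     FramedSource* audioES = demux->newAudioStream();
     FramedSource* videoES = demux->newVideoStream();
*/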
static void handleClosure(void* clientData); // This should be called (on ourself) if the source is discovered // to be closed (i.e., no longer readable) FramedSource* inputSource() const { return fInputSource; } class SCR { public: SCR(); u_int8_t highBit; u_int32_t remainingBits; u_int16_t extension; Boolean isValid; }; SCR& lastSeenSCR() { return fLastSeenSCR; } unsigned char mpegVersion() const { return fMPEGversion; } void flushInput(); // should be called before any 'seek' on the underlying source private: MPEG1or2Demux(UsageEnvironment& env, FramedSource* inputSource, Boolean reclaimWhenLastESDies); // called only by createNew() virtual ~MPEG1or2Demux(); void registerReadInterest(u_int8_t streamIdTag, unsigned char* to, unsigned maxSize, FramedSource::afterGettingFunc* afterGettingFunc, void* afterGettingClientData, FramedSource::onCloseFunc* onCloseFunc, void* onCloseClientData); Boolean useSavedData(u_int8_t streamIdTag, unsigned char* to, unsigned maxSize, FramedSource::afterGettingFunc* afterGettingFunc, void* afterGettingClientData); static void continueReadProcessing(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime); void continueReadProcessing(); private: friend class MPEG1or2DemuxedElementaryStream; void noteElementaryStreamDeletion(MPEG1or2DemuxedElementaryStream* es); private: FramedSource* fInputSource; SCR fLastSeenSCR; unsigned char fMPEGversion; unsigned char fNextAudioStreamNumber; unsigned char fNextVideoStreamNumber; Boolean fReclaimWhenLastESDies; unsigned fNumOutstandingESs; // A descriptor for each possible stream id tag: typedef struct OutputDescriptor { // input parameters unsigned char* to; unsigned maxSize; FramedSource::afterGettingFunc* fAfterGettingFunc; void* afterGettingClientData; FramedSource::onCloseFunc* fOnCloseFunc; void* onCloseClientData; // output parameters unsigned frameSize; struct timeval presentationTime; class SavedData; // forward SavedData* savedDataHead; SavedData* savedDataTail; unsigned savedDataTotalSize; // status parameters Boolean isPotentiallyReadable; Boolean isCurrentlyActive; Boolean isCurrentlyAwaitingData; } OutputDescriptor_t; OutputDescriptor_t fOutput[256]; unsigned fNumPendingReads; Boolean fHaveUndeliveredData; private: // parsing state class MPEGProgramStreamParser* fParser; friend class MPEGProgramStreamParser; // hack }; #endif live/liveMedia/include/ServerMediaSession.hh000444 001751 000000 00000017641 12265042432 021365 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A data structure that represents a session that consists of // potentially multiple (audio and/or video) sub-sessions // (This data structure is used for media *streamers* - i.e., servers. 
// For media receivers, use "MediaSession" instead.) // C++ header #ifndef _SERVER_MEDIA_SESSION_HH #define _SERVER_MEDIA_SESSION_HH #ifndef _MEDIA_HH #include "Media.hh" #endif #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif #ifndef _GROUPEID_HH #include "GroupEId.hh" #endif #ifndef _RTP_INTERFACE_HH #include "RTPInterface.hh" // for ServerRequestAlternativeByteHandler #endif class ServerMediaSubsession; // forward class ServerMediaSession: public Medium { public: static ServerMediaSession* createNew(UsageEnvironment& env, char const* streamName = NULL, char const* info = NULL, char const* description = NULL, Boolean isSSM = False, char const* miscSDPLines = NULL); static Boolean lookupByName(UsageEnvironment& env, char const* mediumName, ServerMediaSession*& resultSession); char* generateSDPDescription(); // based on the entire session // Note: The caller is responsible for freeing the returned string char const* streamName() const { return fStreamName; } Boolean addSubsession(ServerMediaSubsession* subsession); unsigned numSubsessions() const { return fSubsessionCounter; } void testScaleFactor(float& scale); // sets "scale" to the actual supported scale float duration() const; // a result == 0 means an unbounded session (the default) // a result < 0 means: subsession durations differ; the result is -(the largest). // a result > 0 means: this is the duration of a bounded session unsigned referenceCount() const { return fReferenceCount; } void incrementReferenceCount() { ++fReferenceCount; } void decrementReferenceCount() { if (fReferenceCount > 0) --fReferenceCount; } Boolean& deleteWhenUnreferenced() { return fDeleteWhenUnreferenced; } void deleteAllSubsessions(); // Removes and deletes all subsessions added by "addSubsession()", returning us to an 'empty' state // Note: If you have already added this "ServerMediaSession" to a "RTSPServer" then, before calling this function, // you must first close any client connections that use it, // by calling "RTSPServer::closeAllClientSessionsForServerMediaSession()". 
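/* Editor's illustrative sketch (not part of the original header): building a
   "ServerMediaSession", adding one subsession, and registering it with a
   RTSP server.  The stream and file names are hypothetical:

     ServerMediaSession* sms
       = ServerMediaSession::createNew(*env, "mp3AudioTest", "test.mp3",
                                       "Session streamed by example code");
     sms->addSubsession(MP3AudioFileServerMediaSubsession
                        ::createNew(*env, "test.mp3", False, False, NULL));
     rtspServer->addServerMediaSession(sms);
     // Clients can then access the stream via the URL returned by
     // "rtspServer->rtspURL(sms)".
*/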
protected: ServerMediaSession(UsageEnvironment& env, char const* streamName, char const* info, char const* description, Boolean isSSM, char const* miscSDPLines); // called only by "createNew()" virtual ~ServerMediaSession(); private: // redefined virtual functions virtual Boolean isServerMediaSession() const; private: Boolean fIsSSM; // Linkage fields: friend class ServerMediaSubsessionIterator; ServerMediaSubsession* fSubsessionsHead; ServerMediaSubsession* fSubsessionsTail; unsigned fSubsessionCounter; char* fStreamName; char* fInfoSDPString; char* fDescriptionSDPString; char* fMiscSDPLines; struct timeval fCreationTime; unsigned fReferenceCount; Boolean fDeleteWhenUnreferenced; }; class ServerMediaSubsessionIterator { public: ServerMediaSubsessionIterator(ServerMediaSession& session); virtual ~ServerMediaSubsessionIterator(); ServerMediaSubsession* next(); // NULL if none void reset(); private: ServerMediaSession& fOurSession; ServerMediaSubsession* fNextPtr; }; class ServerMediaSubsession: public Medium { public: unsigned trackNumber() const { return fTrackNumber; } char const* trackId(); virtual char const* sdpLines() = 0; virtual void getStreamParameters(unsigned clientSessionId, // in netAddressBits clientAddress, // in Port const& clientRTPPort, // in Port const& clientRTCPPort, // in int tcpSocketNum, // in (-1 means use UDP, not TCP) unsigned char rtpChannelId, // in (used if TCP) unsigned char rtcpChannelId, // in (used if TCP) netAddressBits& destinationAddress, // in out u_int8_t& destinationTTL, // in out Boolean& isMulticast, // out Port& serverRTPPort, // out Port& serverRTCPPort, // out void*& streamToken // out ) = 0; virtual void startStream(unsigned clientSessionId, void* streamToken, TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData, unsigned short& rtpSeqNum, unsigned& rtpTimestamp, ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler, void* serverRequestAlternativeByteHandlerClientData) = 0; virtual void pauseStream(unsigned clientSessionId, void* streamToken); virtual void seekStream(unsigned clientSessionId, void* streamToken, double& seekNPT, double streamDuration, u_int64_t& numBytes); // This routine is used to seek by relative (i.e., NPT) time. // "streamDuration", if >0.0, specifies how much data to stream, past "seekNPT". (If <=0.0, all remaining data is streamed.) // "numBytes" returns the size (in bytes) of the data to be streamed, or 0 if unknown or unlimited. virtual void seekStream(unsigned clientSessionId, void* streamToken, char*& absStart, char*& absEnd); // This routine is used to seek by 'absolute' time. // "absStart" should be a string of the form "YYYYMMDDTHHMMSSZ" or "YYYYMMDDTHHMMSS.Z". // "absEnd" should be either NULL (for no end time), or a string of the same form as "absStart". // These strings may be modified in-place, or can be reassigned to a newly-allocated value (after delete[]ing the original). virtual void nullSeekStream(unsigned clientSessionId, void* streamToken); // Called whenever we're handling a "PLAY" command without a specified start time. 
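// To illustrate the two "seekStream()" variants above (a sketch only; "ourSessionId" and
// "streamToken" are assumed to come from an earlier "getStreamParameters()" call, and the
// date-time string is a placeholder):
//   double seekNPT = 30.0; u_int64_t numBytes = 0;
//   subsession->seekStream(ourSessionId, streamToken, seekNPT, 0.0/*stream all remaining data*/, numBytes);
// or, for seeking by 'absolute' time:
//   char* absStart = strDup("20140101T120000Z"); char* absEnd = NULL;
//   subsession->seekStream(ourSessionId, streamToken, absStart, absEnd);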
virtual void setStreamScale(unsigned clientSessionId, void* streamToken, float scale); virtual float getCurrentNPT(void* streamToken); virtual FramedSource* getStreamSource(void* streamToken); virtual void deleteStream(unsigned clientSessionId, void*& streamToken); virtual void testScaleFactor(float& scale); // sets "scale" to the actual supported scale virtual float duration() const; // returns 0 for an unbounded session (the default) // returns > 0 for a bounded session virtual void getAbsoluteTimeRange(char*& absStartTime, char*& absEndTime) const; // Subclasses can reimplement this iff they support seeking by 'absolute' time. // The following may be called by (e.g.) SIP servers, for which the // address and port number fields in SDP descriptions need to be non-zero: void setServerAddressAndPortForSDP(netAddressBits addressBits, portNumBits portBits); protected: // we're a virtual base class ServerMediaSubsession(UsageEnvironment& env); virtual ~ServerMediaSubsession(); char const* rangeSDPLine() const; // returns a string to be delete[]d ServerMediaSession* fParentSession; netAddressBits fServerAddressForSDP; portNumBits fPortNumForSDP; private: friend class ServerMediaSession; friend class ServerMediaSubsessionIterator; ServerMediaSubsession* fNext; unsigned fTrackNumber; // within an enclosing ServerMediaSession char const* fTrackId; }; #endif live/liveMedia/include/JPEGVideoRTPSource.hh000444 001751 000000 00000003725 12265042432 021074 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// JPEG Video (RFC 2435) RTP Sources // C++ header #ifndef _JPEG_VIDEO_RTP_SOURCE_HH #define _JPEG_VIDEO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif #define MAX_JPEG_HEADER_SIZE 1024 class JPEGVideoRTPSource: public MultiFramedRTPSource { public: static JPEGVideoRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat = 26, unsigned rtpPayloadFrequency = 90000, unsigned defaultWidth = 0, unsigned defaultHeight = 0); protected: virtual ~JPEGVideoRTPSource(); private: JPEGVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, unsigned defaultWidth, unsigned defaultHeight); // called only by createNew() // Image dimensions from the SDP description, if any unsigned fDefaultWidth, fDefaultHeight; private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; }; #endif live/liveMedia/include/MPEG4VideoFileServerMediaSubsession.hh000444 001751 000000 00000004403 12265042432 024413 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a MPEG-4 video file. 
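// For example (a sketch of typical use with a "RTSPServer"; the stream and file names are
// placeholders):
//   ServerMediaSession* sms = ServerMediaSession::createNew(env, "mpeg4Video");
//   sms->addSubsession(MPEG4VideoFileServerMediaSubsession
//                      ::createNew(env, "test.m4e", False/*don't reuse the first source*/));
//   rtspServer->addServerMediaSession(sms);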
// C++ header #ifndef _MPEG4_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH #define _MPEG4_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif class MPEG4VideoFileServerMediaSubsession: public FileServerMediaSubsession{ public: static MPEG4VideoFileServerMediaSubsession* createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); // Used to implement "getAuxSDPLine()": void checkForAuxSDPLine1(); void afterPlayingDummy1(); protected: MPEG4VideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); // called only by createNew(); virtual ~MPEG4VideoFileServerMediaSubsession(); void setDoneFlag() { fDoneFlag = ~0; } protected: // redefined virtual functions virtual char const* getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); private: char* fAuxSDPLine; char fDoneFlag; // used when setting up "fAuxSDPLine" RTPSink* fDummyRTPSink; // ditto }; #endif live/liveMedia/include/H264VideoRTPSink.hh000444 001751 000000 00000004366 12265042432 020440 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for H.264 video (RFC 3984) // C++ header #ifndef _H264_VIDEO_RTP_SINK_HH #define _H264_VIDEO_RTP_SINK_HH #ifndef _H264_OR_5_VIDEO_RTP_SINK_HH #include "H264or5VideoRTPSink.hh" #endif class H264VideoRTPSink: public H264or5VideoRTPSink { public: static H264VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat); static H264VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize); // an optional variant of "createNew()", useful if we know, in advance, // the stream's SPS and PPS NAL units. static H264VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, char const* sPropParameterSetsStr); // an optional variant of "createNew()", useful if we know, in advance, // the stream's SPS and PPS NAL units. 
protected: H264VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int8_t const* sps = NULL, unsigned spsSize = 0, u_int8_t const* pps = NULL, unsigned ppsSize = 0); // called only by createNew() virtual ~H264VideoRTPSink(); protected: // redefined virtual functions: virtual char const* auxSDPLine(); private: // redefined virtual functions: virtual Boolean sourceIsCompatibleWithUs(MediaSource& source); }; #endif live/liveMedia/include/MPEG4LATMAudioRTPSource.hh000444 001751 000000 00000007616 12265042432 021637 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // MPEG-4 audio, using LATM multiplexing // C++ header #ifndef _MPEG4_LATM_AUDIO_RTP_SOURCE_HH #define _MPEG4_LATM_AUDIO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class MPEG4LATMAudioRTPSource: public MultiFramedRTPSource { public: static MPEG4LATMAudioRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // By default, the LATM data length field is included at the beginning of each // returned frame. To omit this field, call the following: void omitLATMDataLengthField(); Boolean returnedFrameIncludesLATMDataLengthField() const { return fIncludeLATMDataLengthField; } protected: virtual ~MPEG4LATMAudioRTPSource(); private: MPEG4LATMAudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; private: Boolean fIncludeLATMDataLengthField; }; // A utility for parsing a "StreamMuxConfig" string Boolean parseStreamMuxConfigStr(char const* configStr, // result parameters: Boolean& audioMuxVersion, Boolean& allStreamsSameTimeFraming, unsigned char& numSubFrames, unsigned char& numProgram, unsigned char& numLayer, unsigned char*& audioSpecificConfig, unsigned& audioSpecificConfigSize); // Parses "configStr" as a sequence of hexadecimal digits, representing // a "StreamMuxConfig" (as defined in ISO.IEC 14496-3, table 1.21). // Returns, in "audioSpecificConfig", a binary representation of // the enclosed "AudioSpecificConfig" structure (of size // "audioSpecificConfigSize" bytes). The memory for this is allocated // dynamically by this function; the caller is responsible for // freeing it. Other values, that precede "AudioSpecificConfig", // are returned in the other parameters. // Returns True iff the parsing succeeds. 
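// A call sketch (variable names are placeholders; "configStr" would typically be the
// "config" attribute value from a SDP description):
//   Boolean audioMuxVersion, allStreamsSameTimeFraming;
//   unsigned char numSubFrames, numProgram, numLayer;
//   unsigned char* audioSpecificConfig; unsigned audioSpecificConfigSize;
//   if (parseStreamMuxConfigStr(configStr, audioMuxVersion, allStreamsSameTimeFraming,
//                               numSubFrames, numProgram, numLayer,
//                               audioSpecificConfig, audioSpecificConfigSize)) {
//     ... // use "audioSpecificConfig", then "delete[] audioSpecificConfig"
//   }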
// IMPORTANT NOTE: The implementation of this function currently assumes // that everything after the first "numLayer" field is an // "AudioSpecificConfig". Therefore, it will not work properly if // "audioMuxVersion" != 0, "numProgram" > 0, or "numLayer" > 0. // Also, any 'other data' or CRC info will be included at // the end of "audioSpecificConfig". unsigned char* parseStreamMuxConfigStr(char const* configStr, // result parameter: unsigned& audioSpecificConfigSize); // A variant of the above that returns just the "AudioSpecificConfig" data // (or NULL) if the parsing failed, without bothering with the other // result parameters. unsigned char* parseGeneralConfigStr(char const* configStr, // result parameter: unsigned& configSize); // A routine that parses an arbitrary config string, returning // the result in binary form. #endif live/liveMedia/include/H265VideoRTPSink.hh000444 001751 000000 00000004617 12265042432 020440 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for H.265 video // C++ header #ifndef _H265_VIDEO_RTP_SINK_HH #define _H265_VIDEO_RTP_SINK_HH #ifndef _H264_OR_5_VIDEO_RTP_SINK_HH #include "H264or5VideoRTPSink.hh" #endif class H265VideoRTPSink: public H264or5VideoRTPSink { public: static H265VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat); static H265VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int8_t const* vps, unsigned vpsSize, u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize); // an optional variant of "createNew()", useful if we know, in advance, // the stream's VPS, SPS and PPS NAL units. static H265VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, char const* sPropVPSStr, char const* sPropSPSStr=NULL, char const* sPropPPSStr=NULL); // an optional variant of "createNew()", useful if we know, in advance, // the stream's VPS, SPS and PPS NAL units. 
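// As with "H264VideoRTPSink", the last variant above makes it easy to create a sink
// directly from SDP-style strings (a sketch; the "sProp*Str" variables are placeholders):
//   H265VideoRTPSink* videoSink
//     = H265VideoRTPSink::createNew(env, rtpGroupsock, 96,
//                                   sPropVPSStr, sPropSPSStr, sPropPPSStr);
// where each "sProp*Str" is a Base64-encoded NAL unit, e.g., taken from a "a=fmtp:" line.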
protected: H265VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int8_t const* vps = NULL, unsigned vpsSize = 0, u_int8_t const* sps = NULL, unsigned spsSize = 0, u_int8_t const* pps = NULL, unsigned ppsSize = 0); // called only by createNew() virtual ~H265VideoRTPSink(); protected: // redefined virtual functions: virtual char const* auxSDPLine(); private: // redefined virtual functions: virtual Boolean sourceIsCompatibleWithUs(MediaSource& source); }; #endif live/liveMedia/include/InputFile.hh000444 001751 000000 00000005047 12265042432 017507 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Common routines for opening/closing named input files // C++ header #ifndef _INPUT_FILE_HH #define _INPUT_FILE_HH #include <UsageEnvironment.hh> #include <stdio.h> #if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_WCE)) #ifndef _WIN32_WCE // Include header files that might be needed by Windows (in code that uses this header file): #include <io.h> #include <fcntl.h> #endif #define READ_FROM_FILES_SYNCHRONOUSLY 1 // Because Windows is a silly toy operating system that doesn't (reliably) treat // open files as being readable sockets (which can be handled within the default // "BasicTaskScheduler" event loop, using "select()"), we implement file reading // in Windows using synchronous, rather than asynchronous, I/O. This can severely // limit the scalability of servers using this code that run on Windows. // If this is a problem for you, then either use a better operating system, // or else write your own Windows-specific event loop ("TaskScheduler" subclass) // that can handle readable data in Windows open files as an event. #endif #ifndef _WIN32_WCE #include <sys/stat.h> #endif FILE* OpenInputFile(UsageEnvironment& env, char const* fileName); void CloseInputFile(FILE* fid); u_int64_t GetFileSize(char const* fileName, FILE* fid); // 0 means zero-length, unbounded, or unknown int64_t SeekFile64(FILE *fid, int64_t offset, int whence); // A platform-independent routine for seeking within (possibly) large files int64_t TellFile64(FILE *fid); // A platform-independent routine for reporting the position within // (possibly) large files Boolean FileIsSeekable(FILE *fid); // Tests whether "fid" is seekable, by trying to seek within it. #endif live/liveMedia/include/MatroskaFile.hh000444 001751 000000 00000014775 12265042432 020175 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .)
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class that encapsulates a Matroska file. // C++ header #ifndef _MATROSKA_FILE_HH #define _MATROSKA_FILE_HH #ifndef _MEDIA_HH #include "Media.hh" #endif #ifndef _HASH_TABLE_HH #include "HashTable.hh" #endif class MatroskaTrack; // forward class MatroskaDemux; // forward class MatroskaFile: public Medium { public: typedef void (onCreationFunc)(MatroskaFile* newFile, void* clientData); static void createNew(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData, char const* preferredLanguage = "eng"); // Note: Unlike most "createNew()" functions, this one doesn't return a new object immediately. Instead, because this class // requires file reading (to parse the Matroska 'Track' headers) before a new object can be initialized, the creation of a new // object is signalled by calling - from the event loop - an 'onCreationFunc' that is passed as a parameter to "createNew()". MatroskaTrack* lookup(unsigned trackNumber) const; // Create a demultiplexor for extracting tracks from this file. (Separate clients will typically have separate demultiplexors.) MatroskaDemux* newDemux(); // Parameters of the file ('Segment'); set when the file is parsed: unsigned timecodeScale() { return fTimecodeScale; } // in nanoseconds float segmentDuration() { return fSegmentDuration; } // in units of "timecodeScale()" float fileDuration(); // in seconds char const* fileName() const { return fFileName; } unsigned chosenVideoTrackNumber() { return fChosenVideoTrackNumber; } unsigned chosenAudioTrackNumber() { return fChosenAudioTrackNumber; } unsigned chosenSubtitleTrackNumber() { return fChosenSubtitleTrackNumber; } private: MatroskaFile(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData, char const* preferredLanguage); // called only by createNew() virtual ~MatroskaFile(); static void handleEndOfTrackHeaderParsing(void* clientData); void handleEndOfTrackHeaderParsing(); void addTrack(MatroskaTrack* newTrack, unsigned trackNumber); void addCuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster); Boolean lookupCuePoint(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster); void printCuePoints(FILE* fid); void removeDemux(MatroskaDemux* demux); private: friend class MatroskaFileParser; friend class MatroskaDemux; char const* fFileName; onCreationFunc* fOnCreation; void* fOnCreationClientData; char const* fPreferredLanguage; unsigned fTimecodeScale; // in nanoseconds float fSegmentDuration; // in units of "fTimecodeScale" u_int64_t fSegmentDataOffset, fClusterOffset, fCuesOffset; class MatroskaTrackTable* fTrackTable; HashTable* fDemuxesTable; class CuePoint* fCuePoints; unsigned fChosenVideoTrackNumber, fChosenAudioTrackNumber, fChosenSubtitleTrackNumber; class MatroskaFileParser* fParserForInitialization; }; // We define our own track type codes as bits (powers of 2), so we can use the set of track types as a bitmap, 
representing a set: // (Note that MATROSKA_TRACK_TYPE_OTHER must be last, and have the largest value.) #define MATROSKA_TRACK_TYPE_VIDEO 0x01 #define MATROSKA_TRACK_TYPE_AUDIO 0x02 #define MATROSKA_TRACK_TYPE_SUBTITLE 0x04 #define MATROSKA_TRACK_TYPE_OTHER 0x08 class MatroskaTrack { public: MatroskaTrack(); virtual ~MatroskaTrack(); // track parameters unsigned trackNumber; u_int8_t trackType; Boolean isEnabled, isDefault, isForced; unsigned defaultDuration; char* name; char* language; char* codecID; unsigned samplingFrequency; unsigned numChannels; char const* mimeType; unsigned codecPrivateSize; u_int8_t* codecPrivate; Boolean codecPrivateUsesH264FormatForH265; // a hack specifically for H.265 video tracks unsigned headerStrippedBytesSize; u_int8_t* headerStrippedBytes; unsigned subframeSizeSize; // 0 means: frames do not have subframes (the default behavior) Boolean haveSubframes() const { return subframeSizeSize > 0; } }; class MatroskaDemux: public Medium { public: FramedSource* newDemuxedTrack(); FramedSource* newDemuxedTrack(unsigned& resultTrackNumber); // Returns a new stream ("FramedSource" subclass) that represents the next preferred media // track (video, audio, subtitle - in that order) from the file. (Preferred media tracks // are based on the file's language preference.) // This function returns NULL when no more media tracks exist. FramedSource* newDemuxedTrackByTrackNumber(unsigned trackNumber); // As above, but creates a new stream for a specific track number within the Matroska file. // (You should not call this function more than once with the same track number.) // Note: We assume that: // - Every track created by "newDemuxedTrack()" is later read // - All calls to "newDemuxedTrack()" are made before any track is read protected: friend class MatroskaFile; friend class MatroskaFileParser; class MatroskaDemuxedTrack* lookupDemuxedTrack(unsigned trackNumber); MatroskaDemux(MatroskaFile& ourFile); // we're created only by a "MatroskaFile" (a friend) virtual ~MatroskaDemux(); private: friend class MatroskaDemuxedTrack; void removeTrack(unsigned trackNumber); void continueReading(); // called by a demuxed track to tell us that it has a pending read ("doGetNextFrame()") void seekToTime(double& seekNPT); static void handleEndOfFile(void* clientData); void handleEndOfFile(); private: MatroskaFile& fOurFile; class MatroskaFileParser* fOurParser; HashTable* fDemuxedTracksTable; // Used to implement "newServerMediaSubsession()": u_int8_t fNextTrackTypeToCheck; }; #endif live/liveMedia/include/AC3AudioRTPSink.hh000444 001751 000000 00000004104 12265042432 020344 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// RTP sink for AC3 audio // C++ header #ifndef _AC3_AUDIO_RTP_SINK_HH #define _AC3_AUDIO_RTP_SINK_HH #ifndef _AUDIO_RTP_SINK_HH #include "AudioRTPSink.hh" #endif class AC3AudioRTPSink: public AudioRTPSink { public: static AC3AudioRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency); protected: AC3AudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency); // called only by createNew() virtual ~AC3AudioRTPSink(); private: // redefined virtual functions: virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual unsigned specialHeaderSize() const; private: unsigned char fTotNumFragmentsUsed; // used only if a frame gets fragmented across multiple packets }; #endif live/liveMedia/include/MPEG4GenericRTPSink.hh000444 001751 000000 00000005007 12265042432 021130 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// MPEG4-GENERIC ("audio", "video", or "application") RTP stream sinks // C++ header #ifndef _MPEG4_GENERIC_RTP_SINK_HH #define _MPEG4_GENERIC_RTP_SINK_HH #ifndef _MULTI_FRAMED_RTP_SINK_HH #include "MultiFramedRTPSink.hh" #endif class MPEG4GenericRTPSink: public MultiFramedRTPSink { public: static MPEG4GenericRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, char const* sdpMediaTypeString, char const* mpeg4Mode, char const* configString, unsigned numChannels = 1); protected: MPEG4GenericRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, char const* sdpMediaTypeString, char const* mpeg4Mode, char const* configString, unsigned numChannels); // called only by createNew() virtual ~MPEG4GenericRTPSink(); private: // redefined virtual functions: virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual unsigned specialHeaderSize() const; virtual char const* sdpMediaType() const; virtual char const* auxSDPLine(); // for the "a=fmtp:" SDP line private: char const* fSDPMediaTypeString; char const* fMPEG4Mode; char const* fConfigString; char* fFmtpSDPLine; }; #endif live/liveMedia/include/SIPClient.hh000444 001751 000000 00000012266 12265042432 017403 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A generic SIP client // C++ header #ifndef _SIP_CLIENT_HH #define _SIP_CLIENT_HH #ifndef _MEDIA_SESSION_HH #include "MediaSession.hh" #endif #ifndef _NET_ADDRESS_HH #include "NetAddress.hh" #endif #ifndef _DIGEST_AUTHENTICATION_HH #include "DigestAuthentication.hh" #endif // Possible states in the "INVITE" transition diagram (RFC 3261, Figure 5) enum inviteClientState { Calling, Proceeding, Completed, Terminated }; class SIPClient: public Medium { public: static SIPClient* createNew(UsageEnvironment& env, unsigned char desiredAudioRTPPayloadFormat, char const* mimeSubtype = NULL, int verbosityLevel = 0, char const* applicationName = NULL); void setProxyServer(unsigned proxyServerAddress, portNumBits proxyServerPortNum); void setClientStartPortNum(portNumBits clientStartPortNum) { fClientStartPortNum = clientStartPortNum; } char* invite(char const* url, Authenticator* authenticator = NULL); // Issues a SIP "INVITE" command // Returns the session SDP description if this command succeeds char* inviteWithPassword(char const* url, char const* username, char const* password); // Uses "invite()" to do an "INVITE" - first // without using "password", then (if we get an Unauthorized // response) with an authentication response computed from "password" Boolean sendACK(); // on current call Boolean sendBYE(); // on current call static Boolean parseSIPURL(UsageEnvironment& env, char const* url, NetAddress& address, portNumBits& portNum); // (ignores any "[:]@" in "url") static Boolean parseSIPURLUsernamePassword(char const* url, char*& username, char*& password); char const* getInviteSdpReply() const { return fInviteSDPDescriptionReturned; } void setUserAgentString(char const* userAgentName); // sets an alternative string to be used in SIP "User-Agent:" headers protected: virtual ~SIPClient(); private: SIPClient(UsageEnvironment& env, unsigned char desiredAudioRTPPayloadFormat, char const* mimeSubtype, int verbosityLevel, char const* applicationName); // called only by createNew(); void reset(); // Routines used to implement invite*(): char* invite1(Authenticator* authenticator); Boolean processURL(char const* url); Boolean sendINVITE(); static void inviteResponseHandler(void* clientData, int mask); void doInviteStateMachine(unsigned responseCode); void doInviteStateTerminated(unsigned responseCode); TaskToken fTimerA, fTimerB, fTimerD; static void timerAHandler(void* clientData); static void timerBHandler(void* clientData); static void timerDHandler(void* clientData); unsigned const fT1; // in microseconds unsigned fTimerALen; // in microseconds; initially fT1, then doubles unsigned fTimerACount; // Routines used to implement all commands: char* createAuthenticatorString(Authenticator const* authenticator, char const* cmd, char const* url); Boolean sendRequest(char const* requestString, unsigned requestLength); unsigned getResponseCode(); unsigned getResponse(char*& responseBuffer, unsigned responseBufferSize); Boolean parseResponseCode(char const* line, unsigned& responseCode); private: // Set for all calls: unsigned char fDesiredAudioRTPPayloadFormat; char* fMIMESubtype; unsigned fMIMESubtypeSize; int fVerbosityLevel; unsigned fCSeq; // sequence number, used in consecutive requests char const* fApplicationName; unsigned fApplicationNameSize; char const* fOurAddressStr; unsigned fOurAddressStrSize; portNumBits fOurPortNum; Groupsock* fOurSocket; char* fUserAgentHeaderStr; unsigned fUserAgentHeaderStrLen; // Set for each call: char const* fURL; unsigned fURLSize; struct in_addr fServerAddress; 
portNumBits fServerPortNum; // in host order portNumBits fClientStartPortNum; // in host order unsigned fCallId, fFromTag; // set by us char const* fToTagStr; // set by the responder unsigned fToTagStrSize; Authenticator fValidAuthenticator; char const* fUserName; // 'user' name used in "From:" & "Contact:" lines unsigned fUserNameSize; char* fInviteSDPDescription; char* fInviteSDPDescriptionReturned; char* fInviteCmd; unsigned fInviteCmdSize; Authenticator* fWorkingAuthenticator; inviteClientState fInviteClientState; char fEventLoopStopFlag; }; #endif live/liveMedia/include/MPEG4ESVideoRTPSource.hh000444 001751 000000 00000003335 12265042432 021410 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // MP4V-ES video RTP stream sources // C++ header #ifndef _MPEG4_ES_VIDEO_RTP_SOURCE_HH #define _MPEG4_ES_VIDEO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class MPEG4ESVideoRTPSource: public MultiFramedRTPSource { public: static MPEG4ESVideoRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); protected: virtual ~MPEG4ESVideoRTPSource(); private: MPEG4ESVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; }; #endif live/liveMedia/include/H261VideoRTPSource.hh000444 001751 000000 00000003475 12265042432 020771 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// H.261 Video RTP Sources // C++ header #ifndef _H261_VIDEO_RTP_SOURCE_HH #define _H261_VIDEO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class H261VideoRTPSource: public MultiFramedRTPSource { public: static H261VideoRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat = 31, unsigned rtpTimestampFrequency = 90000); u_int32_t lastSpecialHeader() const {return fLastSpecialHeader;} protected: virtual ~H261VideoRTPSource(); private: H261VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; private: u_int32_t fLastSpecialHeader; }; #endif live/liveMedia/include/MPEG4GenericRTPSource.hh000444 001751 000000 00000005145 12265042432 021467 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // MPEG4-GENERIC ("audio", "video", or "application") RTP stream sources // C++ header #ifndef _MPEG4_GENERIC_RTP_SOURCE_HH #define _MPEG4_GENERIC_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class MPEG4GenericRTPSource: public MultiFramedRTPSource { public: static MPEG4GenericRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mediumName, char const* mode, unsigned sizeLength, unsigned indexLength, unsigned indexDeltaLength // add other parameters later ); // mediumName is "audio", "video", or "application" // it *cannot* be NULL protected: virtual ~MPEG4GenericRTPSource(); private: MPEG4GenericRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency, char const* mediumName, char const* mode, unsigned sizeLength, unsigned indexLength, unsigned indexDeltaLength ); // called only by createNew() private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; private: char* fMIMEType; char* fMode; unsigned fSizeLength, fIndexLength, fIndexDeltaLength; unsigned fNumAUHeaders; // in the most recently read packet unsigned fNextAUHeader; // index of the next AU Header to read struct AUHeader* fAUHeaders; friend class MPEG4GenericBufferedPacket; }; // A function that looks up the sampling frequency from an // "AudioSpecificConfig" string. 
(0 means 'unknown') unsigned samplingFrequencyFromAudioSpecificConfig(char const* configStr); #endif live/liveMedia/include/AC3AudioStreamFramer.hh000444 001751 000000 00000004571 12265042432 021452 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up an AC3 audio elementary stream into frames // C++ header #ifndef _AC3_AUDIO_STREAM_FRAMER_HH #define _AC3_AUDIO_STREAM_FRAMER_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif class AC3AudioStreamFramer: public FramedFilter { public: static AC3AudioStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource, unsigned char streamCode = 0); // If "streamCode" != 0, then we assume that there's a 1-byte code at the beginning of each chunk of data that we read from // our source. If that code is not the value we want, we discard the chunk of data. // However, if "streamCode" == 0 (the default), then we don't expect this 1-byte code. unsigned samplingRate(); void flushInput(); // called if there is a discontinuity (seeking) in the input private: AC3AudioStreamFramer(UsageEnvironment& env, FramedSource* inputSource, unsigned char streamCode); // called only by createNew() virtual ~AC3AudioStreamFramer(); static void handleNewData(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime); void handleNewData(unsigned char* ptr, unsigned size); void parseNextFrame(); private: // redefined virtual functions: virtual void doGetNextFrame(); private: struct timeval currentFramePlayTime() const; private: struct timeval fNextFramePresentationTime; private: // parsing state class AC3AudioStreamParser* fParser; unsigned char fOurStreamCode; friend class AC3AudioStreamParser; // hack }; #endif live/liveMedia/include/AC3AudioRTPSource.hh000444 001751 000000 00000003300 12265042432 020675 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// AC3 Audio RTP Sources // C++ header #ifndef _AC3_AUDIO_RTP_SOURCE_HH #define _AC3_AUDIO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class AC3AudioRTPSource: public MultiFramedRTPSource { public: static AC3AudioRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); protected: virtual ~AC3AudioRTPSource(); private: AC3AudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; }; #endif live/liveMedia/include/AudioInputDevice.hh000444 001751 000000 00000004431 12265042432 021005 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Generic audio input device (such as a microphone, or an input sound card) // C++ header #ifndef _AUDIO_INPUT_DEVICE_HH #define _AUDIO_INPUT_DEVICE_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif class AudioPortNames { public: AudioPortNames(); virtual ~AudioPortNames(); unsigned numPorts; char** portName; }; class AudioInputDevice: public FramedSource { public: unsigned char bitsPerSample() const { return fBitsPerSample; } unsigned char numChannels() const { return fNumChannels; } unsigned samplingFrequency() const { return fSamplingFrequency; } virtual Boolean setInputPort(int portIndex) = 0; virtual double getAverageLevel() const = 0; static AudioInputDevice* createNew(UsageEnvironment& env, int inputPortNumber, unsigned char bitsPerSample, unsigned char numChannels, unsigned samplingFrequency, unsigned granularityInMS = 20); static AudioPortNames* getPortNames(); static char** allowedDeviceNames; // If this is set to non-NULL, then it's a NULL-terminated array of strings // of device names that we are allowed to access. protected: AudioInputDevice(UsageEnvironment& env, unsigned char bitsPerSample, unsigned char numChannels, unsigned samplingFrequency, unsigned granularityInMS); // we're an abstract base class virtual ~AudioInputDevice(); protected: unsigned char fBitsPerSample, fNumChannels; unsigned fSamplingFrequency; unsigned fGranularityInMS; }; #endif live/liveMedia/include/uLawAudioFilter.hh000444 001751 000000 00000014124 12265042432 020644 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Filters for converting between raw PCM audio and uLaw // C++ header #ifndef _ULAW_AUDIO_FILTER_HH #define _ULAW_AUDIO_FILTER_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif ////////// 16-bit PCM (in various byte orderings) -> 8-bit u-Law ////////// class uLawFromPCMAudioSource: public FramedFilter { public: static uLawFromPCMAudioSource* createNew(UsageEnvironment& env, FramedSource* inputSource, int byteOrdering = 0); // "byteOrdering" == 0 => host order (the default) // "byteOrdering" == 1 => little-endian order // "byteOrdering" == 2 => network (i.e., big-endian) order protected: uLawFromPCMAudioSource(UsageEnvironment& env, FramedSource* inputSource, int byteOrdering); // called only by createNew() virtual ~uLawFromPCMAudioSource(); private: // Redefined virtual functions: virtual void doGetNextFrame(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); private: int fByteOrdering; unsigned char* fInputBuffer; unsigned fInputBufferSize; }; ////////// u-Law -> 16-bit PCM (in host order) ////////// class PCMFromuLawAudioSource: public FramedFilter { public: static PCMFromuLawAudioSource* createNew(UsageEnvironment& env, FramedSource* inputSource); protected: PCMFromuLawAudioSource(UsageEnvironment& env, FramedSource* inputSource); // called only by createNew() virtual ~PCMFromuLawAudioSource(); private: // Redefined virtual functions: virtual void doGetNextFrame(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); private: unsigned char* fInputBuffer; unsigned fInputBufferSize; }; ////////// 16-bit values (in host order) -> 16-bit network order ////////// class NetworkFromHostOrder16: public FramedFilter { public: static NetworkFromHostOrder16* createNew(UsageEnvironment& env, FramedSource* inputSource); protected: NetworkFromHostOrder16(UsageEnvironment& env, FramedSource* inputSource); // called only by createNew() virtual ~NetworkFromHostOrder16(); private: // Redefined virtual functions: virtual void doGetNextFrame(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); }; ////////// 16-bit values (in network order) -> 16-bit host order ////////// class HostFromNetworkOrder16: public FramedFilter { public: static HostFromNetworkOrder16* createNew(UsageEnvironment& env, 
FramedSource* inputSource); protected: HostFromNetworkOrder16(UsageEnvironment& env, FramedSource* inputSource); // called only by createNew() virtual ~HostFromNetworkOrder16(); private: // Redefined virtual functions: virtual void doGetNextFrame(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); }; ////////// 16-bit values: little-endian <-> big-endian ////////// class EndianSwap16: public FramedFilter { public: static EndianSwap16* createNew(UsageEnvironment& env, FramedSource* inputSource); protected: EndianSwap16(UsageEnvironment& env, FramedSource* inputSource); // called only by createNew() virtual ~EndianSwap16(); private: // Redefined virtual functions: virtual void doGetNextFrame(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); }; ////////// 24-bit values: little-endian <-> big-endian ////////// class EndianSwap24: public FramedFilter { public: static EndianSwap24* createNew(UsageEnvironment& env, FramedSource* inputSource); protected: EndianSwap24(UsageEnvironment& env, FramedSource* inputSource); // called only by createNew() virtual ~EndianSwap24(); private: // Redefined virtual functions: virtual void doGetNextFrame(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); }; #endif live/liveMedia/include/WAVAudioFileSource.hh000444 001751 000000 00000005017 12265042432 021205 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A WAV audio file source // NOTE: Samples are returned in little-endian order (the same order in which // they were stored in the file). 
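// For example, to stream a WAV file's 16-bit PCM as 8-bit u-law, a source of this class
// is typically wrapped with the "uLawFromPCMAudioSource" filter declared above
// (a sketch; the file name is a placeholder):
//   WAVAudioFileSource* pcmSource = WAVAudioFileSource::createNew(env, "test.wav");
//   FramedSource* uLawSource
//     = uLawFromPCMAudioSource::createNew(env, pcmSource, 1/*little-endian input*/);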
// C++ header #ifndef _WAV_AUDIO_FILE_SOURCE_HH #define _WAV_AUDIO_FILE_SOURCE_HH #ifndef _AUDIO_INPUT_DEVICE_HH #include "AudioInputDevice.hh" #endif typedef enum { WA_PCM = 0x01, WA_PCMA = 0x06, WA_PCMU = 0x07, WA_IMA_ADPCM = 0x11, WA_UNKNOWN } WAV_AUDIO_FORMAT; class WAVAudioFileSource: public AudioInputDevice { public: static WAVAudioFileSource* createNew(UsageEnvironment& env, char const* fileName); unsigned numPCMBytes() const; void setScaleFactor(int scale); void seekToPCMByte(unsigned byteNumber, unsigned numBytesToStream); // if "numBytesToStream" is >0, then we limit the stream to that number of bytes, before treating it as EOF unsigned char getAudioFormat(); protected: WAVAudioFileSource(UsageEnvironment& env, FILE* fid); // called only by createNew() virtual ~WAVAudioFileSource(); static void fileReadableHandler(WAVAudioFileSource* source, int mask); void doReadFromFile(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void doStopGettingFrames(); virtual Boolean setInputPort(int portIndex); virtual double getAverageLevel() const; protected: unsigned fPreferredFrameSize; private: FILE* fFid; double fPlayTimePerSample; // useconds Boolean fFidIsSeekable; unsigned fLastPlayTime; // useconds Boolean fHaveStartedReading; unsigned fWAVHeaderSize; unsigned fFileSize; int fScaleFactor; Boolean fLimitNumBytesToStream; unsigned fNumBytesToStream; // used iff "fLimitNumBytesToStream" is True unsigned char fAudioFormat; }; #endif live/liveMedia/include/JPEGVideoRTPSink.hh000444 001751 000000 00000003621 12265042432 020533 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
live/liveMedia/include/JPEGVideoRTPSink.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// RTP sink for JPEG video (RFC 2435)
// C++ header

#ifndef _JPEG_VIDEO_RTP_SINK_HH
#define _JPEG_VIDEO_RTP_SINK_HH

#ifndef _VIDEO_RTP_SINK_HH
#include "VideoRTPSink.hh"
#endif

class JPEGVideoRTPSink: public VideoRTPSink {
public:
  static JPEGVideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs);

protected:
  JPEGVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs); // called only by createNew()
  virtual ~JPEGVideoRTPSink();

private:
  // redefined virtual functions:
  virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
  virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
                                      unsigned char* frameStart,
                                      unsigned numBytesInFrame,
                                      struct timeval framePresentationTime,
                                      unsigned numRemainingBytes);
  virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
                                                 unsigned numBytesInFrame) const;
  virtual unsigned specialHeaderSize() const;
};

#endif

live/liveMedia/include/JPEGVideoSource.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// JPEG video sources
// C++ header

#ifndef _JPEG_VIDEO_SOURCE_HH
#define _JPEG_VIDEO_SOURCE_HH

#ifndef _FRAMED_SOURCE_HH
#include "FramedSource.hh"
#endif

class JPEGVideoSource: public FramedSource {
public:
  virtual u_int8_t type() = 0;
  virtual u_int8_t qFactor() = 0;
  virtual u_int8_t width() = 0;  // # pixels/8 (or 0 for 2048 pixels)
  virtual u_int8_t height() = 0; // # pixels/8 (or 0 for 2048 pixels)

  virtual u_int8_t const* quantizationTables(u_int8_t& precision, u_int16_t& length);
      // If "qFactor()" returns a value >= 128, then this function is called
      // to tell us the quantization tables that are being used.
      // (The default implementation of this function just returns NULL.)
      // "precision" and "length" are as defined in RFC 2435, section 3.1.8.

  virtual u_int16_t restartInterval();
      // If restart intervals are being used (i.e., 64 <= type() <= 127), then
      // this function must be redefined - by a subclass - to return a non-zero value.

protected:
  JPEGVideoSource(UsageEnvironment& env); // abstract base class
  virtual ~JPEGVideoSource();

private:
  // redefined virtual functions:
  virtual Boolean isJPEGVideoSource() const;
};

#endif
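A minimal sketch of the subclassing contract that "JPEGVideoRTPSink" relies on: it queries these virtuals when building the RFC 2435 payload header. The class name and the fixed geometry/Q-factor values here are hypothetical:

  class MyJPEGFramer: public JPEGVideoSource {
  public:
    static MyJPEGFramer* createNew(UsageEnvironment& env) {
      return new MyJPEGFramer(env);
    }

    // Fixed 640x480 baseline JPEG using standard tables scaled by a Q factor of 75:
    virtual u_int8_t type()    { return 1; }      // RFC 2435 type 1 => YUV 4:2:0
    virtual u_int8_t qFactor() { return 75; }     // < 128 => no explicit tables sent
    virtual u_int8_t width()   { return 640/8; }  // expressed in units of 8 pixels
    virtual u_int8_t height()  { return 480/8; }

  protected:
    MyJPEGFramer(UsageEnvironment& env): JPEGVideoSource(env) {}

  private:
    virtual void doGetNextFrame() {
      // A real subclass would copy one JPEG scan's payload into "fTo" here,
      // set "fFrameSize" etc., then call FramedSource::afterGetting(this).
    }
  };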
live/liveMedia/include/MPEG1or2AudioRTPSink.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// RTP sink for MPEG audio (RFC 2250)
// C++ header

#ifndef _MPEG_1OR2_AUDIO_RTP_SINK_HH
#define _MPEG_1OR2_AUDIO_RTP_SINK_HH

#ifndef _AUDIO_RTP_SINK_HH
#include "AudioRTPSink.hh"
#endif

class MPEG1or2AudioRTPSink: public AudioRTPSink {
public:
  static MPEG1or2AudioRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs);

protected:
  MPEG1or2AudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs); // called only by createNew()
  virtual ~MPEG1or2AudioRTPSink();

private:
  // redefined virtual functions:
  virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
                                      unsigned char* frameStart,
                                      unsigned numBytesInFrame,
                                      struct timeval framePresentationTime,
                                      unsigned numRemainingBytes);
  virtual unsigned specialHeaderSize() const;
};

#endif

live/liveMedia/include/MPEG4ESVideoRTPSink.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// RTP sink for MPEG-4 Elementary Stream video (RFC 3016)
// C++ header

#ifndef _MPEG4ES_VIDEO_RTP_SINK_HH
#define _MPEG4ES_VIDEO_RTP_SINK_HH

#ifndef _VIDEO_RTP_SINK_HH
#include "VideoRTPSink.hh"
#endif

class MPEG4ESVideoRTPSink: public VideoRTPSink {
public:
  static MPEG4ESVideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs,
                                        unsigned char rtpPayloadFormat,
                                        u_int32_t rtpTimestampFrequency = 90000);
  static MPEG4ESVideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs,
                                        unsigned char rtpPayloadFormat,
                                        u_int32_t rtpTimestampFrequency,
                                        u_int8_t profileAndLevelIndication,
                                        char const* configStr);
      // an optional variant of "createNew()", useful if we know, in advance,
      // the stream's 'configuration' info.
protected:
  MPEG4ESVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
                      unsigned char rtpPayloadFormat,
                      u_int32_t rtpTimestampFrequency,
                      u_int8_t profileAndLevelIndication = 0,
                      char const* configStr = NULL);
      // called only by createNew()
  virtual ~MPEG4ESVideoRTPSink();

protected:
  // redefined virtual functions:
  virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
  virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
                                      unsigned char* frameStart,
                                      unsigned numBytesInFrame,
                                      struct timeval framePresentationTime,
                                      unsigned numRemainingBytes);
  virtual Boolean allowFragmentationAfterStart() const;
  virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
                                                 unsigned numBytesInFrame) const;
  virtual char const* auxSDPLine();

protected:
  Boolean fVOPIsPresent;

private:
  u_int8_t fProfileAndLevelIndication;
  unsigned char* fConfigBytes;
  unsigned fNumConfigBytes;
  char* fFmtpSDPLine;
};

#endif
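A minimal sketch of the second "createNew()" variant declared above, for when the stream's MPEG-4 'config' information is already known in advance. "env" and "rtpGroupsock" are assumed to exist; the payload format, profile/level number, and config string are hypothetical placeholder values:

  unsigned char const rtpPayloadFormat = 96;    // a dynamic RTP payload type
  u_int8_t const profileAndLevelIndication = 3; // hypothetical
  char const* configStr = "000001B003";         // hypothetical 'config' hex string

  MPEG4ESVideoRTPSink* videoSink
    = MPEG4ESVideoRTPSink::createNew(*env, rtpGroupsock, rtpPayloadFormat,
                                     90000, profileAndLevelIndication, configStr);
  // Because the configuration was supplied up front, the sink does not have to
  // wait for it to appear in the input stream before describing itself in SDP.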
live/liveMedia/include/RTCP.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// RTCP
// C++ header

#ifndef _RTCP_HH
#define _RTCP_HH

#ifndef _RTP_SINK_HH
#include "RTPSink.hh"
#endif
#ifndef _RTP_SOURCE_HH
#include "RTPSource.hh"
#endif

class SDESItem {
public:
  SDESItem(unsigned char tag, unsigned char const* value);

  unsigned char const* data() const { return fData; }
  unsigned totalSize() const;

private:
  unsigned char fData[2 + 0xFF]; // first 2 bytes are tag and length
};

class RTCPMemberDatabase; // forward

class RTCPInstance: public Medium {
public:
  static RTCPInstance* createNew(UsageEnvironment& env, Groupsock* RTCPgs,
                                 unsigned totSessionBW, /* in kbps */
                                 unsigned char const* cname,
                                 RTPSink* sink, RTPSource const* source,
                                 Boolean isSSMSource = False);

  static Boolean lookupByName(UsageEnvironment& env, char const* instanceName,
                              RTCPInstance*& resultInstance);

  unsigned numMembers() const;
  unsigned totSessionBW() const { return fTotSessionBW; }

  void setByeHandler(TaskFunc* handlerTask, void* clientData,
                     Boolean handleActiveParticipantsOnly = True);
      // Assigns a handler routine to be called if a "BYE" arrives.
      // The handler is called once only; for subsequent "BYE"s,
      // "setByeHandler()" would need to be called again.
      // If "handleActiveParticipantsOnly" is True, then the handler is called
      // only if the SSRC is for a known sender (if we have a "RTPSource"),
      // or if the SSRC is for a known receiver (if we have a "RTPSink").
      // This prevents (for example) the handler for a multicast receiver being
      // called if some other multicast receiver happens to exit.
      // If "handleActiveParticipantsOnly" is False, then the handler is called
      // for any incoming RTCP "BYE".
      // (To remove an existing "BYE" handler, call "setByeHandler()" again,
      // with a "handlerTask" of NULL.)

  void setSRHandler(TaskFunc* handlerTask, void* clientData);
  void setRRHandler(TaskFunc* handlerTask, void* clientData);
      // Assigns a handler routine to be called if a "SR" or "RR"
      // (respectively) arrives.  Unlike "setByeHandler()", the handler will
      // be called once for each incoming "SR" or "RR".  (To turn off handling,
      // call the function again with "handlerTask" (and "clientData") as NULL.)

  void setSpecificRRHandler(netAddressBits fromAddress, Port fromPort,
                            TaskFunc* handlerTask, void* clientData);
      // Like "setRRHandler()", but applies only to "RR" packets that come from
      // a specific source address and port.  (Note that if both a specific
      // and a general "RR" handler function is set, then both will be called.)
  void unsetSpecificRRHandler(netAddressBits fromAddress, Port fromPort);
      // equivalent to setSpecificRRHandler(..., NULL, NULL);

  Groupsock* RTCPgs() const { return fRTCPInterface.gs(); }

  void setStreamSocket(int sockNum, unsigned char streamChannelId);
  void addStreamSocket(int sockNum, unsigned char streamChannelId);
  void removeStreamSocket(int sockNum, unsigned char streamChannelId) {
    fRTCPInterface.removeStreamSocket(sockNum, streamChannelId);
  }
      // hacks to allow sending RTP over TCP (RFC 2326, section 10.12)

  void setAuxilliaryReadHandler(AuxHandlerFunc* handlerFunc,
                                void* handlerClientData) {
    fRTCPInterface.setAuxilliaryReadHandler(handlerFunc, handlerClientData);
  }

protected:
  RTCPInstance(UsageEnvironment& env, Groupsock* RTPgs, unsigned totSessionBW,
               unsigned char const* cname,
               RTPSink* sink, RTPSource const* source,
               Boolean isSSMSource);
      // called only by createNew()
  virtual ~RTCPInstance();

private:
  // redefined virtual functions:
  virtual Boolean isRTCPInstance() const;

private:
  Boolean addReport(Boolean alwaysAdd = False);
  void addSR();
  void addRR();
  void enqueueCommonReportPrefix(unsigned char packetType, u_int32_t SSRC,
                                 unsigned numExtraWords = 0);
  void enqueueCommonReportSuffix();
  void enqueueReportBlock(RTPReceptionStats* receptionStats);
  void addSDES();
  void addBYE();
  void sendBuiltPacket();

  static void onExpire(RTCPInstance* instance);
  void onExpire1();

  static void incomingReportHandler(RTCPInstance* instance, int /*mask*/);
  void incomingReportHandler1();
  void onReceive(int typeOfPacket, int totPacketSize, u_int32_t ssrc);

private:
  unsigned char* fInBuf;
  unsigned fNumBytesAlreadyRead;
  OutPacketBuffer* fOutBuf;
  RTPInterface fRTCPInterface;
  unsigned fTotSessionBW;
  RTPSink* fSink;
  RTPSource const* fSource;
  Boolean fIsSSMSource;

  SDESItem fCNAME;
  RTCPMemberDatabase* fKnownMembers;
  unsigned fOutgoingReportCount; // used for SSRC member aging

  double fAveRTCPSize;
  int fIsInitial;
  double fPrevReportTime;
  double fNextReportTime;
  int fPrevNumMembers;

  int fLastSentSize;
  int fLastReceivedSize;
  u_int32_t fLastReceivedSSRC;
  int fTypeOfEvent;
  int fTypeOfPacket;
  Boolean fHaveJustSentPacket;
  unsigned fLastPacketSentSize;

  TaskFunc* fByeHandlerTask;
  void* fByeHandlerClientData;
  Boolean fByeHandleActiveParticipantsOnly;
  TaskFunc* fSRHandlerTask;
  void* fSRHandlerClientData;
  TaskFunc* fRRHandlerTask;
  void* fRRHandlerClientData;
  AddressPortLookupTable* fSpecificRRHandlerTable;

public: // because this stuff is used by an external "C" function
  void schedule(double nextTime);
  void reschedule(double nextTime);
  void sendReport();
  void sendBYE();
  int typeOfEvent() { return fTypeOfEvent; }
  int sentPacketSize() { return fLastSentSize; }
  int packetType() { return fTypeOfPacket; }
  int receivedPacketSize() { return fLastReceivedSize; }
  int checkNewSSRC();
  void removeLastReceivedSSRC();
  void removeSSRC(u_int32_t ssrc, Boolean alsoRemoveStats);
};

// RTCP packet types:
const unsigned char RTCP_PT_SR = 200;
const unsigned char RTCP_PT_RR = 201;
const unsigned char RTCP_PT_SDES = 202;
const unsigned char RTCP_PT_BYE = 203;
const unsigned char RTCP_PT_APP = 204;

// SDES tags:
const unsigned char RTCP_SDES_END = 0;
const unsigned char RTCP_SDES_CNAME = 1;
const unsigned char RTCP_SDES_NAME = 2;
const unsigned char RTCP_SDES_EMAIL = 3;
const unsigned char RTCP_SDES_PHONE = 4;
const unsigned char RTCP_SDES_LOC = 5;
const unsigned char RTCP_SDES_TOOL = 6;
const unsigned char RTCP_SDES_NOTE = 7;
const unsigned char RTCP_SDES_PRIV = 8;

#endif
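A minimal sketch of pairing an "RTCPInstance" with an existing "RTPSink" and installing a "BYE" handler, per the "setByeHandler()" semantics described above. "env", "rtcpGroupsock", and "audioSink" are assumed to exist; the bandwidth figure and CNAME are hypothetical:

  void subsessionByeHandler(void* clientData) {
    // Called (once) when a participant sends an RTCP "BYE" - e.g., tear down
    // the corresponding stream here.
  }

  unsigned const estimatedSessionBandwidth = 160; // in kbps
  unsigned char const* cname = (unsigned char const*)"myhost.example.com";

  RTCPInstance* rtcp
    = RTCPInstance::createNew(*env, rtcpGroupsock,
                              estimatedSessionBandwidth, cname,
                              audioSink, NULL /* we're a sender, not a receiver */);
  rtcp->setByeHandler(subsessionByeHandler, NULL);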
live/liveMedia/include/MPEG1or2AudioRTPSource.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// MPEG-1 or MPEG-2 Audio RTP Sources
// C++ header

#ifndef _MPEG_1OR2_AUDIO_RTP_SOURCE_HH
#define _MPEG_1OR2_AUDIO_RTP_SOURCE_HH

#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
#include "MultiFramedRTPSource.hh"
#endif

class MPEG1or2AudioRTPSource: public MultiFramedRTPSource {
public:
  static MPEG1or2AudioRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs,
                                           unsigned char rtpPayloadFormat = 14,
                                           unsigned rtpTimestampFrequency = 90000);

protected:
  virtual ~MPEG1or2AudioRTPSource();

private:
  MPEG1or2AudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
                         unsigned char rtpPayloadFormat,
                         unsigned rtpTimestampFrequency);
      // called only by createNew()

private:
  // redefined virtual functions:
  virtual Boolean processSpecialHeader(BufferedPacket* packet,
                                       unsigned& resultSpecialHeaderSize);
  virtual char const* MIMEtype() const;
};

#endif

live/liveMedia/include/VideoRTPSink.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A generic RTP sink for video codecs (abstract base class)
// C++ header

#ifndef _VIDEO_RTP_SINK_HH
#define _VIDEO_RTP_SINK_HH

#ifndef _MULTI_FRAMED_RTP_SINK_HH
#include "MultiFramedRTPSink.hh"
#endif

class VideoRTPSink: public MultiFramedRTPSink {
protected:
  VideoRTPSink(UsageEnvironment& env, Groupsock* rtpgs,
               unsigned char rtpPayloadType, unsigned rtpTimestampFrequency,
               char const* rtpPayloadFormatName);
      // (we're an abstract base class)
  virtual ~VideoRTPSink();

private:
  // redefined virtual functions:
  virtual char const* sdpMediaType() const;
};

#endif

live/liveMedia/include/MPEG4VideoStreamFramer.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A filter that breaks up an MPEG-4 video elementary stream into
// frames for:
// - Visual Object Sequence (VS) Header + Visual Object (VO) Header
//   + Video Object Layer (VOL) Header
// - Group of VOP (GOV) Header
// - VOP frame
// C++ header

#ifndef _MPEG4_VIDEO_STREAM_FRAMER_HH
#define _MPEG4_VIDEO_STREAM_FRAMER_HH

#ifndef _MPEG_VIDEO_STREAM_FRAMER_HH
#include "MPEGVideoStreamFramer.hh"
#endif

class MPEG4VideoStreamFramer: public MPEGVideoStreamFramer {
public:
  static MPEG4VideoStreamFramer* createNew(UsageEnvironment& env,
                                           FramedSource* inputSource);

  u_int8_t profile_and_level_indication() const { return fProfileAndLevelIndication; }
  unsigned char* getConfigBytes(unsigned& numBytes) const;

  void setConfigInfo(u_int8_t profileAndLevelIndication, char const* configStr);
      // Assigns the "profile_and_level_indication" number, and the 'config' bytes.
      // If this function is not called, then this data is only assigned later,
      // when it appears in the input stream.

protected:
  MPEG4VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource,
                         Boolean createParser = True);
      // called only by createNew(), or by subclass constructors
  virtual ~MPEG4VideoStreamFramer();

  void startNewConfig();
  void appendToNewConfig(unsigned char* newConfigBytes, unsigned numNewBytes);
  void completeNewConfig();

private:
  // redefined virtual functions:
  virtual Boolean isMPEG4VideoStreamFramer() const;

protected:
  u_int8_t fProfileAndLevelIndication;
  unsigned char* fConfigBytes;
  unsigned fNumConfigBytes;

private:
  unsigned char* fNewConfigBytes;
  unsigned fNumNewConfigBytes;
  friend class MPEG4VideoStreamParser; // hack
};

#endif
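A minimal sketch of wrapping a raw MPEG-4 elementary-stream source in the framer above, and pre-loading its 'config' data via "setConfigInfo()" so that it need not first be parsed out of the stream. "env" and "byteSource" are assumed to exist; the profile/level number and config string are hypothetical:

  MPEG4VideoStreamFramer* framer
    = MPEG4VideoStreamFramer::createNew(*env, byteSource);
  framer->setConfigInfo(3 /* hypothetical profile_and_level_indication */,
                        "000001B003" /* hypothetical 'config' hex string */);
  // The framer now delivers discrete VS/VO/VOL headers, GOV headers, and VOP
  // frames to whatever sink is placed downstream of it.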
live/liveMedia/include/AudioRTPSink.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A generic RTP sink for audio codecs (abstract base class)
// C++ header

#ifndef _AUDIO_RTP_SINK_HH
#define _AUDIO_RTP_SINK_HH

#ifndef _MULTI_FRAMED_RTP_SINK_HH
#include "MultiFramedRTPSink.hh"
#endif

class AudioRTPSink: public MultiFramedRTPSink {
protected:
  AudioRTPSink(UsageEnvironment& env, Groupsock* rtpgs,
               unsigned char rtpPayloadType, unsigned rtpTimestampFrequency,
               char const* rtpPayloadFormatName, unsigned numChannels = 1);
      // (we're an abstract base class)
  virtual ~AudioRTPSink();

private:
  // redefined virtual functions:
  virtual char const* sdpMediaType() const;
};

#endif
live/liveMedia/include/AMRAudioRTPSink.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// RTP sink for AMR audio (RFC 4867)
// C++ header

#ifndef _AMR_AUDIO_RTP_SINK_HH
#define _AMR_AUDIO_RTP_SINK_HH

#ifndef _AUDIO_RTP_SINK_HH
#include "AudioRTPSink.hh"
#endif

class AMRAudioRTPSink: public AudioRTPSink {
public:
  static AMRAudioRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs,
                                    unsigned char rtpPayloadFormat,
                                    Boolean sourceIsWideband = False,
                                    unsigned numChannelsInSource = 1);

  Boolean sourceIsWideband() const { return fSourceIsWideband; }

protected:
  AMRAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
                  unsigned char rtpPayloadFormat,
                  Boolean sourceIsWideband, unsigned numChannelsInSource);
      // called only by createNew()
  virtual ~AMRAudioRTPSink();

private:
  // redefined virtual functions:
  virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
  virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
                                      unsigned char* frameStart,
                                      unsigned numBytesInFrame,
                                      struct timeval framePresentationTime,
                                      unsigned numRemainingBytes);
  virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
                                                 unsigned numBytesInFrame) const;
  virtual unsigned specialHeaderSize() const;
  virtual char const* auxSDPLine();

private:
  Boolean fSourceIsWideband;
  char* fFmtpSDPLine;
};

#endif

live/liveMedia/include/FileServerMediaSubsession.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from a file.
// C++ header

#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
#define _FILE_SERVER_MEDIA_SUBSESSION_HH

#ifndef _ON_DEMAND_SERVER_MEDIA_SUBSESSION_HH
#include "OnDemandServerMediaSubsession.hh"
#endif

class FileServerMediaSubsession: public OnDemandServerMediaSubsession {
protected: // we're a virtual base class
  FileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
                            Boolean reuseFirstSource);
  virtual ~FileServerMediaSubsession();

protected:
  char const* fFileName;
  u_int64_t fFileSize; // if known
};

#endif
live/liveMedia/include/PassiveServerMediaSubsession.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A 'ServerMediaSubsession' object that represents an existing
// 'RTPSink', rather than one that creates new 'RTPSink's on demand.
// C++ header

#ifndef _PASSIVE_SERVER_MEDIA_SUBSESSION_HH
#define _PASSIVE_SERVER_MEDIA_SUBSESSION_HH

#ifndef _SERVER_MEDIA_SESSION_HH
#include "ServerMediaSession.hh"
#endif
#ifndef _RTP_SINK_HH
#include "RTPSink.hh"
#endif
#ifndef _RTCP_HH
#include "RTCP.hh"
#endif

class PassiveServerMediaSubsession: public ServerMediaSubsession {
public:
  static PassiveServerMediaSubsession* createNew(RTPSink& rtpSink,
                                                 RTCPInstance* rtcpInstance = NULL);

protected:
  PassiveServerMediaSubsession(RTPSink& rtpSink, RTCPInstance* rtcpInstance);
      // called only by createNew();
  virtual ~PassiveServerMediaSubsession();

protected: // redefined virtual functions
  virtual char const* sdpLines();
  virtual void getStreamParameters(unsigned clientSessionId,
                                   netAddressBits clientAddress,
                                   Port const& clientRTPPort,
                                   Port const& clientRTCPPort,
                                   int tcpSocketNum,
                                   unsigned char rtpChannelId,
                                   unsigned char rtcpChannelId,
                                   netAddressBits& destinationAddress,
                                   u_int8_t& destinationTTL,
                                   Boolean& isMulticast,
                                   Port& serverRTPPort,
                                   Port& serverRTCPPort,
                                   void*& streamToken);
  virtual void startStream(unsigned clientSessionId, void* streamToken,
                           TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData,
                           unsigned short& rtpSeqNum, unsigned& rtpTimestamp,
                           ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
                           void* serverRequestAlternativeByteHandlerClientData);
  virtual float getCurrentNPT(void* streamToken);
  virtual void deleteStream(unsigned clientSessionId, void*& streamToken);

protected:
  char* fSDPLines;

private:
  RTPSink& fRTPSink;
  RTCPInstance* fRTCPInstance;
  HashTable* fClientRTCPSourceRecords; // indexed by client session id;
      // used to implement RTCP "RR" handling
};

#endif
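A minimal sketch of the 'passive' pattern this class supports: an "RTPSink" that is already streaming (e.g., to a multicast group) is wrapped in a subsession so that an RTSP server can describe it to clients. "rtpSink", "rtcp", "rtspServer", and "sms" (a "ServerMediaSession") are assumed to have been created elsewhere:

  sms->addSubsession(PassiveServerMediaSubsession::createNew(*rtpSink, rtcp));
  rtspServer->addServerMediaSession(sms);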
live/liveMedia/include/AMRAudioFileServerMediaSubsession.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from an AMR audio file.
// C++ header

#ifndef _AMR_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
#define _AMR_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH

#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
#include "FileServerMediaSubsession.hh"
#endif

class AMRAudioFileServerMediaSubsession: public FileServerMediaSubsession {
public:
  static AMRAudioFileServerMediaSubsession*
  createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);

private:
  AMRAudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
                                    Boolean reuseFirstSource);
      // called only by createNew();
  virtual ~AMRAudioFileServerMediaSubsession();

private: // redefined virtual functions
  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
                                              unsigned& estBitrate);
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                    unsigned char rtpPayloadTypeIfDynamic,
                                    FramedSource* inputSource);
};

#endif
live/liveMedia/include/MP3AudioFileServerMediaSubsession.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from an MP3 audio file.
// (Actually, any MPEG-1 or MPEG-2 audio file should work.)
// C++ header

#ifndef _MP3_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
#define _MP3_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH

#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
#include "FileServerMediaSubsession.hh"
#endif
#ifndef _MP3_ADU_INTERLEAVING_HH
#include "MP3ADUinterleaving.hh"
#endif
#ifndef _MP3_ADU_HH
#include "MP3ADU.hh"
#endif

class MP3AudioFileServerMediaSubsession: public FileServerMediaSubsession {
public:
  static MP3AudioFileServerMediaSubsession*
  createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
            Boolean generateADUs, Interleaving* interleaving);
      // Note: "interleaving" is used only if "generateADUs" is True,
      // (and a value of NULL means 'no interleaving')

protected:
  MP3AudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
                                    Boolean reuseFirstSource,
                                    Boolean generateADUs,
                                    Interleaving* interleaving);
      // called only by createNew();
  virtual ~MP3AudioFileServerMediaSubsession();

  FramedSource* createNewStreamSourceCommon(FramedSource* baseMP3Source,
                                            unsigned mp3NumBytes,
                                            unsigned& estBitrate);
  void getBaseStreams(FramedSource* frontStream,
                      FramedSource*& sourceMP3Stream,
                      ADUFromMP3Source*& aduStream /*if any*/);

protected: // redefined virtual functions
  virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT,
                                double streamDuration, u_int64_t& numBytes);
  virtual void setStreamSourceScale(FramedSource* inputSource, float scale);
  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
                                              unsigned& estBitrate);
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                    unsigned char rtpPayloadTypeIfDynamic,
                                    FramedSource* inputSource);
  virtual void testScaleFactor(float& scale);
  virtual float duration() const;

protected:
  Boolean fGenerateADUs;
  Interleaving* fInterleaving;
  float fFileDuration;
};

#endif
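A minimal sketch of serving an MP3 file on demand via RTSP using the subsession declared above. "env" is assumed to exist; the port number, stream name, and file name are hypothetical:

  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "mp3AudioTest", "song.mp3",
                                    "Session streamed by a liveMedia server");
  sms->addSubsession(MP3AudioFileServerMediaSubsession
                     ::createNew(*env, "song.mp3", True /*reuse first source*/,
                                 False /*don't generate ADUs*/,
                                 NULL /*no interleaving*/));
  rtspServer->addServerMediaSession(sms);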
live/liveMedia/include/MPEG1or2VideoFileServerMediaSubsession.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from an MPEG-1 or 2 Elementary Stream video file.
// C++ header

#ifndef _MPEG_1OR2_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
#define _MPEG_1OR2_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH

#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
#include "FileServerMediaSubsession.hh"
#endif

class MPEG1or2VideoFileServerMediaSubsession: public FileServerMediaSubsession {
public:
  static MPEG1or2VideoFileServerMediaSubsession*
  createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
            Boolean iFramesOnly = False,
            double vshPeriod = 5.0
            /* how often (in seconds) to inject a Video_Sequence_Header,
               if one doesn't already appear in the stream */);

private:
  MPEG1or2VideoFileServerMediaSubsession(UsageEnvironment& env,
                                         char const* fileName,
                                         Boolean reuseFirstSource,
                                         Boolean iFramesOnly, double vshPeriod);
      // called only by createNew();
  virtual ~MPEG1or2VideoFileServerMediaSubsession();

private: // redefined virtual functions
  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
                                              unsigned& estBitrate);
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                    unsigned char rtpPayloadTypeIfDynamic,
                                    FramedSource* inputSource);

private:
  Boolean fIFramesOnly;
  double fVSHPeriod;
};

#endif
live/liveMedia/include/OnDemandServerMediaSubsession.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand.
// C++ header

#ifndef _ON_DEMAND_SERVER_MEDIA_SUBSESSION_HH
#define _ON_DEMAND_SERVER_MEDIA_SUBSESSION_HH

#ifndef _SERVER_MEDIA_SESSION_HH
#include "ServerMediaSession.hh"
#endif
#ifndef _RTP_SINK_HH
#include "RTPSink.hh"
#endif
#ifndef _BASIC_UDP_SINK_HH
#include "BasicUDPSink.hh"
#endif
#ifndef _RTCP_HH
#include "RTCP.hh"
#endif

class OnDemandServerMediaSubsession: public ServerMediaSubsession {
protected: // we're a virtual base class
  OnDemandServerMediaSubsession(UsageEnvironment& env, Boolean reuseFirstSource,
                                portNumBits initialPortNum = 6970);
  virtual ~OnDemandServerMediaSubsession();

protected: // redefined virtual functions
  virtual char const* sdpLines();
  virtual void getStreamParameters(unsigned clientSessionId,
                                   netAddressBits clientAddress,
                                   Port const& clientRTPPort,
                                   Port const& clientRTCPPort,
                                   int tcpSocketNum,
                                   unsigned char rtpChannelId,
                                   unsigned char rtcpChannelId,
                                   netAddressBits& destinationAddress,
                                   u_int8_t& destinationTTL,
                                   Boolean& isMulticast,
                                   Port& serverRTPPort,
                                   Port& serverRTCPPort,
                                   void*& streamToken);
  virtual void startStream(unsigned clientSessionId, void* streamToken,
                           TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData,
                           unsigned short& rtpSeqNum, unsigned& rtpTimestamp,
                           ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
                           void* serverRequestAlternativeByteHandlerClientData);
  virtual void pauseStream(unsigned clientSessionId, void* streamToken);
  virtual void seekStream(unsigned clientSessionId, void* streamToken,
                          double& seekNPT, double streamDuration,
                          u_int64_t& numBytes);
  virtual void seekStream(unsigned clientSessionId, void* streamToken,
                          char*& absStart, char*& absEnd);
  virtual void nullSeekStream(unsigned clientSessionId, void* streamToken);
  virtual void setStreamScale(unsigned clientSessionId, void* streamToken,
                              float scale);
  virtual float getCurrentNPT(void* streamToken);
  virtual FramedSource* getStreamSource(void* streamToken);
  virtual void deleteStream(unsigned clientSessionId, void*& streamToken);

protected: // new virtual functions, possibly redefined by subclasses
  virtual char const* getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource);
  virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT,
                                double streamDuration, u_int64_t& numBytes);
      // This routine is used to seek by relative (i.e., NPT) time.
      // "streamDuration", if >0.0, specifies how much data to stream,
      // past "seekNPT".  (If <=0.0, all remaining data is streamed.)
      // "numBytes" returns the size (in bytes) of the data to be streamed,
      // or 0 if unknown or unlimited.
  virtual void seekStreamSource(FramedSource* inputSource,
                                char*& absStart, char*& absEnd);
      // This routine is used to seek by 'absolute' time.
      // "absStart" should be a string of the form "YYYYMMDDTHHMMSSZ"
      // or "YYYYMMDDTHHMMSS.<frac>Z".
      // "absEnd" should be either NULL (for no end time), or a string of the
      // same form as "absStart".
      // These strings may be modified in-place, or can be reassigned to a
      // newly-allocated value (after delete[]ing the original).
  virtual void setStreamSourceScale(FramedSource* inputSource, float scale);
  virtual void closeStreamSource(FramedSource* inputSource);

protected: // new virtual functions, defined by all subclasses
  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
                                              unsigned& estBitrate) = 0;
      // "estBitrate" is the stream's estimated bitrate, in kbps
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                    unsigned char rtpPayloadTypeIfDynamic,
                                    FramedSource* inputSource) = 0;

private:
  void setSDPLinesFromRTPSink(RTPSink* rtpSink, FramedSource* inputSource,
                              unsigned estBitrate);
      // used to implement "sdpLines()"

protected:
  char* fSDPLines;
  HashTable* fDestinationsHashTable; // indexed by client session id

private:
  Boolean fReuseFirstSource;
  portNumBits fInitialPortNum;
  void* fLastStreamToken;
  char fCNAME[100]; // for RTCP
  friend class StreamState;
};

// A class that represents the state of an ongoing stream.  This is used only
// internally, in the implementation of "OnDemandServerMediaSubsession", but we
// expose the definition here, in case subclasses of
// "OnDemandServerMediaSubsession" want to access it.

class Destinations {
public:
  Destinations(struct in_addr const& destAddr,
               Port const& rtpDestPort, Port const& rtcpDestPort)
    : isTCP(False), addr(destAddr), rtpPort(rtpDestPort), rtcpPort(rtcpDestPort) {
  }
  Destinations(int tcpSockNum, unsigned char rtpChanId, unsigned char rtcpChanId)
    : isTCP(True), rtpPort(0) /*dummy*/, rtcpPort(0) /*dummy*/,
      tcpSocketNum(tcpSockNum), rtpChannelId(rtpChanId), rtcpChannelId(rtcpChanId) {
  }

public:
  Boolean isTCP;
  struct in_addr addr;
  Port rtpPort;
  Port rtcpPort;
  int tcpSocketNum;
  unsigned char rtpChannelId, rtcpChannelId;
};

class StreamState {
public:
  StreamState(OnDemandServerMediaSubsession& master,
              Port const& serverRTPPort, Port const& serverRTCPPort,
              RTPSink* rtpSink, BasicUDPSink* udpSink,
              unsigned totalBW, FramedSource* mediaSource,
              Groupsock* rtpGS, Groupsock* rtcpGS);
  virtual ~StreamState();

  void startPlaying(Destinations* destinations,
                    TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData,
                    ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
                    void* serverRequestAlternativeByteHandlerClientData);
  void pause();
  void endPlaying(Destinations* destinations);
  void reclaim();

  unsigned& referenceCount() { return fReferenceCount; }

  Port const& serverRTPPort() const { return fServerRTPPort; }
  Port const& serverRTCPPort() const { return fServerRTCPPort; }

  RTPSink* rtpSink() const { return fRTPSink; }

  float streamDuration() const { return fStreamDuration; }

  FramedSource* mediaSource() const { return fMediaSource; }
  float& startNPT() { return fStartNPT; }

private:
  OnDemandServerMediaSubsession& fMaster;
  Boolean fAreCurrentlyPlaying;
  unsigned fReferenceCount;

  Port fServerRTPPort, fServerRTCPPort;

  RTPSink* fRTPSink;
  BasicUDPSink* fUDPSink;

  float fStreamDuration;
  unsigned fTotalBW;
  RTCPInstance* fRTCPInstance;

  FramedSource* fMediaSource;
  float fStartNPT; // initial 'normal play time'; reset after each seek

  Groupsock* fRTPgs;
  Groupsock* fRTCPgs;
};

#endif
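A minimal sketch of the two pure-virtual hooks that every concrete "OnDemandServerMediaSubsession" subclass must supply. The class name, bitrate estimate, and the two helper functions are hypothetical; the pairing of a source with a matching RTP sink follows the pattern of the file-based subsessions in this directory:

  class MyAudioSubsession: public OnDemandServerMediaSubsession {
  public:
    MyAudioSubsession(UsageEnvironment& env, Boolean reuseFirstSource)
      : OnDemandServerMediaSubsession(env, reuseFirstSource) {}

  protected:
    virtual FramedSource* createNewStreamSource(unsigned /*clientSessionId*/,
                                                unsigned& estBitrate) {
      estBitrate = 96; // kbps, estimate (hypothetical)
      return createMyAudioSource(envir()); // hypothetical helper
    }
    virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                      unsigned char rtpPayloadTypeIfDynamic,
                                      FramedSource* /*inputSource*/) {
      return createMyAudioRTPSink(envir(), rtpGroupsock,
                                  rtpPayloadTypeIfDynamic); // hypothetical helper
    }
  };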
live/liveMedia/include/BasicUDPSink.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A simple UDP sink (i.e., without RTP or other headers added);
// one frame per packet
// C++ header

#ifndef _BASIC_UDP_SINK_HH
#define _BASIC_UDP_SINK_HH

#ifndef _MEDIA_SINK_HH
#include "MediaSink.hh"
#endif
#ifndef _GROUPSOCK_HH
#include <Groupsock.hh>
#endif

class BasicUDPSink: public MediaSink {
public:
  static BasicUDPSink* createNew(UsageEnvironment& env, Groupsock* gs,
                                 unsigned maxPayloadSize = 1450);

protected:
  BasicUDPSink(UsageEnvironment& env, Groupsock* gs, unsigned maxPayloadSize);
      // called only by createNew()
  virtual ~BasicUDPSink();

private:
  // redefined virtual functions:
  virtual Boolean continuePlaying();

private:
  void continuePlaying1();

  static void afterGettingFrame(void* clientData, unsigned frameSize,
                                unsigned numTruncatedBytes,
                                struct timeval presentationTime,
                                unsigned durationInMicroseconds);
  void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
                          unsigned durationInMicroseconds);

  static void sendNext(void* firstArg);

private:
  Groupsock* fGS;
  unsigned fMaxPayloadSize;
  unsigned char* fOutputBuffer;
  struct timeval fNextSendTime;
};

#endif
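A minimal sketch of sending raw frames over plain UDP (no RTP headers), e.g., for an MPEG transport stream. "env" and "tsSource" are assumed to exist; the destination address, port, and TTL are hypothetical:

  struct in_addr destAddr;
  destAddr.s_addr = our_inet_addr("239.255.42.42"); // hypothetical multicast group
  Groupsock* outputGroupsock = new Groupsock(*env, destAddr, Port(1234), 7 /*TTL*/);

  BasicUDPSink* udpSink
    = BasicUDPSink::createNew(*env, outputGroupsock,
                              1316 /* 7 TS packets per datagram */);
  udpSink->startPlaying(*tsSource, NULL /*afterFunc*/, NULL /*afterClientData*/);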
live/liveMedia/include/AMRAudioFileSink.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// AMR Audio File Sinks
// C++ header

#ifndef _AMR_AUDIO_FILE_SINK_HH
#define _AMR_AUDIO_FILE_SINK_HH

#ifndef _FILE_SINK_HH
#include "FileSink.hh"
#endif

class AMRAudioFileSink: public FileSink {
public:
  static AMRAudioFileSink* createNew(UsageEnvironment& env, char const* fileName,
                                     unsigned bufferSize = 10000,
                                     Boolean oneFilePerFrame = False);
      // (See "FileSink.hh" for a description of these parameters.)

protected:
  AMRAudioFileSink(UsageEnvironment& env, FILE* fid, unsigned bufferSize,
                   char const* perFrameFileNamePrefix);
      // called only by createNew()
  virtual ~AMRAudioFileSink();

protected:
  // redefined virtual functions:
  virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
  virtual void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                 struct timeval presentationTime);

protected:
  Boolean fHaveWrittenHeader;
};

#endif

live/liveMedia/include/DVVideoStreamFramer.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A filter that parses a DV input stream into DV frames to deliver to the
// downstream object
// C++ header

#ifndef _DV_VIDEO_STREAM_FRAMER_HH
#define _DV_VIDEO_STREAM_FRAMER_HH

#ifndef _FRAMED_FILTER_HH
#include "FramedFilter.hh"
#endif

#define DV_DIF_BLOCK_SIZE 80
#define DV_NUM_BLOCKS_PER_SEQUENCE 150
#define DV_SAVED_INITIAL_BLOCKS_SIZE ((DV_NUM_BLOCKS_PER_SEQUENCE+6-1)*DV_DIF_BLOCK_SIZE)
    /* enough data to ensure that it contains an intact 6-block header
       (which occurs at the start of a 150-block sequence) */

class DVVideoStreamFramer: public FramedFilter {
public:
  static DVVideoStreamFramer* createNew(UsageEnvironment& env,
                                        FramedSource* inputSource,
                                        Boolean sourceIsSeekable = False,
                                        Boolean leavePresentationTimesUnmodified = False);
      // Set "sourceIsSeekable" to True if the input source is a seekable object
      // (e.g. a file), and the server that uses us does a seek-to-zero on the
      // source before reading from it.  (Our RTSP server implementation does this.)
  char const* profileName();
  Boolean getFrameParameters(unsigned& frameSize /*bytes*/,
                             double& frameDuration /*microseconds*/);

protected:
  DVVideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource,
                      Boolean sourceIsSeekable,
                      Boolean leavePresentationTimesUnmodified);
      // called only by createNew(), or by subclass constructors
  virtual ~DVVideoStreamFramer();

protected:
  // redefined virtual functions:
  virtual Boolean isDVVideoStreamFramer() const;
  virtual void doGetNextFrame();

protected:
  void getAndDeliverData(); // used to implement "doGetNextFrame()"
  static void afterGettingFrame(void* clientData, unsigned frameSize,
                                unsigned numTruncatedBytes,
                                struct timeval presentationTime,
                                unsigned durationInMicroseconds);
  void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                         struct timeval presentationTime);
  void getProfile();

protected:
  Boolean fLeavePresentationTimesUnmodified;
  void const* fOurProfile;
  struct timeval fNextFramePresentationTime;
  unsigned char fSavedInitialBlocks[DV_SAVED_INITIAL_BLOCKS_SIZE];
  char fInitialBlocksPresent;
  Boolean fSourceIsSeekable;
};

#endif
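A minimal sketch of framing a raw DV byte stream and querying the frame geometry once the DV profile has been determined from the initial DIF blocks; it assumes (perhaps optimistically) that "getFrameParameters()" returns True once the profile is known. "env" and "fileSource" (e.g., a ByteStreamFileSource over a DV file) are assumed to exist:

  DVVideoStreamFramer* dvFramer
    = DVVideoStreamFramer::createNew(*env, fileSource,
                                     True /* the file source is seekable */);
  unsigned frameSize;   // bytes per DV frame
  double frameDuration; // microseconds per frame
  if (dvFramer->getFrameParameters(frameSize, frameDuration)) {
    *env << "DV frame: " << frameSize << " bytes\n";
  }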
live/liveMedia/include/DVVideoFileServerMediaSubsession.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from a DV video file.
// C++ header

#ifndef _DV_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
#define _DV_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH

#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
#include "FileServerMediaSubsession.hh"
#endif

class DVVideoFileServerMediaSubsession: public FileServerMediaSubsession {
public:
  static DVVideoFileServerMediaSubsession*
  createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);

private:
  DVVideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
                                   Boolean reuseFirstSource);
      // called only by createNew();
  virtual ~DVVideoFileServerMediaSubsession();

private: // redefined virtual functions
  virtual char const* getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource);
  virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT,
                                double streamDuration, u_int64_t& numBytes);
  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
                                              unsigned& estBitrate);
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                    unsigned char rtpPayloadTypeIfDynamic,
                                    FramedSource* inputSource);
  virtual float duration() const;

private:
  float fFileDuration; // in seconds
};

#endif

live/liveMedia/include/WAVAudioFileServerMediaSubsession.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from a WAV audio file.
// C++ header

#ifndef _WAV_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
#define _WAV_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH

#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
#include "FileServerMediaSubsession.hh"
#endif

class WAVAudioFileServerMediaSubsession: public FileServerMediaSubsession {
public:
  static WAVAudioFileServerMediaSubsession*
  createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
            Boolean convertToULaw = False);
      // If "convertToULaw" is True, 16-bit audio streams are converted to
      // 8-bit u-law audio prior to streaming.
protected:
  WAVAudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
                                    Boolean reuseFirstSource,
                                    Boolean convertToULaw);
      // called only by createNew();
  virtual ~WAVAudioFileServerMediaSubsession();

protected: // redefined virtual functions
  virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT,
                                double streamDuration, u_int64_t& numBytes);
  virtual void setStreamSourceScale(FramedSource* inputSource, float scale);
  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
                                              unsigned& estBitrate);
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                    unsigned char rtpPayloadTypeIfDynamic,
                                    FramedSource* inputSource);
  virtual void testScaleFactor(float& scale);
  virtual float duration() const;

protected:
  Boolean fConvertToULaw;

  // The following parameters of the input stream are set after
  // "createNewStreamSource" is called:
  unsigned char fAudioFormat;
  unsigned char fBitsPerSample;
  unsigned fSamplingFrequency;
  unsigned fNumChannels;
  float fFileDuration;
};

#endif

live/liveMedia/include/DarwinInjector.hh

// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// An object that redirects one or more RTP/RTCP streams - forming a single
// multimedia session - into a 'Darwin Streaming Server' (for subsequent
// reflection to potentially arbitrarily many remote RTSP clients).
// C++ header

#ifndef _DARWIN_INJECTOR_HH
#define _DARWIN_INJECTOR_HH

#ifndef _RTSP_CLIENT_HH
#include <RTSPClient.hh>
#endif
#ifndef _RTCP_HH
#include <RTCP.hh>
#endif

/*
  To use a "DarwinInjector":
  1/ Create RTP sinks and RTCP instances for each audio or video subsession.
     (Note: These can use 0.0.0.0 for the address, and 0 for the port number,
     of each 'groupsock'.)
  2/ Call "addStream()" for each.
  3/ Call "setDestination()" to specify the remote Darwin Streaming Server.
     Note: You must have 'write' permission on the Darwin Streaming Server.
     This can be set up using a "qtaccess" file in the server's 'movies'
     directory.  For example, the following "qtaccess" file allows anyone to
     play streams from the server, but allows only valid users to inject
     streams *into* the server:
         <Limit WRITE>
         require valid-user
         </Limit>
         require any-user
     Use the "remoteUserName" and "remotePassword" parameters to
     "setDestination()", as appropriate.
  4/ Call "startPlaying" on each RTP sink (from the corresponding 'source');
     a sketch of these steps appears after the class definition below.
*/

class SubstreamDescriptor; // forward

class DarwinInjector: public Medium {
public:
  static DarwinInjector* createNew(UsageEnvironment& env,
                                   char const* applicationName = "DarwinInjector",
                                   int verbosityLevel = 0);

  static Boolean lookupByName(UsageEnvironment& env, char const* name,
                              DarwinInjector*& result);

  void addStream(RTPSink* rtpSink, RTCPInstance* rtcpInstance);

  Boolean setDestination(char const* remoteRTSPServerNameOrAddress,
                         char const* remoteFileName,
                         char const* sessionName = "",
                         char const* sessionInfo = "",
                         portNumBits remoteRTSPServerPortNumber = 554,
                         char const* remoteUserName = "",
                         char const* remotePassword = "",
                         char const* sessionAuthor = "",
                         char const* sessionCopyright = "",
                         int timeout = -1);

private: // redefined virtual functions
  virtual Boolean isDarwinInjector() const;

private:
  DarwinInjector(UsageEnvironment& env, char const* applicationName,
                 int verbosityLevel);
      // called only by createNew()
  virtual ~DarwinInjector();

  static void genericResponseHandler(RTSPClient* rtspClient, int responseCode,
                                     char* responseString);
  void genericResponseHandler1(int responseCode, char* responseString);

private:
  char const* fApplicationName;
  int fVerbosityLevel;
  RTSPClient* fRTSPClient;
  unsigned fSubstreamSDPSizes;
  SubstreamDescriptor* fHeadSubstream;
  SubstreamDescriptor* fTailSubstream;
  MediaSession* fSession;
  unsigned fLastTrackId;
  char fWatchVariable;
  int fResultCode;
  char* fResultString;
};

#endif
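A minimal sketch of the four steps listed in the comment above (modeled on the "testDarwinInjector" demo program in "testProgs"). "env", "videoSink" (an RTPSink), "videoRTCP" (an RTCPInstance), and "videoSource" are assumed to have been created already; the server name, remote file name, and credentials are hypothetical:

  DarwinInjector* injector = DarwinInjector::createNew(*env);
  injector->addStream(videoSink, videoRTCP);

  if (!injector->setDestination("darwin.example.com", "test.sdp",
                                "testSession", "A test session",
                                554, "injectorUser", "secret")) {
    *env << "setDestination() failed: " << env->getResultMsg() << "\n";
  } else {
    videoSink->startPlaying(*videoSource, NULL, NULL);
  }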
// C++ header #ifndef _MPEG_1OR2_DEMUXED_SERVER_MEDIA_SUBSESSION_HH #define _MPEG_1OR2_DEMUXED_SERVER_MEDIA_SUBSESSION_HH #ifndef _ON_DEMAND_SERVER_MEDIA_SUBSESSION_HH #include "OnDemandServerMediaSubsession.hh" #endif #ifndef _MPEG_1OR2_FILE_SERVER_DEMUX_HH #include "MPEG1or2FileServerDemux.hh" #endif class MPEG1or2DemuxedServerMediaSubsession: public OnDemandServerMediaSubsession{ public: static MPEG1or2DemuxedServerMediaSubsession* createNew(MPEG1or2FileServerDemux& demux, u_int8_t streamIdTag, Boolean reuseFirstSource, Boolean iFramesOnly = False, double vshPeriod = 5.0); // The last two parameters are relevant for video streams only private: MPEG1or2DemuxedServerMediaSubsession(MPEG1or2FileServerDemux& demux, u_int8_t streamIdTag, Boolean reuseFirstSource, Boolean iFramesOnly, double vshPeriod); // called only by createNew(); virtual ~MPEG1or2DemuxedServerMediaSubsession(); private: // redefined virtual functions virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); virtual float duration() const; private: MPEG1or2FileServerDemux& fOurDemux; u_int8_t fStreamIdTag; Boolean fIFramesOnly; // for video streams double fVSHPeriod; // for video streams }; #endif live/liveMedia/include/AMRAudioRTPSource.hh000444 001751 000000 00000003515 12265042432 020756 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // AMR Audio RTP Sources (RFC 4867) // C++ header #ifndef _AMR_AUDIO_RTP_SOURCE_HH #define _AMR_AUDIO_RTP_SOURCE_HH #ifndef _RTP_SOURCE_HH #include "RTPSource.hh" #endif #ifndef _AMR_AUDIO_SOURCE_HH #include "AMRAudioSource.hh" #endif class AMRAudioRTPSource { public: static AMRAudioSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, RTPSource*& resultRTPSource, unsigned char rtpPayloadFormat, Boolean isWideband = False, unsigned numChannels = 1, Boolean isOctetAligned = True, unsigned interleaving = 0, // relevant only if "isOctetAligned" // The maximum # of frame-blocks in a group // 0 means: no interleaving Boolean robustSortingOrder = False, // relevant only if "isOctetAligned" Boolean CRCsArePresent = False // relevant only if "isOctetAligned" ); // This returns a source to read from, but "resultRTPSource" will // point to RTP-related state. 
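  // For example, a minimal sketch (assuming "env" and "rtpGroupsock" already
  // exist, and 96 is a dynamic RTP payload type chosen for this stream):
  //   RTPSource* rtpSource;
  //   AMRAudioSource* audioSource
  //     = AMRAudioRTPSource::createNew(env, rtpGroupsock, rtpSource, 96);
  //   // read AMR frames from "audioSource"; "rtpSource" exposes RTP-level state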
}; #endif live/liveMedia/include/AMRAudioSource.hh000444 001751 000000 00000003261 12265042432 020366 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A source object for AMR audio sources // C++ header #ifndef _AMR_AUDIO_SOURCE_HH #define _AMR_AUDIO_SOURCE_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif class AMRAudioSource: public FramedSource { public: Boolean isWideband() const { return fIsWideband; } unsigned numChannels() const { return fNumChannels; } u_int8_t lastFrameHeader() const { return fLastFrameHeader; } // The frame header for the most recently read frame (RFC 4867, sec. 5.3) protected: AMRAudioSource(UsageEnvironment& env, Boolean isWideband, unsigned numChannels); // virtual base class virtual ~AMRAudioSource(); private: // redefined virtual functions: virtual char const* MIMEtype() const; virtual Boolean isAMRAudioSource() const; protected: Boolean fIsWideband; unsigned fNumChannels; u_int8_t fLastFrameHeader; }; #endif live/liveMedia/include/MPEG2TransportStreamFromPESSource.hh000444 001751 000000 00000004342 12265042432 024065 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A filter for converting a stream of MPEG PES packets to a MPEG-2 Transport Stream // C++ header #ifndef _MPEG2_TRANSPORT_STREAM_FROM_PES_SOURCE_HH #define _MPEG2_TRANSPORT_STREAM_FROM_PES_SOURCE_HH #ifndef _MPEG2_TRANSPORT_STREAM_MULTIPLEXOR_HH #include "MPEG2TransportStreamMultiplexor.hh" #endif #ifndef _MPEG_1OR2_DEMUXED_ELEMENTARY_STREAM_HH #include "MPEG1or2DemuxedElementaryStream.hh" #endif class MPEG2TransportStreamFromPESSource: public MPEG2TransportStreamMultiplexor { public: static MPEG2TransportStreamFromPESSource* createNew(UsageEnvironment& env, MPEG1or2DemuxedElementaryStream* inputSource); protected: MPEG2TransportStreamFromPESSource(UsageEnvironment& env, MPEG1or2DemuxedElementaryStream* inputSource); // called only by createNew() virtual ~MPEG2TransportStreamFromPESSource(); private: // Redefined virtual functions: virtual void doStopGettingFrames(); virtual void awaitNewBuffer(unsigned char* oldBuffer); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); private: MPEG1or2DemuxedElementaryStream* fInputSource; unsigned char* fInputBuffer; }; #endif live/liveMedia/include/MPEG2TransportFileServerMediaSubsession.hh000444 001751 000000 00000012462 12265042432 025343 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a MPEG-2 Transport Stream file. 
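// For example, a minimal sketch (assuming "sms" is an existing
// "ServerMediaSession"; "test.ts" is a hypothetical Transport Stream file, and
// "test.tsx" its index file - needed only if 'trick play' support is desired -
// as generated by the "MPEG2TransportStreamIndexer" utility):
//   sms->addSubsession(MPEG2TransportFileServerMediaSubsession
//                      ::createNew(env, "test.ts", "test.tsx", False));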
// C++ header #ifndef _MPEG2_TRANSPORT_FILE_SERVER_MEDIA_SUBSESSION_HH #define _MPEG2_TRANSPORT_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif #ifndef _MPEG2_TRANSPORT_STREAM_FRAMER_HH #include "MPEG2TransportStreamFramer.hh" #endif #ifndef _BYTE_STREAM_FILE_SOURCE_HH #include "ByteStreamFileSource.hh" #endif #ifndef _MPEG2_TRANSPORT_STREAM_TRICK_MODE_FILTER_HH #include "MPEG2TransportStreamTrickModeFilter.hh" #endif #ifndef _MPEG2_TRANSPORT_STREAM_FROM_ES_SOURCE_HH #include "MPEG2TransportStreamFromESSource.hh" #endif class ClientTrickPlayState; // forward class MPEG2TransportFileServerMediaSubsession: public FileServerMediaSubsession{ public: static MPEG2TransportFileServerMediaSubsession* createNew(UsageEnvironment& env, char const* dataFileName, char const* indexFileName, Boolean reuseFirstSource); protected: MPEG2TransportFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, MPEG2TransportStreamIndexFile* indexFile, Boolean reuseFirstSource); // called only by createNew(); virtual ~MPEG2TransportFileServerMediaSubsession(); virtual ClientTrickPlayState* newClientTrickPlayState(); private: // redefined virtual functions // Note that because - to implement 'trick play' operations - we're operating on // more than just the input source, we reimplement some functions that are // already implemented in "OnDemandServerMediaSubsession", rather than // reimplementing "seekStreamSource()" and "setStreamSourceScale()": virtual void startStream(unsigned clientSessionId, void* streamToken, TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData, unsigned short& rtpSeqNum, unsigned& rtpTimestamp, ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler, void* serverRequestAlternativeByteHandlerClientData); virtual void pauseStream(unsigned clientSessionId, void* streamToken); virtual void seekStream(unsigned clientSessionId, void* streamToken, double& seekNPT, double streamDuration, u_int64_t& numBytes); virtual void setStreamScale(unsigned clientSessionId, void* streamToken, float scale); virtual void deleteStream(unsigned clientSessionId, void*& streamToken); // The virtual functions that are usually implemented by "ServerMediaSubsession"s: virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); virtual void testScaleFactor(float& scale); virtual float duration() const; private: ClientTrickPlayState* lookupClient(unsigned clientSessionId); private: MPEG2TransportStreamIndexFile* fIndexFile; float fDuration; HashTable* fClientSessionHashTable; // indexed by client session id }; // This class encapsulates the 'trick play' state for each current client (for // a given "MPEG2TransportFileServerMediaSubsession" - i.e., Transport Stream file). // It is used only within the implementation of "MPEG2TransportFileServerMediaSubsession", but is included here, // in case subclasses of "MPEG2TransportFileServerMediaSubsession" want to use it.
class ClientTrickPlayState { public: ClientTrickPlayState(MPEG2TransportStreamIndexFile* indexFile); // Functions to bring "fNPT", "fTSRecordNum" and "fIxRecordNum" in sync: unsigned long updateStateFromNPT(double npt, double seekDuration); void updateStateOnScaleChange(); void updateStateOnPlayChange(Boolean reverseToPreviousVSH); void handleStreamDeletion(); void setSource(MPEG2TransportStreamFramer* framer); void setNextScale(float nextScale) { fNextScale = nextScale; } Boolean areChangingScale() const { return fNextScale != fScale; } protected: void updateTSRecordNum(); void reseekOriginalTransportStreamSource(); protected: MPEG2TransportStreamIndexFile* fIndexFile; ByteStreamFileSource* fOriginalTransportStreamSource; MPEG2TransportStreamTrickModeFilter* fTrickModeFilter; MPEG2TransportStreamFromESSource* fTrickPlaySource; MPEG2TransportStreamFramer* fFramer; float fScale, fNextScale, fNPT; unsigned long fTSRecordNum, fIxRecordNum; }; #endif live/liveMedia/include/MPEG2TransportStreamFramer.hh000444 001751 000000 00000005311 12265042432 022642 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that passes through (unchanged) chunks that contain an integral number // of MPEG-2 Transport Stream packets, but returning (in "fDurationInMicroseconds") // an updated estimate of the time gap between chunks. 
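// For example, a minimal sketch (assuming "test.ts" is a hypothetical
// Transport Stream file; "ByteStreamFileSource" is declared elsewhere in this
// library):
//   FramedSource* fileSource = ByteStreamFileSource::createNew(env, "test.ts");
//   MPEG2TransportStreamFramer* framer
//     = MPEG2TransportStreamFramer::createNew(env, fileSource);
//   // "framer" can now be played into e.g. an RTP sink via "startPlaying()"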
// C++ header #ifndef _MPEG2_TRANSPORT_STREAM_FRAMER_HH #define _MPEG2_TRANSPORT_STREAM_FRAMER_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif #ifndef _HASH_TABLE_HH #include "HashTable.hh" #endif class MPEG2TransportStreamFramer: public FramedFilter { public: static MPEG2TransportStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource); u_int64_t tsPacketCount() const { return fTSPacketCount; } void changeInputSource(FramedSource* newInputSource) { fInputSource = newInputSource; } void clearPIDStatusTable(); void setNumTSPacketsToStream(unsigned long numTSRecordsToStream); void setPCRLimit(float pcrLimit); protected: MPEG2TransportStreamFramer(UsageEnvironment& env, FramedSource* inputSource); // called only by createNew() virtual ~MPEG2TransportStreamFramer(); private: // Redefined virtual functions: virtual void doGetNextFrame(); virtual void doStopGettingFrames(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, struct timeval presentationTime); Boolean updateTSPacketDurationEstimate(unsigned char* pkt, double timeNow); private: u_int64_t fTSPacketCount; double fTSPacketDurationEstimate; HashTable* fPIDStatusTable; u_int64_t fTSPCRCount; Boolean fLimitNumTSPacketsToStream; unsigned long fNumTSPacketsToStream; // used iff "fLimitNumTSPacketsToStream" is True Boolean fLimitTSPacketsToStreamByPCR; float fPCRLimit; // used iff "fLimitTSPacketsToStreamByPCR" is True }; #endif live/liveMedia/include/MPEG4LATMAudioRTPSink.hh000444 001751 000000 00000004757 12265042432 021306 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for MPEG-4 audio, using LATM multiplexing (RFC 3016) // (Note that the initial 'size' field is assumed to be present at the start of // each frame.) 
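// For example, a minimal sketch (the payload type 96, timestamp frequency
// 44100, and StreamMuxConfig string here are illustrative values only; a real
// application must use the parameters of its actual audio stream):
//   MPEG4LATMAudioRTPSink* audioSink
//     = MPEG4LATMAudioRTPSink::createNew(env, rtpGroupsock, 96,
//                                        44100, "40002410adca00", 2);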
// C++ header #ifndef _MPEG4_LATM_AUDIO_RTP_SINK_HH #define _MPEG4_LATM_AUDIO_RTP_SINK_HH #ifndef _AUDIO_RTP_SINK_HH #include "AudioRTPSink.hh" #endif class MPEG4LATMAudioRTPSink: public AudioRTPSink { public: static MPEG4LATMAudioRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency, char const* streamMuxConfigString, unsigned numChannels, Boolean allowMultipleFramesPerPacket = False); protected: MPEG4LATMAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency, char const* streamMuxConfigString, unsigned numChannels, Boolean allowMultipleFramesPerPacket); // called only by createNew() virtual ~MPEG4LATMAudioRTPSink(); private: // redefined virtual functions: virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; virtual char const* auxSDPLine(); // for the "a=fmtp:" SDP line private: char const* fStreamMuxConfigString; char* fFmtpSDPLine; Boolean fAllowMultipleFramesPerPacket; }; #endif live/liveMedia/include/MPEG4VideoStreamDiscreteFramer.hh000444 001751 000000 00000005457 12265042432 023414 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simplified version of "MPEG4VideoStreamFramer" that takes only complete, // discrete frames (rather than an arbitrary byte stream) as input. // This avoids the parsing and data copying overhead of the full // "MPEG4VideoStreamFramer". 
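// For example, a minimal sketch (assuming "mpeg4Source" is a hypothetical
// "FramedSource" that delivers one complete MPEG-4 video frame at a time, and
// "videoSink" is an existing "MPEG4ESVideoRTPSink"):
//   MPEG4VideoStreamDiscreteFramer* framer
//     = MPEG4VideoStreamDiscreteFramer::createNew(env, mpeg4Source);
//   videoSink->startPlaying(*framer, afterPlaying, NULL);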
// C++ header #ifndef _MPEG4_VIDEO_STREAM_DISCRETE_FRAMER_HH #define _MPEG4_VIDEO_STREAM_DISCRETE_FRAMER_HH #ifndef _MPEG4_VIDEO_STREAM_FRAMER_HH #include "MPEG4VideoStreamFramer.hh" #endif class MPEG4VideoStreamDiscreteFramer: public MPEG4VideoStreamFramer { public: static MPEG4VideoStreamDiscreteFramer* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean leavePresentationTimesUnmodified = False); protected: MPEG4VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean leavePresentationTimesUnmodified); // called only by createNew() virtual ~MPEG4VideoStreamDiscreteFramer(); protected: // redefined virtual functions: virtual void doGetNextFrame(); protected: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); Boolean getNextFrameBit(u_int8_t& result); Boolean getNextFrameBits(unsigned numBits, u_int32_t& result); // Which are used by: void analyzeVOLHeader(); protected: Boolean fLeavePresentationTimesUnmodified; u_int32_t vop_time_increment_resolution; unsigned fNumVTIRBits; // # of bits needed to count to "vop_time_increment_resolution" struct timeval fLastNonBFramePresentationTime; unsigned fLastNonBFrameVop_time_increment; private: unsigned fNumBitsSeenSoFar; // used by the getNextFrameBit*() routines }; #endif live/liveMedia/include/DigestAuthentication.hh000444 001751 000000 00000005731 12265042432 021727 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class used for digest authentication. // C++ header #ifndef _DIGEST_AUTHENTICATION_HH #define _DIGEST_AUTHENTICATION_HH #ifndef _BOOLEAN_HH #include <Boolean.hh> #endif // A class used for digest authentication. // The "realm" and "nonce" fields are supplied by the server // (in a "401 Unauthorized" response). // The "username" and "password" fields are supplied by the client. class Authenticator { public: Authenticator(); Authenticator(char const* username, char const* password, Boolean passwordIsMD5 = False); // If "passwordIsMD5" is True, then "password" is actually the value computed // by md5(<username>:<realm>:<password>) Authenticator(const Authenticator& orig); Authenticator& operator=(const Authenticator& rightSide); virtual ~Authenticator(); void reset(); void setRealmAndNonce(char const* realm, char const* nonce); void setRealmAndRandomNonce(char const* realm); // as above, except that the nonce is created randomly. // (This is used by servers.)
void setUsernameAndPassword(char const* username, char const* password, Boolean passwordIsMD5 = False); // If "passwordIsMD5" is True, then "password" is actually the value computed // by md5(<username>:<realm>:<password>) char const* realm() const { return fRealm; } char const* nonce() const { return fNonce; } char const* username() const { return fUsername; } char const* password() const { return fPassword; } char const* computeDigestResponse(char const* cmd, char const* url) const; // The returned string from this function must later be freed by calling: void reclaimDigestResponse(char const* responseStr) const; private: void resetRealmAndNonce(); void resetUsernameAndPassword(); void assignRealmAndNonce(char const* realm, char const* nonce); void assignUsernameAndPassword(char const* username, char const* password, Boolean passwordIsMD5); void assign(char const* realm, char const* nonce, char const* username, char const* password, Boolean passwordIsMD5); private: char* fRealm; char* fNonce; char* fUsername; char* fPassword; Boolean fPasswordIsMD5; }; #endif live/liveMedia/include/Base64.hh000444 001751 000000 00000003276 12265042432 016636 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Base64 encoding and decoding // C++ header #ifndef _BASE64_HH #define _BASE64_HH #ifndef _BOOLEAN_HH #include "Boolean.hh" #endif unsigned char* base64Decode(char const* in, unsigned& resultSize, Boolean trimTrailingZeros = True); // returns a newly allocated array - of size "resultSize" - that // the caller is responsible for delete[]ing. unsigned char* base64Decode(char const* in, unsigned inSize, unsigned& resultSize, Boolean trimTrailingZeros = True); // As above, but includes the size of the input string (i.e., the number of bytes to decode) as a parameter. // This saves an extra call to "strlen()" if we already know the length of the input string. char* base64Encode(char const* orig, unsigned origLength); // returns a 0-terminated string that // the caller is responsible for delete[]ing. #endif live/liveMedia/include/DVVideoRTPSink.hh000444 001751 000000 00000004157 12265042432 020324 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for DV video (RFC 3189) // (Thanks to Ben Hutchings for prototyping this.) // C++ header #ifndef _DV_VIDEO_RTP_SINK_HH #define _DV_VIDEO_RTP_SINK_HH #ifndef _VIDEO_RTP_SINK_HH #include "VideoRTPSink.hh" #endif #ifndef _DV_VIDEO_STREAM_FRAMER_HH #include "DVVideoStreamFramer.hh" #endif class DVVideoRTPSink: public VideoRTPSink { public: static DVVideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat); char const* auxSDPLineFromFramer(DVVideoStreamFramer* framerSource); protected: DVVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat); // called only by createNew() virtual ~DVVideoRTPSink(); private: // redefined virtual functions: virtual Boolean sourceIsCompatibleWithUs(MediaSource& source); virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual unsigned computeOverflowForNewFrame(unsigned newFrameSize) const; virtual char const* auxSDPLine(); private: char* fFmtpSDPLine; }; #endif live/liveMedia/include/BitVector.hh000444 001751 000000 00000004142 12265042432 017504 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// Bit Vector data structure // C++ header #ifndef _BIT_VECTOR_HH #define _BIT_VECTOR_HH #ifndef _BOOLEAN_HH #include "Boolean.hh" #endif class BitVector { public: BitVector(unsigned char* baseBytePtr, unsigned baseBitOffset, unsigned totNumBits); void setup(unsigned char* baseBytePtr, unsigned baseBitOffset, unsigned totNumBits); void putBits(unsigned from, unsigned numBits); // "numBits" <= 32 void put1Bit(unsigned bit); unsigned getBits(unsigned numBits); // "numBits" <= 32 unsigned get1Bit(); Boolean get1BitBoolean() { return get1Bit() != 0; } void skipBits(unsigned numBits); unsigned curBitIndex() const { return fCurBitIndex; } unsigned totNumBits() const { return fTotNumBits; } unsigned numBitsRemaining() const { return fTotNumBits - fCurBitIndex; } unsigned get_expGolomb(); // Returns the value of the next bits, assuming that they were encoded using an exponential-Golomb code of order 0 private: unsigned char* fBaseBytePtr; unsigned fBaseBitOffset; unsigned fTotNumBits; unsigned fCurBitIndex; }; // A general bit copy operation: void shiftBits(unsigned char* toBasePtr, unsigned toBitOffset, unsigned char const* fromBasePtr, unsigned fromBitOffset, unsigned numBits); #endif live/liveMedia/include/MPEG1or2VideoStreamDiscreteFramer.hh000444 001751 000000 00000006007 12265042432 024024 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simplified version of "MPEG1or2VideoStreamFramer" that takes only // complete, discrete frames (rather than an arbitrary byte stream) as input. // This avoids the parsing and data copying overhead of the full // "MPEG1or2VideoStreamFramer". 
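// For example, a minimal sketch (assuming "esSource" is a hypothetical
// "FramedSource" that delivers one complete MPEG-1/2 video frame at a time):
//   MPEG1or2VideoStreamDiscreteFramer* framer
//     = MPEG1or2VideoStreamDiscreteFramer::createNew(env, esSource);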
// C++ header #ifndef _MPEG1or2_VIDEO_STREAM_DISCRETE_FRAMER_HH #define _MPEG1or2_VIDEO_STREAM_DISCRETE_FRAMER_HH #ifndef _MPEG1or2_VIDEO_STREAM_FRAMER_HH #include "MPEG1or2VideoStreamFramer.hh" #endif #define VSH_MAX_SIZE 1000 class MPEG1or2VideoStreamDiscreteFramer: public MPEG1or2VideoStreamFramer { public: static MPEG1or2VideoStreamDiscreteFramer* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean iFramesOnly = False, // see MPEG1or2VideoStreamFramer.hh double vshPeriod = 5.0, // see MPEG1or2VideoStreamFramer.hh Boolean leavePresentationTimesUnmodified = False); protected: MPEG1or2VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean iFramesOnly, double vshPeriod, Boolean leavePresentationTimesUnmodified); // called only by createNew() virtual ~MPEG1or2VideoStreamDiscreteFramer(); protected: // redefined virtual functions: virtual void doGetNextFrame(); protected: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); protected: Boolean fLeavePresentationTimesUnmodified; struct timeval fLastNonBFramePresentationTime; unsigned fLastNonBFrameTemporal_reference; // A saved copy of the most recently seen 'video_sequence_header', // in case we need to insert it into the stream periodically: unsigned char fSavedVSHBuffer[VSH_MAX_SIZE]; unsigned fSavedVSHSize; double fSavedVSHTimestamp; Boolean fIFramesOnly; double fVSHPeriod; }; #endif live/liveMedia/include/AVIFileSink.hh000444 001751 000000 00000007470 12265042432 017656 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A sink that generates an AVI file from a composite media session // C++ header #ifndef _AVI_FILE_SINK_HH #define _AVI_FILE_SINK_HH #ifndef _MEDIA_SESSION_HH #include "MediaSession.hh" #endif class AVIFileSink: public Medium { public: static AVIFileSink* createNew(UsageEnvironment& env, MediaSession& inputSession, char const* outputFileName, unsigned bufferSize = 20000, unsigned short movieWidth = 240, unsigned short movieHeight = 180, unsigned movieFPS = 15, Boolean packetLossCompensate = False); typedef void (afterPlayingFunc)(void* clientData); Boolean startPlaying(afterPlayingFunc* afterFunc, void* afterClientData); unsigned numActiveSubsessions() const { return fNumSubsessions; } private: AVIFileSink(UsageEnvironment& env, MediaSession& inputSession, char const* outputFileName, unsigned bufferSize, unsigned short movieWidth, unsigned short movieHeight, unsigned movieFPS, Boolean packetLossCompensate); // called only by createNew() virtual ~AVIFileSink(); Boolean continuePlaying(); static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); static void onSourceClosure(void* clientData); void onSourceClosure1(); static void onRTCPBye(void* clientData); void addIndexRecord(class AVIIndexRecord* newIndexRecord); void completeOutputFile(); private: friend class AVISubsessionIOState; MediaSession& fInputSession; FILE* fOutFid; class AVIIndexRecord *fIndexRecordsHead, *fIndexRecordsTail; unsigned fNumIndexRecords; unsigned fBufferSize; Boolean fPacketLossCompensate; Boolean fAreCurrentlyBeingPlayed; afterPlayingFunc* fAfterFunc; void* fAfterClientData; unsigned fNumSubsessions; unsigned fNumBytesWritten; struct timeval fStartTime; Boolean fHaveCompletedOutputFile; private: ///// Definitions specific to the AVI file format: unsigned addWord(unsigned word); // outputs "word" in little-endian order unsigned addHalfWord(unsigned short halfWord); unsigned addByte(unsigned char byte) { putc(byte, fOutFid); return 1; } unsigned addZeroWords(unsigned numWords); unsigned add4ByteString(char const* str); void setWord(unsigned filePosn, unsigned size); // Define member functions for outputting various types of file header: #define _header(name) unsigned addFileHeader_##name() _header(AVI); _header(hdrl); _header(avih); _header(strl); _header(strh); _header(strf); _header(JUNK); // _header(JUNK); _header(movi); private: unsigned short fMovieWidth, fMovieHeight; unsigned fMovieFPS; unsigned fRIFFSizePosition, fRIFFSizeValue; unsigned fAVIHMaxBytesPerSecondPosition; unsigned fAVIHFrameCountPosition; unsigned fMoviSizePosition, fMoviSizeValue; class AVISubsessionIOState* fCurrentIOState; unsigned fJunkNumber; }; #endif live/liveMedia/include/H264VideoRTPSource.hh000444 001751 000000 00000004604 12265042432 020767 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // H.264 Video RTP Sources // C++ header #ifndef _H264_VIDEO_RTP_SOURCE_HH #define _H264_VIDEO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class H264VideoRTPSource: public MultiFramedRTPSource { public: static H264VideoRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency = 90000); protected: H264VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() virtual ~H264VideoRTPSource(); protected: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; private: friend class H264BufferedPacket; unsigned char fCurPacketNALUnitType; }; class SPropRecord { public: ~SPropRecord() { delete[] sPropBytes; } unsigned sPropLength; // in bytes unsigned char* sPropBytes; }; SPropRecord* parseSPropParameterSets(char const* sPropParameterSetsStr, // result parameter: unsigned& numSPropRecords); // Returns the binary value of each 'parameter set' specified in a // "sprop-parameter-sets" string (in the SDP description for a H.264/RTP stream). // The value is returned as an array (length "numSPropRecords") of "SPropRecord"s. // This array is dynamically allocated by this routine, and must be delete[]d by the caller. #endif live/liveMedia/include/ADTSAudioFileSource.hh000444 001751 000000 00000003565 12265042432 021311 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A source object for AAC audio files in ADTS format // C++ header #ifndef _ADTS_AUDIO_FILE_SOURCE_HH #define _ADTS_AUDIO_FILE_SOURCE_HH #ifndef _FRAMED_FILE_SOURCE_HH #include "FramedFileSource.hh" #endif class ADTSAudioFileSource: public FramedFileSource { public: static ADTSAudioFileSource* createNew(UsageEnvironment& env, char const* fileName); unsigned samplingFrequency() const { return fSamplingFrequency; } unsigned numChannels() const { return fNumChannels; } char const* configStr() const { return fConfigStr; } // returns the 'AudioSpecificConfig' for this stream (in ASCII form) private: ADTSAudioFileSource(UsageEnvironment& env, FILE* fid, u_int8_t profile, u_int8_t samplingFrequencyIndex, u_int8_t channelConfiguration); // called only by createNew() virtual ~ADTSAudioFileSource(); private: // redefined virtual functions: virtual void doGetNextFrame(); private: unsigned fSamplingFrequency; unsigned fNumChannels; unsigned fuSecsPerFrame; char fConfigStr[5]; }; #endif live/liveMedia/include/ADTSAudioFileServerMediaSubsession.hh000444 001751 000000 00000003645 12265042432 024334 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an AAC audio file in ADTS format // C++ header #ifndef _ADTS_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH #define _ADTS_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif class ADTSAudioFileServerMediaSubsession: public FileServerMediaSubsession{ public: static ADTSAudioFileServerMediaSubsession* createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); protected: ADTSAudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); // called only by createNew(); virtual ~ADTSAudioFileServerMediaSubsession(); protected: // redefined virtual functions virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); }; #endif live/liveMedia/include/MPEG2TransportStreamMultiplexor.hh000444 001751 000000 00000005253 12265042432 023757 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class for generating MPEG-2 Transport Stream from one or more input // Elementary Stream data sources // C++ header #ifndef _MPEG2_TRANSPORT_STREAM_MULTIPLEXOR_HH #define _MPEG2_TRANSPORT_STREAM_MULTIPLEXOR_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif #ifndef _MPEG_1OR2_DEMUX_HH #include "MPEG1or2Demux.hh" // for SCR #endif #define PID_TABLE_SIZE 256 class MPEG2TransportStreamMultiplexor: public FramedSource { protected: MPEG2TransportStreamMultiplexor(UsageEnvironment& env); virtual ~MPEG2TransportStreamMultiplexor(); virtual void awaitNewBuffer(unsigned char* oldBuffer) = 0; // implemented by subclasses void handleNewBuffer(unsigned char* buffer, unsigned bufferSize, int mpegVersion, MPEG1or2Demux::SCR scr); // called by "awaitNewBuffer()" // Note: For MPEG-4 video, set "mpegVersion" to 4; for H.264 video, set "mpegVersion" to 5. private: // Redefined virtual functions: virtual void doGetNextFrame(); private: void deliverDataToClient(u_int8_t pid, unsigned char* buffer, unsigned bufferSize, unsigned& startPositionInBuffer); void deliverPATPacket(); void deliverPMTPacket(Boolean hasChanged); void setProgramStreamMap(unsigned frameSize); protected: Boolean fHaveVideoStreams; private: unsigned fOutgoingPacketCounter; unsigned fProgramMapVersion; u_int8_t fPreviousInputProgramMapVersion, fCurrentInputProgramMapVersion; // These two fields are used if we see "program_stream_map"s in the input. struct { unsigned counter; u_int8_t streamType; // for use in Program Maps } fPIDState[PID_TABLE_SIZE]; u_int8_t fPCR_PID, fCurrentPID; // Note: We map 8-bit stream_ids directly to PIDs MPEG1or2Demux::SCR fPCR; unsigned char* fInputBuffer; unsigned fInputBufferSize, fInputBufferBytesUsed; Boolean fIsFirstAdaptationField; }; #endif live/liveMedia/include/MPEG2TransportStreamFromESSource.hh000444 001751 000000 00000004163 12265042432 023746 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A filter for converting one or more MPEG Elementary Streams // to a MPEG-2 Transport Stream // C++ header #ifndef _MPEG2_TRANSPORT_STREAM_FROM_ES_SOURCE_HH #define _MPEG2_TRANSPORT_STREAM_FROM_ES_SOURCE_HH #ifndef _MPEG2_TRANSPORT_STREAM_MULTIPLEXOR_HH #include "MPEG2TransportStreamMultiplexor.hh" #endif class MPEG2TransportStreamFromESSource: public MPEG2TransportStreamMultiplexor { public: static MPEG2TransportStreamFromESSource* createNew(UsageEnvironment& env); void addNewVideoSource(FramedSource* inputSource, int mpegVersion); // Note: For MPEG-4 video, set "mpegVersion" to 4; for H.264 video, set "mpegVersion" to 5. void addNewAudioSource(FramedSource* inputSource, int mpegVersion); protected: MPEG2TransportStreamFromESSource(UsageEnvironment& env); // called only by createNew() virtual ~MPEG2TransportStreamFromESSource(); void addNewInputSource(FramedSource* inputSource, u_int8_t streamId, int mpegVersion); // used to implement addNew*Source() above private: // Redefined virtual functions: virtual void doStopGettingFrames(); virtual void awaitNewBuffer(unsigned char* oldBuffer); private: friend class InputESSourceRecord; class InputESSourceRecord* fInputSources; unsigned fVideoSourceCounter, fAudioSourceCounter; }; #endif live/liveMedia/include/H263plusVideoFileServerMediaSubsession.hh000444 001751 000000 00000003670 12265042432 025132 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a H.263 video file. 
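// For example, a minimal sketch (assuming "sms" is an existing
// "ServerMediaSession", and "test.h263" is a hypothetical H.263 video file):
//   sms->addSubsession(H263plusVideoFileServerMediaSubsession
//                      ::createNew(env, "test.h263", False/*reuseFirstSource*/));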
// C++ header #ifndef _H263PLUS_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH #define _H263PLUS_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif class H263plusVideoFileServerMediaSubsession: public FileServerMediaSubsession{ public: static H263plusVideoFileServerMediaSubsession* createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); private: H263plusVideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); // called only by createNew(); virtual ~H263plusVideoFileServerMediaSubsession(); private: // redefined virtual functions virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); }; #endif live/liveMedia/include/H263plusVideoStreamFramer.hh000444 001751 000000 00000004167 12265042432 022440 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up an H263 video elementary stream into frames. // Author Benhard Feiten #ifndef _H263PLUS_VIDEO_STREAM_FRAMER_HH #define _H263PLUS_VIDEO_STREAM_FRAMER_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif class H263plusVideoStreamFramer: public FramedFilter { public: static H263plusVideoStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource); Boolean& pictureEndMarker() { return fPictureEndMarker; } // a hack for implementing the RTP 'M' bit protected: // Constructor called only by createNew(), or by subclass constructors H263plusVideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean createParser = True); virtual ~H263plusVideoStreamFramer(); public: static void continueReadProcessing(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime); void continueReadProcessing(); private: virtual void doGetNextFrame(); virtual Boolean isH263plusVideoStreamFramer() const; protected: double fFrameRate; unsigned fPictureCount; // hack used to implement doGetNextFrame() ?? Boolean fPictureEndMarker; private: class H263plusVideoStreamParser* fParser; struct timeval fPresentationTimeBase; }; #endif live/liveMedia/include/RTSPCommon.hh000444 001751 000000 00000005023 12265042432 017543 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Common routines used by both RTSP clients and servers // C++ header #ifndef _RTSP_COMMON_HH #define _RTSP_COMMON_HH #ifndef _BOOLEAN_HH #include "Boolean.hh" #endif #ifndef _MEDIA_HH #include <Media.hh> // includes some definitions perhaps needed for Borland compilers? #endif #if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4) #define _strncasecmp _strnicmp #define snprintf _snprintf #else #define _strncasecmp strncasecmp #endif #define RTSP_PARAM_STRING_MAX 200 Boolean parseRTSPRequestString(char const *reqStr, unsigned reqStrSize, char *resultCmdName, unsigned resultCmdNameMaxSize, char* resultURLPreSuffix, unsigned resultURLPreSuffixMaxSize, char* resultURLSuffix, unsigned resultURLSuffixMaxSize, char* resultCSeq, unsigned resultCSeqMaxSize, char* resultSessionId, unsigned resultSessionIdMaxSize, unsigned& contentLength); Boolean parseRangeParam(char const* paramStr, double& rangeStart, double& rangeEnd, char*& absStartTime, char*& absEndTime); Boolean parseRangeHeader(char const* buf, double& rangeStart, double& rangeEnd, char*& absStartTime, char*& absEndTime); Boolean parseScaleHeader(char const* buf, float& scale); Boolean RTSPOptionIsSupported(char const* commandName, char const* optionsResponseString); // Returns True iff the RTSP command "commandName" is mentioned as one of the commands supported in "optionsResponseString" // (which should be the 'resultString' from a previous RTSP "OPTIONS" request). char const* dateHeader(); // A "Date:" header that can be used in a RTSP (or HTTP) response void ignoreSigPipeOnSocket(int socketNum); #endif live/liveMedia/include/H264VideoStreamFramer.hh000444 001751 000000 00000003154 12265042432 021530 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up a H.264 Video Elementary Stream into NAL units.
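// For example, a minimal sketch (assuming "test.264" is a hypothetical H.264
// Elementary Stream file, and "videoSink" is an existing "H264VideoRTPSink"):
//   FramedSource* fileSource = ByteStreamFileSource::createNew(env, "test.264");
//   H264VideoStreamFramer* framer
//     = H264VideoStreamFramer::createNew(env, fileSource);
//   videoSink->startPlaying(*framer, afterPlaying, NULL);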
// C++ header #ifndef _H264_VIDEO_STREAM_FRAMER_HH #define _H264_VIDEO_STREAM_FRAMER_HH #ifndef _H264_OR_5_VIDEO_STREAM_FRAMER_HH #include "H264or5VideoStreamFramer.hh" #endif class H264VideoStreamFramer: public H264or5VideoStreamFramer { public: static H264VideoStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean includeStartCodeInOutput = False); protected: H264VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean createParser, Boolean includeStartCodeInOutput); // called only by "createNew()" virtual ~H264VideoStreamFramer(); // redefined virtual functions: virtual Boolean isH264VideoStreamFramer() const; }; #endif live/liveMedia/include/liveMedia.hh000444 001751 000000 00000010762 12265042432 017507 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Inclusion of header files representing the interface // for the entire library // // Programs that use the library can include this header file, // instead of each of the individual media header files #ifndef _LIVEMEDIA_HH #define _LIVEMEDIA_HH #include "MPEG1or2AudioRTPSink.hh" #include "MP3ADURTPSink.hh" #include "MPEG1or2VideoRTPSink.hh" #include "MPEG4ESVideoRTPSink.hh" #include "AMRAudioFileSink.hh" #include "H264VideoFileSink.hh" #include "BasicUDPSink.hh" #include "GSMAudioRTPSink.hh" #include "H263plusVideoRTPSink.hh" #include "H264VideoRTPSink.hh" #include "H265VideoRTPSink.hh" #include "DVVideoRTPSource.hh" #include "DVVideoRTPSink.hh" #include "DVVideoStreamFramer.hh" #include "H264VideoStreamFramer.hh" #include "H265VideoStreamFramer.hh" #include "H264VideoStreamDiscreteFramer.hh" #include "H265VideoStreamDiscreteFramer.hh" #include "JPEGVideoRTPSink.hh" #include "SimpleRTPSink.hh" #include "uLawAudioFilter.hh" #include "MPEG2IndexFromTransportStream.hh" #include "MPEG2TransportStreamTrickModeFilter.hh" #include "ByteStreamMultiFileSource.hh" #include "ByteStreamMemoryBufferSource.hh" #include "BasicUDPSource.hh" #include "SimpleRTPSource.hh" #include "MPEG1or2AudioRTPSource.hh" #include "MPEG4LATMAudioRTPSource.hh" #include "MPEG4LATMAudioRTPSink.hh" #include "MPEG4ESVideoRTPSource.hh" #include "MPEG4GenericRTPSource.hh" #include "MP3ADURTPSource.hh" #include "QCELPAudioRTPSource.hh" #include "AMRAudioRTPSource.hh" #include "JPEGVideoRTPSource.hh" #include "JPEGVideoSource.hh" #include "MPEG1or2VideoRTPSource.hh" #include "VorbisAudioRTPSource.hh" #include "VP8VideoRTPSource.hh" #include "MPEG2TransportStreamFromPESSource.hh" #include "MPEG2TransportStreamFromESSource.hh" #include "MPEG2TransportStreamFramer.hh" #include "ADTSAudioFileSource.hh" #include "H261VideoRTPSource.hh" #include "H263plusVideoRTPSource.hh" #include "H264VideoRTPSource.hh" #include "MP3FileSource.hh" #include "MP3ADU.hh" 
#include "MP3ADUinterleaving.hh" #include "MP3Transcoder.hh" #include "MPEG1or2DemuxedElementaryStream.hh" #include "MPEG1or2AudioStreamFramer.hh" #include "H263plusVideoStreamFramer.hh" #include "AC3AudioStreamFramer.hh" #include "AC3AudioRTPSource.hh" #include "AC3AudioRTPSink.hh" #include "VorbisAudioRTPSink.hh" #include "VP8VideoRTPSink.hh" #include "MPEG4GenericRTPSink.hh" #include "MPEG1or2VideoStreamDiscreteFramer.hh" #include "MPEG4VideoStreamDiscreteFramer.hh" #include "DeviceSource.hh" #include "AudioInputDevice.hh" #include "WAVAudioFileSource.hh" #include "StreamReplicator.hh" #include "RTSPRegisterSender.hh" #include "RTSPServerSupportingHTTPStreaming.hh" #include "RTSPClient.hh" #include "SIPClient.hh" #include "QuickTimeFileSink.hh" #include "QuickTimeGenericRTPSource.hh" #include "AVIFileSink.hh" #include "PassiveServerMediaSubsession.hh" #include "MPEG4VideoFileServerMediaSubsession.hh" #include "H264VideoFileServerMediaSubsession.hh" #include "H265VideoFileServerMediaSubsession.hh" #include "WAVAudioFileServerMediaSubsession.hh" #include "AMRAudioFileServerMediaSubsession.hh" #include "AMRAudioFileSource.hh" #include "AMRAudioRTPSink.hh" #include "T140TextRTPSink.hh" #include "TCPStreamSink.hh" #include "MP3AudioFileServerMediaSubsession.hh" #include "MPEG1or2VideoFileServerMediaSubsession.hh" #include "MPEG1or2FileServerDemux.hh" #include "MPEG2TransportFileServerMediaSubsession.hh" #include "H263plusVideoFileServerMediaSubsession.hh" #include "ADTSAudioFileServerMediaSubsession.hh" #include "DVVideoFileServerMediaSubsession.hh" #include "AC3AudioFileServerMediaSubsession.hh" #include "MPEG2TransportUDPServerMediaSubsession.hh" #include "MatroskaFileServerDemux.hh" #include "ProxyServerMediaSession.hh" #include "DarwinInjector.hh" #endif live/liveMedia/include/OutputFile.hh000444 001751 000000 00000002166 12265042432 017707 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Common routines for opening/closing named output files // C++ header #ifndef _OUTPUT_FILE_HH #define _OUTPUT_FILE_HH #include #include FILE* OpenOutputFile(UsageEnvironment& env, char const* fileName); void CloseOutputFile(FILE* fid); #endif live/liveMedia/include/RTPSink.hh000444 001751 000000 00000017734 12265042432 017110 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP Sinks // C++ header #ifndef _RTP_SINK_HH #define _RTP_SINK_HH #ifndef _MEDIA_SINK_HH #include "MediaSink.hh" #endif #ifndef _RTP_INTERFACE_HH #include "RTPInterface.hh" #endif class RTPTransmissionStatsDB; // forward class RTPSink: public MediaSink { public: static Boolean lookupByName(UsageEnvironment& env, char const* sinkName, RTPSink*& resultSink); // used by RTSP servers: Groupsock const& groupsockBeingUsed() const { return *(fRTPInterface.gs()); } Groupsock& groupsockBeingUsed() { return *(fRTPInterface.gs()); } unsigned char rtpPayloadType() const { return fRTPPayloadType; } unsigned rtpTimestampFrequency() const { return fTimestampFrequency; } void setRTPTimestampFrequency(unsigned freq) { fTimestampFrequency = freq; } char const* rtpPayloadFormatName() const {return fRTPPayloadFormatName;} unsigned numChannels() const { return fNumChannels; } virtual char const* sdpMediaType() const; // for use in SDP m= lines virtual char* rtpmapLine() const; // returns a string to be delete[]d virtual char const* auxSDPLine(); // optional SDP line (e.g. a=fmtp:...) u_int16_t currentSeqNo() const { return fSeqNo; } u_int32_t presetNextTimestamp(); // ensures that the next timestamp to be used will correspond to // the current 'wall clock' time. RTPTransmissionStatsDB& transmissionStatsDB() const { return *fTransmissionStatsDB; } Boolean nextTimestampHasBeenPreset() const { return fNextTimestampHasBeenPreset; } Boolean& enableRTCPReports() { return fEnableRTCPReports; } void getTotalBitrate(unsigned& outNumBytes, double& outElapsedTime); // returns the number of bytes sent since the last time that we // were called, and resets the counter. 
struct timeval const& creationTime() const { return fCreationTime; } struct timeval const& initialPresentationTime() const { return fInitialPresentationTime; } struct timeval const& mostRecentPresentationTime() const { return fMostRecentPresentationTime; } void resetPresentationTimes(); // Hacks to allow sending RTP over TCP (RFC 2326, section 10.12): void setStreamSocket(int sockNum, unsigned char streamChannelId) { fRTPInterface.setStreamSocket(sockNum, streamChannelId); } void addStreamSocket(int sockNum, unsigned char streamChannelId) { fRTPInterface.addStreamSocket(sockNum, streamChannelId); } void removeStreamSocket(int sockNum, unsigned char streamChannelId) { fRTPInterface.removeStreamSocket(sockNum, streamChannelId); } protected: RTPSink(UsageEnvironment& env, Groupsock* rtpGS, unsigned char rtpPayloadType, u_int32_t rtpTimestampFrequency, char const* rtpPayloadFormatName, unsigned numChannels); // abstract base class virtual ~RTPSink(); // used by RTCP: friend class RTCPInstance; friend class RTPTransmissionStats; u_int32_t SSRC() const {return fSSRC;} // later need a means of changing the SSRC if there's a collision ##### u_int32_t convertToRTPTimestamp(struct timeval tv); unsigned packetCount() const {return fPacketCount;} unsigned octetCount() const {return fOctetCount;} protected: RTPInterface fRTPInterface; unsigned char fRTPPayloadType; unsigned fPacketCount, fOctetCount, fTotalOctetCount /*incl RTP hdr*/; struct timeval fTotalOctetCountStartTime, fInitialPresentationTime, fMostRecentPresentationTime; u_int32_t fCurrentTimestamp; u_int16_t fSeqNo; private: // redefined virtual functions: virtual Boolean isRTPSink() const; private: u_int32_t fSSRC, fTimestampBase; unsigned fTimestampFrequency; Boolean fNextTimestampHasBeenPreset; Boolean fEnableRTCPReports; // whether RTCP "SR" reports should be sent for this sink (default: True) char const* fRTPPayloadFormatName; unsigned fNumChannels; struct timeval fCreationTime; RTPTransmissionStatsDB* fTransmissionStatsDB; }; class RTPTransmissionStats; // forward class RTPTransmissionStatsDB { public: unsigned numReceivers() const { return fNumReceivers; } class Iterator { public: Iterator(RTPTransmissionStatsDB& receptionStatsDB); virtual ~Iterator(); RTPTransmissionStats* next(); // NULL if none private: HashTable::Iterator* fIter; }; // The following is called whenever a RTCP RR packet is received: void noteIncomingRR(u_int32_t SSRC, struct sockaddr_in const& lastFromAddress, unsigned lossStats, unsigned lastPacketNumReceived, unsigned jitter, unsigned lastSRTime, unsigned diffSR_RRTime); // The following is called when a RTCP BYE packet is received: void removeRecord(u_int32_t SSRC); RTPTransmissionStats* lookup(u_int32_t SSRC) const; private: // constructor and destructor, called only by RTPSink: friend class RTPSink; RTPTransmissionStatsDB(RTPSink& rtpSink); virtual ~RTPTransmissionStatsDB(); private: void add(u_int32_t SSRC, RTPTransmissionStats* stats); private: friend class Iterator; unsigned fNumReceivers; RTPSink& fOurRTPSink; HashTable* fTable; }; class RTPTransmissionStats { public: u_int32_t SSRC() const {return fSSRC;} struct sockaddr_in const& lastFromAddress() const {return fLastFromAddress;} unsigned lastPacketNumReceived() const {return fLastPacketNumReceived;} unsigned firstPacketNumReported() const {return fFirstPacketNumReported;} unsigned totNumPacketsLost() const {return fTotNumPacketsLost;} unsigned jitter() const {return fJitter;} unsigned lastSRTime() const { return fLastSRTime; } unsigned diffSR_RRTime()
const { return fDiffSR_RRTime; } unsigned roundTripDelay() const; // The round-trip delay (in units of 1/65536 seconds) computed from // the most recently-received RTCP RR packet. struct timeval timeCreated() const {return fTimeCreated;} struct timeval lastTimeReceived() const {return fTimeReceived;} void getTotalOctetCount(u_int32_t& hi, u_int32_t& lo); void getTotalPacketCount(u_int32_t& hi, u_int32_t& lo); // Information which requires at least two RRs to have been received: unsigned packetsReceivedSinceLastRR() const; u_int8_t packetLossRatio() const { return fPacketLossRatio; } // as an 8-bit fixed-point number int packetsLostBetweenRR() const; private: // called only by RTPTransmissionStatsDB: friend class RTPTransmissionStatsDB; RTPTransmissionStats(RTPSink& rtpSink, u_int32_t SSRC); virtual ~RTPTransmissionStats(); void noteIncomingRR(struct sockaddr_in const& lastFromAddress, unsigned lossStats, unsigned lastPacketNumReceived, unsigned jitter, unsigned lastSRTime, unsigned diffSR_RRTime); private: RTPSink& fOurRTPSink; u_int32_t fSSRC; struct sockaddr_in fLastFromAddress; unsigned fLastPacketNumReceived; u_int8_t fPacketLossRatio; unsigned fTotNumPacketsLost; unsigned fJitter; unsigned fLastSRTime; unsigned fDiffSR_RRTime; struct timeval fTimeCreated, fTimeReceived; Boolean fAtLeastTwoRRsHaveBeenReceived; unsigned fOldLastPacketNumReceived; unsigned fOldTotNumPacketsLost; Boolean fFirstPacket; unsigned fFirstPacketNumReported; u_int32_t fLastOctetCount, fTotalOctetCount_hi, fTotalOctetCount_lo; u_int32_t fLastPacketCount, fTotalPacketCount_hi, fTotalPacketCount_lo; }; #endif live/liveMedia/include/FileSink.hh000444 001751 000000 00000005057 12265042432 017315 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // File Sinks // C++ header #ifndef _FILE_SINK_HH #define _FILE_SINK_HH #ifndef _MEDIA_SINK_HH #include "MediaSink.hh" #endif class FileSink: public MediaSink { public: static FileSink* createNew(UsageEnvironment& env, char const* fileName, unsigned bufferSize = 20000, Boolean oneFilePerFrame = False); // "bufferSize" should be at least as large as the largest expected // input frame. // "oneFilePerFrame" - if True - specifies that each input frame will // be written to a separate file (using the presentation time as a // file name suffix). The default behavior ("oneFilePerFrame" == False) // is to output all incoming data into a single file. 
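// For illustration, a minimal recording sketch (assumptions: "env" and a "FramedSource* source" already
// exist; "afterPlaying" is a hypothetical callback of type "MediaSink::afterPlayingFunc"):
//
//   FileSink* sink = FileSink::createNew(env, "out.dat", 100000/*bufferSize: >= largest expected frame*/);
//   if (sink != NULL) sink->startPlaying(*source, afterPlaying, NULL);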
void addData(unsigned char const* data, unsigned dataSize, struct timeval presentationTime); // (Available in case a client wants to add extra data to the output file) protected: FileSink(UsageEnvironment& env, FILE* fid, unsigned bufferSize, char const* perFrameFileNamePrefix); // called only by createNew() virtual ~FileSink(); protected: // redefined virtual functions: virtual Boolean continuePlaying(); protected: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); virtual void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime); FILE* fOutFid; unsigned char* fBuffer; unsigned fBufferSize; char* fPerFrameFileNamePrefix; // used if "oneFilePerFrame" is True char* fPerFrameFileNameBuffer; // used if "oneFilePerFrame" is True struct timeval fPrevPresentationTime; unsigned fSamePresentationTimeCounter; }; #endif live/liveMedia/include/MPEG2TransportStreamIndexFile.hh000444 001751 000000 00000007602 12265042432 023302 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class that encapsulates MPEG-2 Transport Stream 'index files'. // These index files are used to implement 'trick play' operations // (seek-by-time, fast forward, reverse play) on Transport Stream files. // // C++ header #ifndef _MPEG2_TRANSPORT_STREAM_INDEX_FILE_HH #define _MPEG2_TRANSPORT_STREAM_INDEX_FILE_HH #ifndef _MEDIA_HH #include "Media.hh" #endif #define INDEX_RECORD_SIZE 11 class MPEG2TransportStreamIndexFile: public Medium { public: static MPEG2TransportStreamIndexFile* createNew(UsageEnvironment& env, char const* indexFileName); virtual ~MPEG2TransportStreamIndexFile(); // Functions that map between a playing time and a Transport packet number // in the original Transport Stream file: void lookupTSPacketNumFromNPT(float& npt, unsigned long& tsPacketNumber, unsigned long& indexRecordNumber); // Looks up the Transport Stream Packet number corresponding to "npt". // (This may modify "npt" to a more exact value.) // (We also return the index record number that we looked up.) void lookupPCRFromTSPacketNum(unsigned long& tsPacketNumber, Boolean reverseToPreviousCleanPoint, float& pcr, unsigned long& indexRecordNumber); // Looks up the PCR timestamp for the transport packet "tsPacketNumber". // (Adjust "tsPacketNumber" only if "reverseToPreviousCleanPoint" is True.) // (We also return the index record number that we looked up.)
// Miscellaneous functions used to implement 'trick play': Boolean readIndexRecordValues(unsigned long indexRecordNum, unsigned long& transportPacketNum, u_int8_t& offset, u_int8_t& size, float& pcr, u_int8_t& recordType); float getPlayingDuration(); void stopReading() { closeFid(); } int mpegVersion(); // returns the best guess for the version of MPEG being used for data within the underlying Transport Stream file. // (1,2,4, or 5 (representing H.264). 0 means 'don't know' (usually because the index file is empty)) private: MPEG2TransportStreamIndexFile(UsageEnvironment& env, char const* indexFileName); Boolean openFid(); Boolean seekToIndexRecord(unsigned long indexRecordNumber); Boolean readIndexRecord(unsigned long indexRecordNum); // into "fBuf" Boolean readOneIndexRecord(unsigned long indexRecordNum); // closes "fFid" at end void closeFid(); u_int8_t recordTypeFromBuf() { return fBuf[0]; } u_int8_t offsetFromBuf() { return fBuf[1]; } u_int8_t sizeFromBuf() { return fBuf[2]; } float pcrFromBuf(); // after "fBuf" has been read unsigned long tsPacketNumFromBuf(); void setMPEGVersionFromRecordType(u_int8_t recordType); Boolean rewindToCleanPoint(unsigned long& ixFound); // used to implement "lookupTSPacketNumFromNPT()" private: char* fFileName; FILE* fFid; // used internally when reading from the file int fMPEGVersion; unsigned long fCurrentIndexRecordNum; // within "fFid" float fCachedPCR; unsigned long fCachedTSPacketNumber, fCachedIndexRecordNumber; unsigned long fNumIndexRecords; unsigned char fBuf[INDEX_RECORD_SIZE]; // used for reading index records from file }; #endif live/liveMedia/include/MPEG2TransportStreamTrickModeFilter.hh000444 001751 000000 00000007165 12265042432 024466 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that converts a MPEG Transport Stream file - with corresponding index file // - to a corresponding Video Elementary Stream. It also uses a "scale" parameter // to implement 'trick mode' (fast forward or reverse play, using I-frames) on // the video stream.
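// For illustration, a wiring sketch for the trick-mode filter that is declared below (assumptions: "env"
// is an existing "UsageEnvironment", and "test.ts" has a companion index file "test.tsx"):
//
//   MPEG2TransportStreamIndexFile* indexFile
//     = MPEG2TransportStreamIndexFile::createNew(env, "test.tsx");
//   FramedSource* tsSource
//     = ByteStreamFileSource::createNew(env, "test.ts", TRANSPORT_PACKET_SIZE/*preferred frame size*/);
//   MPEG2TransportStreamTrickModeFilter* trickFilter
//     = MPEG2TransportStreamTrickModeFilter::createNew(env, tsSource, indexFile, 8/*scale: 8x fast-forward*/);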
// C++ header #ifndef _MPEG2_TRANSPORT_STREAM_TRICK_MODE_FILTER_HH #define _MPEG2_TRANSPORT_STREAM_TRICK_MODE_FILTER_HH #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif #ifndef _MPEG2_TRANSPORT_STREAM_INDEX_FILE_HH #include "MPEG2TransportStreamIndexFile.hh" #endif #ifndef TRANSPORT_PACKET_SIZE #define TRANSPORT_PACKET_SIZE 188 #endif class MPEG2TransportStreamTrickModeFilter: public FramedFilter { public: static MPEG2TransportStreamTrickModeFilter* createNew(UsageEnvironment& env, FramedSource* inputSource, MPEG2TransportStreamIndexFile* indexFile, int scale); Boolean seekTo(unsigned long tsPacketNumber, unsigned long indexRecordNumber); unsigned long nextIndexRecordNum() const { return fNextIndexRecordNum; } void forgetInputSource() { fInputSource = NULL; } // this lets us delete this without also deleting the input Transport Stream protected: MPEG2TransportStreamTrickModeFilter(UsageEnvironment& env, FramedSource* inputSource, MPEG2TransportStreamIndexFile* indexFile, int scale); // called only by createNew() virtual ~MPEG2TransportStreamTrickModeFilter(); private: // Redefined virtual functions: virtual void doGetNextFrame(); virtual void doStopGettingFrames(); private: void attemptDeliveryToClient(); void seekToTransportPacket(unsigned long tsPacketNum); void readTransportPacket(unsigned long tsPacketNum); // asynchronously static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize); static void onSourceClosure(void* clientData); void onSourceClosure1(); private: Boolean fHaveStarted; MPEG2TransportStreamIndexFile* fIndexFile; int fScale; // absolute value int fDirection; // 1 => forward; -1 => reverse enum { SKIPPING_FRAME, DELIVERING_SAVED_FRAME, SAVING_AND_DELIVERING_FRAME } fState; unsigned fFrameCount; unsigned long fNextIndexRecordNum; // next to be read from the index file unsigned long fNextTSPacketNum; // next to be read from the transport stream file unsigned char fInputBuffer[TRANSPORT_PACKET_SIZE]; unsigned long fCurrentTSPacketNum; // corresponding to data currently in the buffer unsigned long fDesiredTSPacketNum; u_int8_t fDesiredDataOffset, fDesiredDataSize; float fDesiredDataPCR, fFirstPCR; unsigned long fSavedFrameIndexRecordStart; unsigned long fSavedSequentialIndexRecordNum; Boolean fUseSavedFrameNextTime; }; #endif live/liveMedia/include/H264VideoFileSink.hh000444 001751 000000 00000003671 12265042432 020650 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// H.264 Video File Sinks // C++ header #ifndef _H264_VIDEO_FILE_SINK_HH #define _H264_VIDEO_FILE_SINK_HH #ifndef _FILE_SINK_HH #include "FileSink.hh" #endif class H264VideoFileSink: public FileSink { public: static H264VideoFileSink* createNew(UsageEnvironment& env, char const* fileName, char const* sPropParameterSetsStr = NULL, // An optional 'SDP format' string (comma-separated Base64-encoded) representing SPS and/or PPS NAL-units to prepend to the output unsigned bufferSize = 100000, Boolean oneFilePerFrame = False); // See "FileSink.hh" for a description of these parameters. protected: H264VideoFileSink(UsageEnvironment& env, FILE* fid, char const* sPropParameterSetsStr, unsigned bufferSize, char const* perFrameFileNamePrefix); // called only by createNew() virtual ~H264VideoFileSink(); protected: // redefined virtual functions: virtual void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime); private: char const* fSPropParameterSetsStr; Boolean fHaveWrittenFirstFrame; }; #endif live/liveMedia/include/DVVideoRTPSource.hh000444 001751 000000 00000003266 12265042432 020660 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // DV Video RTP Sources // C++ header #ifndef _DV_VIDEO_RTP_SOURCE_HH #define _DV_VIDEO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class DVVideoRTPSource: public MultiFramedRTPSource { public: static DVVideoRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); protected: virtual ~DVVideoRTPSource(); private: DVVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() private: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; }; #endif live/liveMedia/include/Media.hh000444 001751 000000 00000007471 12265042432 016632 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Medium // C++ header #ifndef _MEDIA_HH #define _MEDIA_HH #ifndef _LIVEMEDIA_VERSION_HH #include "liveMedia_version.hh" #endif #ifndef _HASH_TABLE_HH #include "HashTable.hh" #endif #ifndef _USAGE_ENVIRONMENT_HH #include "UsageEnvironment.hh" #endif // Lots of files end up needing the following, so just #include them here: #ifndef _NET_COMMON_H #include "NetCommon.h" #endif #include <stdio.h> // The following makes the Borland compiler happy: #ifdef __BORLANDC__ #define _strnicmp strnicmp #define fabsf(x) fabs(x) #endif #define mediumNameMaxLen 30 class Medium { public: static Boolean lookupByName(UsageEnvironment& env, char const* mediumName, Medium*& resultMedium); static void close(UsageEnvironment& env, char const* mediumName); static void close(Medium* medium); // alternative close() method using ptrs // (has no effect if medium == NULL) UsageEnvironment& envir() const {return fEnviron;} char const* name() const {return fMediumName;} // Test for specific types of media: virtual Boolean isSource() const; virtual Boolean isSink() const; virtual Boolean isRTCPInstance() const; virtual Boolean isRTSPClient() const; virtual Boolean isRTSPServer() const; virtual Boolean isMediaSession() const; virtual Boolean isServerMediaSession() const; virtual Boolean isDarwinInjector() const; protected: friend class MediaLookupTable; Medium(UsageEnvironment& env); // abstract base class virtual ~Medium(); // instances are deleted using close() only TaskToken& nextTask() { return fNextTask; } private: UsageEnvironment& fEnviron; char fMediumName[mediumNameMaxLen]; TaskToken fNextTask; }; // A data structure for looking up a Medium by its string name. // (It is used only to implement "Medium", but we make it visible here, in case developers want to use it to iterate over // the whole set of "Medium" objects that we've created.)
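// For illustration, an iteration sketch (assumptions: "env" is an existing "UsageEnvironment", and the
// "HashTable::Iterator" interface is as declared in "HashTable.hh"):
//
//   HashTable const& table = MediaLookupTable::ourMedia(env)->getTable();
//   HashTable::Iterator* iter = HashTable::Iterator::create(table);
//   char const* name;
//   void* medium;
//   while ((medium = iter->next(name)) != NULL) {
//     // each "medium" is a "Medium*", registered under "name"
//   }
//   delete iter;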
class MediaLookupTable { public: static MediaLookupTable* ourMedia(UsageEnvironment& env); HashTable const& getTable() { return *fTable; } protected: MediaLookupTable(UsageEnvironment& env); virtual ~MediaLookupTable(); private: friend class Medium; Medium* lookup(char const* name) const; // Returns NULL if none already exists void addNew(Medium* medium, char* mediumName); void remove(char const* name); void generateNewName(char* mediumName, unsigned maxLen); private: UsageEnvironment& fEnv; HashTable* fTable; unsigned fNameGenerator; }; // The structure pointed to by the "liveMediaPriv" UsageEnvironment field: class _Tables { public: static _Tables* getOurTables(UsageEnvironment& env, Boolean createIfNotPresent = True); // returns a pointer to an "ourTables" structure (creating it if necessary) void reclaimIfPossible(); // used to delete ourselves when we're no longer used MediaLookupTable* mediaTable; void* socketTable; protected: _Tables(UsageEnvironment& env); virtual ~_Tables(); private: UsageEnvironment& fEnv; }; #endif live/liveMedia/include/H264VideoStreamDiscreteFramer.hh000444 001751 000000 00000003367 12265042432 023221 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simplified version of "H264VideoStreamFramer" that takes only complete, // discrete frames (rather than an arbitrary byte stream) as input. // This avoids the parsing and data copying overhead of the full // "H264VideoStreamFramer". // C++ header #ifndef _H264_VIDEO_STREAM_DISCRETE_FRAMER_HH #define _H264_VIDEO_STREAM_DISCRETE_FRAMER_HH #ifndef _H264_OR_5_VIDEO_STREAM_DISCRETE_FRAMER_HH #include "H264or5VideoStreamDiscreteFramer.hh" #endif class H264VideoStreamDiscreteFramer: public H264or5VideoStreamDiscreteFramer { public: static H264VideoStreamDiscreteFramer* createNew(UsageEnvironment& env, FramedSource* inputSource); protected: H264VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource); // called only by createNew() virtual ~H264VideoStreamDiscreteFramer(); private: // redefined virtual functions: virtual Boolean isH264VideoStreamFramer() const; }; #endif live/liveMedia/include/H264VideoFileServerMediaSubsession.hh000444 001751 000000 00000004416 12265042432 024226 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a H264 Elementary Stream video file. // C++ header #ifndef _H264_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH #define _H264_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif class H264VideoFileServerMediaSubsession: public FileServerMediaSubsession { public: static H264VideoFileServerMediaSubsession* createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); // Used to implement "getAuxSDPLine()": void checkForAuxSDPLine1(); void afterPlayingDummy1(); protected: H264VideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); // called only by createNew(); virtual ~H264VideoFileServerMediaSubsession(); void setDoneFlag() { fDoneFlag = ~0; } protected: // redefined virtual functions virtual char const* getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); private: char* fAuxSDPLine; char fDoneFlag; // used when setting up "fAuxSDPLine" RTPSink* fDummyRTPSink; // ditto }; #endif live/liveMedia/include/AC3AudioFileServerMediaSubsession.hh000444 001751 000000 00000003615 12265042432 024144 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an AC3 audio file. 
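// For illustration, a server-setup sketch (assumptions: "env" and an existing "RTSPServer* rtspServer";
// "test.ac3" is a hypothetical file name):
//
//   ServerMediaSession* sms = ServerMediaSession::createNew(env, "ac3AudioTest");
//   sms->addSubsession(AC3AudioFileServerMediaSubsession::createNew(env, "test.ac3", False/*reuseFirstSource*/));
//   rtspServer->addServerMediaSession(sms);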
// C++ header #ifndef _AC3_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH #define _AC3_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif class AC3AudioFileServerMediaSubsession: public FileServerMediaSubsession{ public: static AC3AudioFileServerMediaSubsession* createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); private: AC3AudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); // called only by createNew(); virtual ~AC3AudioFileServerMediaSubsession(); private: // redefined virtual functions virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); }; #endif live/liveMedia/include/TCPStreamSink.hh000444 001751 000000 00000005004 12265042432 020230 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A sink representing a TCP output stream // C++ header #ifndef _TCP_STREAM_SINK_HH #define _TCP_STREAM_SINK_HH #ifndef _MEDIA_SINK_HH #include "MediaSink.hh" #endif #define TCP_STREAM_SINK_BUFFER_SIZE 10000 class TCPStreamSink: public MediaSink { public: static TCPStreamSink* createNew(UsageEnvironment& env, int socketNum); // "socketNum" is the socket number of an existing, writable TCP socket (which should be non-blocking). // The caller is responsible for closing this socket later (when this object no longer exists). 
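// For illustration (a sketch; assumptions: "env" and a "FramedSource* source" exist; "sock" is a
// connected TCP socket; "afterPlaying" is a hypothetical "MediaSink::afterPlayingFunc" callback):
//
//   makeSocketNonBlocking(sock); // declared in the "groupsock" library's "GroupsockHelper.hh"
//   TCPStreamSink* sink = TCPStreamSink::createNew(env, sock);
//   if (sink != NULL) sink->startPlaying(*source, afterPlaying, NULL);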
protected: TCPStreamSink(UsageEnvironment& env, int socketNum); // called only by "createNew()" virtual ~TCPStreamSink(); protected: // Redefined virtual functions: virtual Boolean continuePlaying(); private: void processBuffer(); // common routine, called from both the 'socket writable' and 'incoming data' handlers below static void socketWritableHandler(void* clientData, int mask); void socketWritableHandler1(); static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval /*presentationTime*/, unsigned /*durationInMicroseconds*/); void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes); static void ourOnSourceClosure(void* clientData); void ourOnSourceClosure1(); unsigned numUnwrittenBytes() const { return fUnwrittenBytesEnd - fUnwrittenBytesStart; } unsigned freeBufferSpace() const { return TCP_STREAM_SINK_BUFFER_SIZE - fUnwrittenBytesEnd; } private: unsigned char fBuffer[TCP_STREAM_SINK_BUFFER_SIZE]; unsigned fUnwrittenBytesStart, fUnwrittenBytesEnd; Boolean fInputSourceIsOpen, fOutputSocketIsWritable; int fOutputSocketNum; }; #endif live/liveMedia/include/RTSPServerSupportingHTTPStreaming.hh000444 001751 000000 00000005356 12265042432 024237 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// A server that supports both RTSP, and HTTP streaming (using Apple's "HTTP Live Streaming" protocol) // C++ header #ifndef _RTSP_SERVER_SUPPORTING_HTTP_STREAMING_HH #define _RTSP_SERVER_SUPPORTING_HTTP_STREAMING_HH #ifndef _RTSP_SERVER_HH #include "RTSPServer.hh" #endif #ifndef _BYTE_STREAM_MEMORY_BUFFER_SOURCE_HH #include "ByteStreamMemoryBufferSource.hh" #endif #ifndef _TCP_STREAM_SINK_HH #include "TCPStreamSink.hh" #endif class RTSPServerSupportingHTTPStreaming: public RTSPServer { public: static RTSPServerSupportingHTTPStreaming* createNew(UsageEnvironment& env, Port rtspPort = 554, UserAuthenticationDatabase* authDatabase = NULL, unsigned reclamationTestSeconds = 65); Boolean setHTTPPort(Port httpPort) { return setUpTunnelingOverHTTP(httpPort); } protected: RTSPServerSupportingHTTPStreaming(UsageEnvironment& env, int ourSocket, Port ourPort, UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds); // called only by createNew(); virtual ~RTSPServerSupportingHTTPStreaming(); protected: // redefined virtual functions virtual RTSPClientConnection* createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr); public: // should be protected, but some old compilers complain otherwise class RTSPClientConnectionSupportingHTTPStreaming: public RTSPServer::RTSPClientConnection { public: RTSPClientConnectionSupportingHTTPStreaming(RTSPServer& ourServer, int clientSocket, struct sockaddr_in clientAddr); virtual ~RTSPClientConnectionSupportingHTTPStreaming(); protected: // redefined virtual functions virtual void handleHTTPCmd_StreamingGET(char const* urlSuffix, char const* fullRequestStr); protected: static void afterStreaming(void* clientData); private: u_int32_t fClientSessionId; ByteStreamMemoryBufferSource* fPlaylistSource; TCPStreamSink* fTCPSink; }; }; #endif live/liveMedia/include/ByteStreamMemoryBufferSource.hh000444 001751 000000 00000004726 12265042432 023376 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A class for streaming data from a (static) memory buffer, as if it were a file. 
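// For illustration, a creation sketch (assumptions: "env" is an existing "UsageEnvironment"; "kDataSize"
// is a hypothetical size constant). Note that, by default, the buffer is handed off to the object, which
// will delete[] it when the object is closed:
//
//   u_int8_t* buf = new u_int8_t[kDataSize];
//   // ... fill "buf" with media data ...
//   ByteStreamMemoryBufferSource* source
//     = ByteStreamMemoryBufferSource::createNew(env, buf, kDataSize);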
// C++ header #ifndef _BYTE_STREAM_MEMORY_BUFFER_SOURCE_HH #define _BYTE_STREAM_MEMORY_BUFFER_SOURCE_HH #ifndef _FRAMED_SOURCE_HH #include "FramedSource.hh" #endif class ByteStreamMemoryBufferSource: public FramedSource { public: static ByteStreamMemoryBufferSource* createNew(UsageEnvironment& env, u_int8_t* buffer, u_int64_t bufferSize, Boolean deleteBufferOnClose = True, unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0); // "preferredFrameSize" == 0 means 'no preference' // "playTimePerFrame" is in microseconds u_int64_t bufferSize() const { return fBufferSize; } void seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream = 0); // if "numBytesToStream" is >0, then we limit the stream to that number of bytes, before treating it as EOF void seekToByteRelative(int64_t offset); protected: ByteStreamMemoryBufferSource(UsageEnvironment& env, u_int8_t* buffer, u_int64_t bufferSize, Boolean deleteBufferOnClose, unsigned preferredFrameSize, unsigned playTimePerFrame); // called only by createNew() virtual ~ByteStreamMemoryBufferSource(); private: // redefined virtual functions: virtual void doGetNextFrame(); private: u_int8_t* fBuffer; u_int64_t fBufferSize; u_int64_t fCurIndex; Boolean fDeleteBufferOnClose; unsigned fPreferredFrameSize; unsigned fPlayTimePerFrame; unsigned fLastPlayTime; Boolean fLimitNumBytesToStream; u_int64_t fNumBytesToStream; // used iff "fLimitNumBytesToStream" is True }; #endif live/liveMedia/include/VP8VideoRTPSink.hh000444 001751 000000 00000003575 12265042432 020433 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// RTP sink for VP8 video // C++ header #ifndef _VP8_VIDEO_RTP_SINK_HH #define _VP8_VIDEO_RTP_SINK_HH #ifndef _VIDEO_RTP_SINK_HH #include "VideoRTPSink.hh" #endif class VP8VideoRTPSink: public VideoRTPSink { public: static VP8VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat); protected: VP8VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat); // called only by createNew() virtual ~VP8VideoRTPSink(); private: // redefined virtual functions: virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; virtual unsigned specialHeaderSize() const; }; #endif live/liveMedia/include/AMRAudioFileSource.hh000444 001751 000000 00000002773 12265042432 021175 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A source object for AMR audio files (as defined in RFC 4867, section 5) // C++ header #ifndef _AMR_AUDIO_FILE_SOURCE_HH #define _AMR_AUDIO_FILE_SOURCE_HH #ifndef _AMR_AUDIO_SOURCE_HH #include "AMRAudioSource.hh" #endif class AMRAudioFileSource: public AMRAudioSource { public: static AMRAudioFileSource* createNew(UsageEnvironment& env, char const* fileName); private: AMRAudioFileSource(UsageEnvironment& env, FILE* fid, Boolean isWideband, unsigned numChannels); // called only by createNew() virtual ~AMRAudioFileSource(); private: // redefined virtual functions: virtual void doGetNextFrame(); private: FILE* fFid; }; #endif live/liveMedia/include/RTPInterface.hh000444 001751 000000 00000010372 12265042432 020073 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // An abstraction of a network interface used for RTP (or RTCP). // (This allows the RTP-over-TCP hack (RFC 2326, section 10.12) to // be implemented transparently.) 
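// For illustration: a server that wants a "RTPSink"s packets sent over an existing RTSP/TCP connection -
// rather than via UDP - can do (a sketch; "rtspSocketNum" is assumed to be that connection's socket):
//
//   rtpSink->setStreamSocket(rtspSocketNum, 0/*stream channel id for RTP*/);
//
// after which each packet is sent '$'-framed over the TCP connection (per RFC 2326, section 10.12),
// instead of as a UDP datagram.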
// C++ header #ifndef _RTP_INTERFACE_HH #define _RTP_INTERFACE_HH #ifndef _MEDIA_HH #include <Media.hh> #endif #ifndef _GROUPSOCK_HH #include "Groupsock.hh" #endif // Typedef for an optional auxiliary handler function, to be called // when each new packet is read: typedef void AuxHandlerFunc(void* clientData, unsigned char* packet, unsigned& packetSize); typedef void ServerRequestAlternativeByteHandler(void* instance, u_int8_t requestByte); // A hack that allows a handler for RTP/RTCP packets received over TCP to process RTSP commands that may also appear within // the same TCP connection. A RTSP server implementation would supply a function like this - as a parameter to // "ServerMediaSubsession::startStream()". class tcpStreamRecord { public: tcpStreamRecord(int streamSocketNum, unsigned char streamChannelId, tcpStreamRecord* next); virtual ~tcpStreamRecord(); public: tcpStreamRecord* fNext; int fStreamSocketNum; unsigned char fStreamChannelId; }; class RTPInterface { public: RTPInterface(Medium* owner, Groupsock* gs); virtual ~RTPInterface(); Groupsock* gs() const { return fGS; } void setStreamSocket(int sockNum, unsigned char streamChannelId); void addStreamSocket(int sockNum, unsigned char streamChannelId); void removeStreamSocket(int sockNum, unsigned char streamChannelId); static void setServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum, ServerRequestAlternativeByteHandler* handler, void* clientData); static void clearServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum); Boolean sendPacket(unsigned char* packet, unsigned packetSize); void startNetworkReading(TaskScheduler::BackgroundHandlerProc* handlerProc); Boolean handleRead(unsigned char* buffer, unsigned bufferMaxSize, unsigned& bytesRead, struct sockaddr_in& fromAddress, Boolean& packetReadWasIncomplete); void stopNetworkReading(); UsageEnvironment& envir() const { return fOwner->envir(); } void setAuxilliaryReadHandler(AuxHandlerFunc* handlerFunc, void* handlerClientData) { fAuxReadHandlerFunc = handlerFunc; fAuxReadHandlerClientData = handlerClientData; } // A hack for supporting handlers for RTCP packets arriving interleaved over TCP: int nextTCPReadStreamSocketNum() const { return fNextTCPReadStreamSocketNum; } unsigned char nextTCPReadStreamChannelId() const { return fNextTCPReadStreamChannelId; } private: // Helper functions for sending a RTP or RTCP packet over a TCP connection: Boolean sendRTPorRTCPPacketOverTCP(unsigned char* packet, unsigned packetSize, int socketNum, unsigned char streamChannelId); Boolean sendDataOverTCP(int socketNum, u_int8_t const* data, unsigned dataSize, Boolean forceSendToSucceed); private: friend class SocketDescriptor; Medium* fOwner; Groupsock* fGS; tcpStreamRecord* fTCPStreams; // optional, for RTP-over-TCP streaming/receiving unsigned short fNextTCPReadSize; // how much data (if any) is available to be read from the TCP stream int fNextTCPReadStreamSocketNum; unsigned char fNextTCPReadStreamChannelId; TaskScheduler::BackgroundHandlerProc* fReadHandlerProc; // if any AuxHandlerFunc* fAuxReadHandlerFunc; void* fAuxReadHandlerClientData; }; #endif live/liveMedia/include/T140TextRTPSink.hh000444 001751 000000 00000007172 12265042432 020321 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .)
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for T.140 text (RFC 2793) // C++ header #ifndef _T140_TEXT_RTP_SINK_HH #define _T140_TEXT_RTP_SINK_HH #ifndef _TEXT_RTP_SINK_HH #include "TextRTPSink.hh" #endif #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif class T140IdleFilter; class T140TextRTPSink: public TextRTPSink { public: static T140TextRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat); protected: T140TextRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat); // called only by createNew() virtual ~T140TextRTPSink(); protected: // redefined virtual functions: virtual Boolean continuePlaying(); virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; protected: T140IdleFilter* fOurIdleFilter; Boolean fAreInIdlePeriod; }; ////////// T140IdleFilter definition ////////// // Because the T.140 text RTP payload format specification recommends that (empty) RTP packets be sent during 'idle periods' // when no new text is available, we implement "T140TextRTPSink" using a separate "T140IdleFilter" class - sitting in front // - that delivers, to the "T140TextRTPSink", a continuous sequence of (possibly) empty frames. // (Note: This class should be used only by "T140TextRTPSink", or a subclass.) class T140IdleFilter: public FramedFilter { public: T140IdleFilter(UsageEnvironment& env, FramedSource* inputSource); virtual ~T140IdleFilter(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void doStopGettingFrames(); private: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); static void handleIdleTimeout(void* clientData); void handleIdleTimeout(); void deliverFromBuffer(); void deliverEmptyFrame(); static void onSourceClosure(void* clientData); void onSourceClosure(); private: TaskToken fIdleTimerTask; unsigned fBufferSize, fNumBufferedBytes; char* fBuffer; unsigned fBufferedNumTruncatedBytes; // a count of truncated bytes from the upstream struct timeval fBufferedDataPresentationTime; unsigned fBufferedDataDurationInMicroseconds; }; #endif live/liveMedia/include/MatroskaFileServerDemux.hh000444 001751 000000 00000007507 12265042432 022366 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A server demultiplexor for a Matroska file // C++ header #ifndef _MATROSKA_FILE_SERVER_DEMUX_HH #define _MATROSKA_FILE_SERVER_DEMUX_HH #ifndef _SERVER_MEDIA_SESSION_HH #include "ServerMediaSession.hh" #endif #ifndef _MATROSKA_FILE_HH #include "MatroskaFile.hh" #endif class MatroskaFileServerDemux: public Medium { public: typedef void (onCreationFunc)(MatroskaFileServerDemux* newDemux, void* clientData); static void createNew(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData, char const* preferredLanguage = "eng"); // Note: Unlike most "createNew()" functions, this one doesn't return a new object immediately. Instead, because this class // requires file reading (to parse the Matroska 'Track' headers) before a new object can be initialized, the creation of a new // object is signalled by calling - from the event loop - an 'onCreationFunc' that is passed as a parameter to "createNew()". ServerMediaSubsession* newServerMediaSubsession(); ServerMediaSubsession* newServerMediaSubsession(unsigned& resultTrackNumber); // Returns a new "ServerMediaSubsession" object that represents the next preferred media track // (video, audio, subtitle - in that order) from the file. (Preferred media tracks are based on the file's language preference.) // This function returns NULL when no more media tracks exist. ServerMediaSubsession* newServerMediaSubsessionByTrackNumber(unsigned trackNumber); // As above, but creates a new "ServerMediaSubsession" object for a specific track number within the Matroska file. // (You should not call this function more than once with the same track number.) // The following public: member functions are called only by the "ServerMediaSubsession" objects: MatroskaFile* ourMatroskaFile() { return fOurMatroskaFile; } char const* fileName() const { return fFileName; } float fileDuration() const { return fOurMatroskaFile->fileDuration(); } MatroskaTrack* lookup(unsigned trackNumber) { return fOurMatroskaFile->lookup(trackNumber); } // shortcut FramedSource* newDemuxedTrack(unsigned clientSessionId, unsigned trackNumber); // Used by the "ServerMediaSubsession" objects to implement their "createNewStreamSource()" virtual function. 
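// For illustration, a creation sketch (the hypothetical callback "onDemuxCreation" is invoked later, from
// the event loop, once the Matroska 'Track' headers have been parsed; "test.mkv" is a hypothetical file):
//
//   static void onDemuxCreation(MatroskaFileServerDemux* demux, void* /*clientData*/) {
//     // e.g., repeatedly call "demux->newServerMediaSubsession()" here, to populate a "ServerMediaSession"
//   }
//   ...
//   MatroskaFileServerDemux::createNew(env, "test.mkv", onDemuxCreation, NULL);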
private: MatroskaFileServerDemux(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData, char const* preferredLanguage); // called only by createNew() virtual ~MatroskaFileServerDemux(); static void onMatroskaFileCreation(MatroskaFile* newFile, void* clientData); void onMatroskaFileCreation(MatroskaFile* newFile); private: char const* fFileName; onCreationFunc* fOnCreation; void* fOnCreationClientData; MatroskaFile* fOurMatroskaFile; // Used to implement "newServerMediaSubsession()": u_int8_t fNextTrackTypeToCheck; // Used to set up demuxing, to implement "newDemuxedTrack()": unsigned fLastClientSessionId; MatroskaDemux* fLastCreatedDemux; }; #endif live/liveMedia/include/VorbisAudioRTPSink.hh000444 001751 000000 00000006235 12265042432 021251 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // RTP sink for Vorbis audio // C++ header #ifndef _VORBIS_AUDIO_RTP_SINK_HH #define _VORBIS_AUDIO_RTP_SINK_HH #ifndef _AUDIO_RTP_SINK_HH #include "AudioRTPSink.hh" #endif class VorbisAudioRTPSink: public AudioRTPSink { public: static VorbisAudioRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned numChannels, // The following headers provide the 'configuration' information, for the SDP description: u_int8_t* identificationHeader, unsigned identificationHeaderSize, u_int8_t* commentHeader, unsigned commentHeaderSize, u_int8_t* setupHeader, unsigned setupHeaderSize); static VorbisAudioRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned numChannels, char const* configStr); // an optional variant of "createNew()" that takes a Base-64-encoded 'configuration' string, // rather than the raw configuration headers, as parameter.
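// For illustration (a sketch; assumptions: "env" and a "Groupsock* rtpGroupsock" exist, and "configStr"
// came from the "configuration" parameter of a received SDP "a=fmtp:" line):
//
//   VorbisAudioRTPSink* vorbisSink
//     = VorbisAudioRTPSink::createNew(env, rtpGroupsock, 96/*dynamic RTP payload type*/,
//                                     44100/*timestamp frequency*/, 2/*numChannels*/, configStr);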
protected: VorbisAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned numChannels, u_int8_t* identificationHeader, unsigned identificationHeaderSize, u_int8_t* commentHeader, unsigned commentHeaderSize, u_int8_t* setupHeader, unsigned setupHeaderSize, u_int32_t identField = 0xFACADE); // called only by createNew() virtual ~VorbisAudioRTPSink(); private: // redefined virtual functions: virtual char const* auxSDPLine(); // for the "a=fmtp:" SDP line virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; virtual unsigned specialHeaderSize() const; virtual unsigned frameSpecificHeaderSize() const; private: u_int32_t fIdent; // "Ident" field used by this stream. (Only the low 24 bits of this are used.) char* fFmtpSDPLine; }; #endif live/liveMedia/include/VorbisAudioRTPSource.hh000444 001751 000000 00000003642 12265042432 021604 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Vorbis Audio RTP Sources // C++ header #ifndef _VORBIS_AUDIO_RTP_SOURCE_HH #define _VORBIS_AUDIO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class VorbisAudioRTPSource: public MultiFramedRTPSource { public: static VorbisAudioRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); u_int32_t curPacketIdent() const { return fCurPacketIdent; } // The current "Ident" field; only the low-order 24 bits are used protected: VorbisAudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() virtual ~VorbisAudioRTPSource(); protected: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; private: u_int32_t fCurPacketIdent; // only the low-order 24 bits are used }; #endif live/liveMedia/include/MPEG2TransportUDPServerMediaSubsession.hh000444 001751 000000 00000004415 12265042432 025113 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .)
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from an incoming UDP (or RTP/UDP) MPEG-2 Transport Stream // C++ header #ifndef _MPEG2_TRANSPORT_UDP_SERVER_MEDIA_SUBSESSION_HH #define _MPEG2_TRANSPORT_UDP_SERVER_MEDIA_SUBSESSION_HH #ifndef _ON_DEMAND_SERVER_MEDIA_SUBSESSION_HH #include "OnDemandServerMediaSubsession.hh" #endif class MPEG2TransportUDPServerMediaSubsession: public OnDemandServerMediaSubsession { public: static MPEG2TransportUDPServerMediaSubsession* createNew(UsageEnvironment& env, char const* inputAddressStr, // An IP multicast address, or use "0.0.0.0" or NULL for unicast input Port const& inputPort, Boolean inputStreamIsRawUDP = False); // otherwise (default) the input stream is RTP/UDP protected: MPEG2TransportUDPServerMediaSubsession(UsageEnvironment& env, char const* inputAddressStr, Port const& inputPort, Boolean inputStreamIsRawUDP); // called only by createNew(); virtual ~MPEG2TransportUDPServerMediaSubsession(); protected: // redefined virtual functions virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); protected: char const* fInputAddressStr; Port fInputPort; Groupsock* fInputGroupsock; Boolean fInputStreamIsRawUDP; }; #endif live/liveMedia/include/ProxyServerMediaSession.hh000444 001751 000000 00000021702 12265042432 022420 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A subclass of "ServerMediaSession" that can be used to create a (unicast) RTSP server that acts as a 'proxy' for // another (unicast or multicast) RTSP/RTP stream. // C++ header #ifndef _PROXY_SERVER_MEDIA_SESSION_HH #define _PROXY_SERVER_MEDIA_SESSION_HH #ifndef _SERVER_MEDIA_SESSION_HH #include "ServerMediaSession.hh" #endif #ifndef _MEDIA_SESSION_HH #include "MediaSession.hh" #endif #ifndef _RTSP_CLIENT_HH #include "RTSPClient.hh" #endif // A subclass of "RTSPClient", used to refer to the particular "ProxyServerMediaSession" object being used.
// It is used only within the implementation of "ProxyServerMediaSession", but is defined here, in case developers wish to // subclass it. class ProxyRTSPClient: public RTSPClient { public: ProxyRTSPClient(class ProxyServerMediaSession& ourServerMediaSession, char const* rtspURL, char const* username, char const* password, portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer); virtual ~ProxyRTSPClient(); void continueAfterDESCRIBE(char const* sdpDescription); void continueAfterLivenessCommand(int resultCode, Boolean serverSupportsGetParameter); void continueAfterSETUP(); private: void reset(); Authenticator* auth() { return fOurAuthenticator; } void scheduleLivenessCommand(); static void sendLivenessCommand(void* clientData); void scheduleDESCRIBECommand(); static void sendDESCRIBE(void* clientData); static void subsessionTimeout(void* clientData); void handleSubsessionTimeout(); private: friend class ProxyServerMediaSession; friend class ProxyServerMediaSubsession; ProxyServerMediaSession& fOurServerMediaSession; char* fOurURL; Authenticator* fOurAuthenticator; Boolean fStreamRTPOverTCP; class ProxyServerMediaSubsession *fSetupQueueHead, *fSetupQueueTail; unsigned fNumSetupsDone; unsigned fNextDESCRIBEDelay; // in seconds Boolean fServerSupportsGetParameter, fLastCommandWasPLAY; TaskToken fLivenessCommandTask, fDESCRIBECommandTask, fSubsessionTimerTask; }; typedef ProxyRTSPClient* createNewProxyRTSPClientFunc(ProxyServerMediaSession& ourServerMediaSession, char const* rtspURL, char const* username, char const* password, portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer); ProxyRTSPClient* defaultCreateNewProxyRTSPClientFunc(ProxyServerMediaSession& ourServerMediaSession, char const* rtspURL, char const* username, char const* password, portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer); class ProxyServerMediaSession: public ServerMediaSession { public: static ProxyServerMediaSession* createNew(UsageEnvironment& env, RTSPServer* ourRTSPServer, // Note: We can be used by just one "RTSPServer" char const* inputStreamURL, // the "rtsp://" URL of the stream we'll be proxying char const* streamName = NULL, char const* username = NULL, char const* password = NULL, portNumBits tunnelOverHTTPPortNum = 0, // for streaming the *proxied* (i.e., back-end) stream int verbosityLevel = 0, int socketNumToServer = -1); // Hack: "tunnelOverHTTPPortNum" == 0xFFFF (i.e., all-ones) means: Stream RTP/RTCP-over-TCP, but *not* using HTTP // "verbosityLevel" == 1 means display basic proxy setup info; "verbosityLevel" == 2 means display RTSP client protocol also. // If "socketNumToServer" is >= 0, then it is the socket number of an already-existing TCP connection to the server. // (In this case, "inputStreamURL" must point to the socket's endpoint, so that it can be accessed via the socket.) virtual ~ProxyServerMediaSession(); char const* url() const; char describeCompletedFlag; // initialized to 0; set to 1 when the back-end "DESCRIBE" completes. // (This can be used as a 'watch variable' in "doEventLoop()".) Boolean describeCompletedSuccessfully() const { return fClientMediaSession != NULL; } // This can be used - along with "describeCompletedFlag" - to check whether the back-end "DESCRIBE" completed *successfully*.
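// Usage sketch (illustrative only), along the lines of the "live555ProxyServer"
// application; "env" and "ourRTSPServer" are hypothetical caller-side names:
//
//   ProxyServerMediaSession* sms = ProxyServerMediaSession::createNew(
//       env, ourRTSPServer, "rtsp://backend.example.com/cam1", "proxyStream");
//   ourRTSPServer->addServerMediaSession(sms);
//   // Clients may now request "proxyStream" from our own server; the back-end
//   // stream is fetched once, however many front-end clients are connected.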
protected: ProxyServerMediaSession(UsageEnvironment& env, RTSPServer* ourRTSPServer, char const* inputStreamURL, char const* streamName, char const* username, char const* password, portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer, createNewProxyRTSPClientFunc* ourCreateNewProxyRTSPClientFunc = defaultCreateNewProxyRTSPClientFunc); // If you subclass "ProxyRTSPClient", then you will also need to define your own function // - with signature "createNewProxyRTSPClientFunc" (see above) - that creates a new object // of this subclass. You should also subclass "ProxyServerMediaSession" and, in your // subclass's constructor, initialize the parent class (i.e., "ProxyServerMediaSession") // constructor by passing your new function as the "ourCreateNewProxyRTSPClientFunc" // parameter. protected: RTSPServer* fOurRTSPServer; ProxyRTSPClient* fProxyRTSPClient; MediaSession* fClientMediaSession; private: friend class ProxyRTSPClient; friend class ProxyServerMediaSubsession; void continueAfterDESCRIBE(char const* sdpDescription); void resetDESCRIBEState(); // undoes what was done by "continueAfterDESCRIBE()" private: int fVerbosityLevel; class PresentationTimeSessionNormalizer* fPresentationTimeSessionNormalizer; createNewProxyRTSPClientFunc* fCreateNewProxyRTSPClientFunc; }; ////////// PresentationTimeSessionNormalizer and PresentationTimeSubsessionNormalizer definitions ////////// // The following two classes are used by proxies to convert incoming streams' presentation times into wall-clock-aligned // presentation times that are suitable for our "RTPSink"s (for the corresponding outgoing streams). // (For multi-subsession (i.e., audio+video) sessions, the outgoing streams' presentation times retain the same relative // separation as those of the incoming streams.)
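// Conceptually (a sketch of the idea, not the exact code), the normalization is a
// single per-session offset: when the first RTCP-synced presentation time "fromPT"
// arrives, the session normalizer computes
//   fPTAdjustment = <current wall-clock time> - fromPT;
// and each outgoing presentation time is then
//   toPT = fromPT + fPTAdjustment;
// Because all subsessions share the one offset, their relative separation is preserved.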
class PresentationTimeSubsessionNormalizer: public FramedFilter { public: void setRTPSink(RTPSink* rtpSink) { fRTPSink = rtpSink; } private: friend class PresentationTimeSessionNormalizer; PresentationTimeSubsessionNormalizer(PresentationTimeSessionNormalizer& parent, FramedSource* inputSource, RTPSource* rtpSource, char const* codecName, PresentationTimeSubsessionNormalizer* next); // called only from within "PresentationTimeSessionNormalizer" virtual ~PresentationTimeSubsessionNormalizer(); static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); private: // redefined virtual functions: virtual void doGetNextFrame(); private: PresentationTimeSessionNormalizer& fParent; RTPSource* fRTPSource; RTPSink* fRTPSink; char const* fCodecName; PresentationTimeSubsessionNormalizer* fNext; }; class PresentationTimeSessionNormalizer: public Medium { public: PresentationTimeSessionNormalizer(UsageEnvironment& env); virtual ~PresentationTimeSessionNormalizer(); PresentationTimeSubsessionNormalizer* createNewPresentationTimeSubsessionNormalizer(FramedSource* inputSource, RTPSource* rtpSource, char const* codecName); private: // called only from within "~PresentationTimeSubsessionNormalizer": friend class PresentationTimeSubsessionNormalizer; void normalizePresentationTime(PresentationTimeSubsessionNormalizer* ssNormalizer, struct timeval& toPT, struct timeval const& fromPT); void removePresentationTimeSubsessionNormalizer(PresentationTimeSubsessionNormalizer* ssNormalizer); private: PresentationTimeSubsessionNormalizer* fSubsessionNormalizers; PresentationTimeSubsessionNormalizer* fMasterSSNormalizer; // used for subsessions that have been RTCP-synced struct timeval fPTAdjustment; // Added to (RTCP-synced) subsession presentation times to 'normalize' them with wall-clock time. }; #endif live/liveMedia/include/VP8VideoRTPSource.hh000444 001751 000000 00000003277 12265042432 020766 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// VP8 Video RTP Sources // C++ header #ifndef _VP8_VIDEO_RTP_SOURCE_HH #define _VP8_VIDEO_RTP_SOURCE_HH #ifndef _MULTI_FRAMED_RTP_SOURCE_HH #include "MultiFramedRTPSource.hh" #endif class VP8VideoRTPSource: public MultiFramedRTPSource { public: static VP8VideoRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency = 90000); protected: VP8VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency); // called only by createNew() virtual ~VP8VideoRTPSource(); protected: // redefined virtual functions: virtual Boolean processSpecialHeader(BufferedPacket* packet, unsigned& resultSpecialHeaderSize); virtual char const* MIMEtype() const; }; #endif live/liveMedia/include/RTSPRegisterSender.hh000444 001751 000000 00000006621 12265042432 021245 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A special object which, when created, sends a custom RTSP "REGISTER" command to a specified client. 
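// Usage sketch (illustrative only; "env" and "myResponseHandler" are hypothetical
// caller-side names): ask the client at 192.168.1.10, port 8554, to access
// ('REGISTER') the given "rtsp://" URL:
//
//   RTSPRegisterSender::createNew(env, "192.168.1.10", 8554,
//       "rtsp://our-server.example.com/stream1", myResponseHandler);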
// C++ header #ifndef _RTSP_REGISTER_SENDER_HH #define _RTSP_REGISTER_SENDER_HH #ifndef _RTSP_CLIENT_HH #include "RTSPClient.hh" #endif class RTSPRegisterSender: public RTSPClient { public: static RTSPRegisterSender* createNew(UsageEnvironment& env, char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToRegister, RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator = NULL, Boolean requestStreamingViaTCP = False, char const* proxyURLSuffix = NULL, Boolean reuseConnection = False, int verbosityLevel = 0, char const* applicationName = NULL); void grabConnection(int& sock, struct sockaddr_in& remoteAddress); // so that the socket doesn't get closed when we're deleted protected: RTSPRegisterSender(UsageEnvironment& env, char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToRegister, RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator, Boolean requestStreamingViaTCP, char const* proxyURLSuffix, Boolean reuseConnection, int verbosityLevel, char const* applicationName); // called only by "createNew()" virtual ~RTSPRegisterSender(); // Redefined virtual functions: virtual Boolean setRequestFields(RequestRecord* request, char*& cmdURL, Boolean& cmdURLWasAllocated, char const*& protocolStr, char*& extraHeaders, Boolean& extraHeadersWereAllocated); public: // Some compilers complain if this is "protected:" // A subclass of "RTSPClient::RequestRecord", specific to our "REGISTER" command: class RequestRecord_REGISTER: public RTSPClient::RequestRecord { public: RequestRecord_REGISTER(unsigned cseq, RTSPClient::responseHandler* rtspResponseHandler, char const* rtspURLToRegister, Boolean reuseConnection, Boolean requestStreamingViaTCP, char const* proxyURLSuffix); virtual ~RequestRecord_REGISTER(); char const* rtspURLToRegister() const { return fRTSPURLToRegister; } Boolean reuseConnection() const { return fReuseConnection; } Boolean requestStreamingViaTCP() const { return fRequestStreamingViaTCP; } char const* proxyURLSuffix() const { return fProxyURLSuffix; } private: char* fRTSPURLToRegister; Boolean fReuseConnection, fRequestStreamingViaTCP; char* fProxyURLSuffix; }; private: portNumBits fRemoteClientPortNum; }; #endif live/liveMedia/include/TheoraVideoRTPSink.hh000444 001751 000000 00000003746 12265042432 021240 0ustar00rsfwheel000000 000000 /* * Theora Video RTP packetizer * Copied from live555's VorbisAudioRTPSink */ #ifndef _THEORA_VIDEO_RTP_SINK_HH #define _THEORA_VIDEO_RTP_SINK_HH #ifndef _VIDEO_RTP_SINK_HH #include "VideoRTPSink.hh" #endif class TheoraVideoRTPSink: public VideoRTPSink { public: enum PixFmt { YUV420, YUV422, YUV444, }; static TheoraVideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned width, unsigned height, enum PixFmt pf, // The following headers provide the 'configuration' information, for the SDP description: u_int8_t* identificationHeader, unsigned identificationHeaderSize, u_int8_t* commentHeader, unsigned commentHeaderSize, u_int8_t* setupHeader, unsigned setupHeaderSize, u_int32_t identField); protected: TheoraVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned width, unsigned height, enum PixFmt pf, u_int8_t* identificationHeader, unsigned identificationHeaderSize, u_int8_t* commentHeader, unsigned commentHeaderSize, u_int8_t* setupHeader, unsigned setupHeaderSize, u_int32_t identField); // 
called only by createNew() virtual ~TheoraVideoRTPSink(); private: // redefined virtual functions: virtual char const* auxSDPLine(); // for the "a=fmtp:" SDP line virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; virtual unsigned specialHeaderSize() const; private: u_int32_t fIdent; // "Ident" field used by this stream. (Only the low 24 bits of this are used.) char* fFmtpSDPLine; }; #endif live/liveMedia/include/H264or5VideoStreamFramer.hh000444 001751 000000 00000007435 12265042432 022164 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up a H.264 or H.265 Video Elementary Stream into NAL units. // C++ header #ifndef _H264_OR_5_VIDEO_STREAM_FRAMER_HH #define _H264_OR_5_VIDEO_STREAM_FRAMER_HH #ifndef _MPEG_VIDEO_STREAM_FRAMER_HH #include "MPEGVideoStreamFramer.hh" #endif class H264or5VideoStreamFramer: public MPEGVideoStreamFramer { public: void getVPSandSPSandPPS(u_int8_t*& vps, unsigned& vpsSize, u_int8_t*& sps, unsigned& spsSize, u_int8_t*& pps, unsigned& ppsSize) const { // Returns pointers to copies of the most recently seen VPS (video parameter set) // SPS (sequence parameter set) and PPS (picture parameter set) NAL units. // (NULL pointers are returned if the NAL units have not yet been seen.) vps = fLastSeenVPS; vpsSize = fLastSeenVPSSize; sps = fLastSeenSPS; spsSize = fLastSeenSPSSize; pps = fLastSeenPPS; ppsSize = fLastSeenPPSSize; } void setVPSandSPSandPPS(u_int8_t* vps, unsigned vpsSize, u_int8_t* sps, unsigned spsSize, u_int8_t* pps, unsigned ppsSize) { // Assigns copies of the VPS, SPS and PPS NAL units. If this function is not called, // then these NAL units are assigned only if/when they appear in the input stream. saveCopyOfVPS(vps, vpsSize); saveCopyOfSPS(sps, spsSize); saveCopyOfPPS(pps, ppsSize); } u_int32_t profileLevelId() const { return fProfileLevelId; } // used for H.264 only u_int8_t const* profileTierLevelHeaderBytes() const { return fProfileTierLevelHeaderBytes; } // used for H.265 only protected: H264or5VideoStreamFramer(int hNumber, // 264 or 265 UsageEnvironment& env, FramedSource* inputSource, Boolean createParser, Boolean includeStartCodeInOutput); // We're an abstract base class. 
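// Usage sketch for the "getVPSandSPSandPPS()" accessor above (illustrative only;
// "framer" is a hypothetical pointer to a concrete subclass instance). NULL results
// simply mean that the corresponding parameter-set NAL unit hasn't been seen
// (or assigned) yet:
//
//   u_int8_t* vps; unsigned vpsSize; u_int8_t* sps; unsigned spsSize;
//   u_int8_t* pps; unsigned ppsSize;
//   framer->getVPSandSPSandPPS(vps, vpsSize, sps, spsSize, pps, ppsSize);
//   if (sps != NULL && pps != NULL) { /* e.g., enough to build a H.264 SDP description */ }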
virtual ~H264or5VideoStreamFramer(); void saveCopyOfVPS(u_int8_t* from, unsigned size); void saveCopyOfSPS(u_int8_t* from, unsigned size); void saveCopyOfPPS(u_int8_t* from, unsigned size); void setPresentationTime() { fPresentationTime = fNextPresentationTime; } Boolean isVPS(u_int8_t nal_unit_type); Boolean isSPS(u_int8_t nal_unit_type); Boolean isPPS(u_int8_t nal_unit_type); Boolean isVCL(u_int8_t nal_unit_type); protected: int fHNumber; u_int8_t* fLastSeenVPS; unsigned fLastSeenVPSSize; u_int8_t* fLastSeenSPS; unsigned fLastSeenSPSSize; u_int8_t* fLastSeenPPS; unsigned fLastSeenPPSSize; u_int32_t fProfileLevelId; // set/used for H.264 only u_int8_t fProfileTierLevelHeaderBytes[12]; // set/used for H.265 only struct timeval fNextPresentationTime; // the presentation time to be used for the next NAL unit to be parsed/delivered after this friend class H264or5VideoStreamParser; // hack }; // A general routine for making a copy of a (H.264 or H.265) NAL unit, // removing 'emulation' bytes from the copy: unsigned removeH264or5EmulationBytes(u_int8_t* to, unsigned toMaxSize, u_int8_t* from, unsigned fromSize); // returns the size of the copy; it will be <= min(toMaxSize,fromSize) #endif live/liveMedia/include/H265VideoStreamFramer.hh000444 001751 000000 00000003151 12265042432 021526 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A filter that breaks up a H.265 Video Elementary Stream into NAL units. // C++ header #ifndef _H265_VIDEO_STREAM_FRAMER_HH #define _H265_VIDEO_STREAM_FRAMER_HH #ifndef _H264_OR_5_VIDEO_STREAM_FRAMER_HH #include "H264or5VideoStreamFramer.hh" #endif class H265VideoStreamFramer: public H264or5VideoStreamFramer { public: static H265VideoStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean includeStartCodeInOutput = False); protected: H265VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean createParser, Boolean includeStartCodeInOutput); // called only by "createNew()" virtual ~H265VideoStreamFramer(); // redefined virtual functions: virtual Boolean isH265VideoStreamFramer() const; }; #endif live/liveMedia/include/H265VideoStreamDiscreteFramer.hh000444 001751 000000 00000003367 12265042432 023222 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simplified version of "H265VideoStreamFramer" that takes only complete, // discrete frames (rather than an arbitrary byte stream) as input. // This avoids the parsing and data copying overhead of the full // "H265VideoStreamFramer". // C++ header #ifndef _H265_VIDEO_STREAM_DISCRETE_FRAMER_HH #define _H265_VIDEO_STREAM_DISCRETE_FRAMER_HH #ifndef _H264_OR_5_VIDEO_STREAM_DISCRETE_FRAMER_HH #include "H264or5VideoStreamDiscreteFramer.hh" #endif class H265VideoStreamDiscreteFramer: public H264or5VideoStreamDiscreteFramer { public: static H265VideoStreamDiscreteFramer* createNew(UsageEnvironment& env, FramedSource* inputSource); protected: H265VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource); // called only by createNew() virtual ~H265VideoStreamDiscreteFramer(); private: // redefined virtual functions: virtual Boolean isH265VideoStreamFramer() const; }; #endif live/liveMedia/include/H264or5VideoRTPSink.hh000444 001751 000000 00000004356 12265042432 021065 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for H.264 or H.265 video // C++ header #ifndef _H264_OR_5_VIDEO_RTP_SINK_HH #define _H264_OR_5_VIDEO_RTP_SINK_HH #ifndef _VIDEO_RTP_SINK_HH #include "VideoRTPSink.hh" #endif #ifndef _FRAMED_FILTER_HH #include "FramedFilter.hh" #endif class H264or5VideoRTPSink: public VideoRTPSink { protected: H264or5VideoRTPSink(int hNumber, // 264 or 265 UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int8_t const* vps = NULL, unsigned vpsSize = 0, u_int8_t const* sps = NULL, unsigned spsSize = 0, u_int8_t const* pps = NULL, unsigned ppsSize = 0); // we're an abstract base class virtual ~H264or5VideoRTPSink(); private: // redefined virtual functions: virtual Boolean continuePlaying(); virtual void doSpecialFrameHandling(unsigned fragmentationOffset, unsigned char* frameStart, unsigned numBytesInFrame, struct timeval framePresentationTime, unsigned numRemainingBytes); virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart, unsigned numBytesInFrame) const; protected: int fHNumber; FramedFilter* fOurFragmenter; char* fFmtpSDPLine; u_int8_t* fVPS; unsigned fVPSSize; u_int8_t* fSPS; unsigned fSPSSize; u_int8_t* fPPS; unsigned fPPSSize; }; #endif live/liveMedia/include/H264or5VideoStreamDiscreteFramer.hh000444 001751 000000 00000004162 12265042432 023641 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A simplified version of "H264or5VideoStreamFramer" that takes only complete, // discrete frames (rather than an arbitrary byte stream) as input. // This avoids the parsing and data copying overhead of the full // "H264or5VideoStreamFramer".
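// Usage sketch (illustrative only; "env" and "myNALUnitSource" are hypothetical
// caller-side names). A discrete framer fits when the upstream source already
// delivers exactly one NAL unit per delivery - e.g., NAL units extracted from a
// file container, or obtained from an encoder API - rather than a raw byte stream.
// (The NAL units should not be preceded by 0x00000001 'start codes'.)
//
//   FramedSource* framer = H265VideoStreamDiscreteFramer::createNew(env, myNALUnitSource);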
// C++ header #ifndef _H264_OR_5_VIDEO_STREAM_DISCRETE_FRAMER_HH #define _H264_OR_5_VIDEO_STREAM_DISCRETE_FRAMER_HH #ifndef _H264_OR_5_VIDEO_STREAM_FRAMER_HH #include "H264or5VideoStreamFramer.hh" #endif class H264or5VideoStreamDiscreteFramer: public H264or5VideoStreamFramer { protected: H264or5VideoStreamDiscreteFramer(int hNumber, UsageEnvironment& env, FramedSource* inputSource); // we're an abstract base class virtual ~H264or5VideoStreamDiscreteFramer(); protected: // redefined virtual functions: virtual void doGetNextFrame(); protected: static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); }; #endif live/liveMedia/include/H265VideoFileServerMediaSubsession.hh000444 001751 000000 00000004416 12265042432 024227 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s // on demand, from a H265 Elementary Stream video file. // C++ header #ifndef _H265_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH #define _H265_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH #ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH #include "FileServerMediaSubsession.hh" #endif class H265VideoFileServerMediaSubsession: public FileServerMediaSubsession { public: static H265VideoFileServerMediaSubsession* createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); // Used to implement "getAuxSDPLine()": void checkForAuxSDPLine1(); void afterPlayingDummy1(); protected: H265VideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource); // called only by createNew(); virtual ~H265VideoFileServerMediaSubsession(); void setDoneFlag() { fDoneFlag = ~0; } protected: // redefined virtual functions virtual char const* getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource); virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate); virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource); private: char* fAuxSDPLine; char fDoneFlag; // used when setting up "fAuxSDPLine" RTPSink* fDummyRTPSink; // ditto }; #endif live/liveMedia/include/ByteStreamFileSource.hh000444 001751 000000 00000005410 12265042432 021642 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "liveMedia" // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A file source that is a plain byte stream (rather than frames) // C++ header #ifndef _BYTE_STREAM_FILE_SOURCE_HH #define _BYTE_STREAM_FILE_SOURCE_HH #ifndef _FRAMED_FILE_SOURCE_HH #include "FramedFileSource.hh" #endif class ByteStreamFileSource: public FramedFileSource { public: static ByteStreamFileSource* createNew(UsageEnvironment& env, char const* fileName, unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0); // "preferredFrameSize" == 0 means 'no preference' // "playTimePerFrame" is in microseconds static ByteStreamFileSource* createNew(UsageEnvironment& env, FILE* fid, unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0); // an alternative version of "createNew()" that's used if you already have // an open file. u_int64_t fileSize() const { return fFileSize; } // 0 means zero-length, unbounded, or unknown void seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream = 0); // if "numBytesToStream" is >0, then we limit the stream to that number of bytes, before treating it as EOF void seekToByteRelative(int64_t offset); void seekToEnd(); // to force EOF handling on the next read protected: ByteStreamFileSource(UsageEnvironment& env, FILE* fid, unsigned preferredFrameSize, unsigned playTimePerFrame); // called only by createNew() virtual ~ByteStreamFileSource(); static void fileReadableHandler(ByteStreamFileSource* source, int mask); void doReadFromFile(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void doStopGettingFrames(); protected: u_int64_t fFileSize; private: unsigned fPreferredFrameSize; unsigned fPlayTimePerFrame; Boolean fFidIsSeekable; unsigned fLastPlayTime; Boolean fHaveStartedReading; Boolean fLimitNumBytesToStream; u_int64_t fNumBytesToStream; // used iff "fLimitNumBytesToStream" is True }; #endif live/groupsock/include/000755 001751 000000 00000000000 12265042432 015400 5ustar00rsfwheel000000 000000 live/groupsock/inet.c000444 001751 000000 00000034525 12265042432 015067 0ustar00rsfwheel000000 000000 #ifndef _NET_COMMON_H #include "NetCommon.h" #endif #include <stdio.h> #ifdef VXWORKS #include <inetLib.h> #endif /* Some systems (e.g., SunOS) have header files that erroneously declare inet_addr() as taking no arguments. * This confuses C++. To overcome this, we use our own routine, implemented in C. */ unsigned our_inet_addr(cp) char const* cp; { return inet_addr(cp); } #if defined(__WIN32__) || defined(_WIN32) #ifndef IMN_PIM #define WS_VERSION_CHOICE1 0x202/*MAKEWORD(2,2)*/ #define WS_VERSION_CHOICE2 0x101/*MAKEWORD(1,1)*/ int initializeWinsockIfNecessary(void) { /* We need to call an initialization routine before * we can do anything with winsock.
(How fucking lame!): */ static int _haveInitializedWinsock = 0; WSADATA wsadata; if (!_haveInitializedWinsock) { if ((WSAStartup(WS_VERSION_CHOICE1, &wsadata) != 0) && ((WSAStartup(WS_VERSION_CHOICE2, &wsadata)) != 0)) { return 0; /* error in initialization */ } if ((wsadata.wVersion != WS_VERSION_CHOICE1) && (wsadata.wVersion != WS_VERSION_CHOICE2)) { WSACleanup(); return 0; /* desired Winsock version was not available */ } _haveInitializedWinsock = 1; } return 1; } #else int initializeWinsockIfNecessary(void) { return 1; } #endif #else #define initializeWinsockIfNecessary() 1 #endif #ifndef NULL #define NULL 0 #endif #ifdef USE_SYSTEM_RANDOM /* Use the system-supplied "random()" and "srandom()" functions */ #include <stdlib.h> long our_random() { #if defined(__WIN32__) || defined(_WIN32) return rand(); #else return random(); #endif } void our_srandom(unsigned int x) { #if defined(__WIN32__) || defined(_WIN32) srand(x); #else srandom(x); #endif } #else /* Use our own implementation of the "random()" and "srandom()" functions */ /* * random.c: * * An improved random number generation package. In addition to the standard * rand()/srand() like interface, this package also has a special state info * interface. The our_initstate() routine is called with a seed, an array of * bytes, and a count of how many bytes are being passed in; this array is * then initialized to contain information for random number generation with * that much state information. Good sizes for the amount of state * information are 32, 64, 128, and 256 bytes. The state can be switched by * calling the our_setstate() routine with the same array as was initialized * with our_initstate(). By default, the package runs with 128 bytes of state * information and generates far better random numbers than a linear * congruential generator. If the amount of state information is less than * 32 bytes, a simple linear congruential R.N.G. is used. * * Internally, the state information is treated as an array of longs; the * zeroeth element of the array is the type of R.N.G. being used (small * integer); the remainder of the array is the state information for the * R.N.G. Thus, 32 bytes of state information will give 7 longs worth of * state information, which will allow a degree seven polynomial. (Note: * the zeroeth word of state information also has some other information * stored in it -- see our_setstate() for details). * * The random number generation technique is a linear feedback shift register * approach, employing trinomials (since there are fewer terms to sum up that * way). In this approach, the least significant bit of all the numbers in * the state table will act as a linear feedback shift register, and will * have period 2^deg - 1 (where deg is the degree of the polynomial being * used, assuming that the polynomial is irreducible and primitive). The * higher order bits will have longer periods, since their values are also * influenced by pseudo-random carries out of the lower bits. The total * period of the generator is approximately deg*(2**deg - 1); thus doubling * the amount of state information has a vast influence on the period of the * generator. Note: the deg*(2**deg - 1) is an approximation only good for * large deg, when the period of the shift register is the dominant factor. * With deg equal to seven, the period is actually much longer than the * 7*(2**7 - 1) predicted by this formula.
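 * As a concrete instance of that formula (an illustrative aside): the default
 * 128 bytes of state select the degree-31 trinomial, so the approximate period
 * is 31*(2**31 - 1), i.e. on the order of 6.6*10**10 outputs.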
*/ /* * For each of the currently supported random number generators, we have a * break value on the amount of state information (you need at least this * many bytes of state info to support this random number generator), a degree * for the polynomial (actually a trinomial) that the R.N.G. is based on, and * the separation between the two lower order coefficients of the trinomial. */ #define TYPE_0 0 /* linear congruential */ #define BREAK_0 8 #define DEG_0 0 #define SEP_0 0 #define TYPE_1 1 /* x**7 + x**3 + 1 */ #define BREAK_1 32 #define DEG_1 7 #define SEP_1 3 #define TYPE_2 2 /* x**15 + x + 1 */ #define BREAK_2 64 #define DEG_2 15 #define SEP_2 1 #define TYPE_3 3 /* x**31 + x**3 + 1 */ #define BREAK_3 128 #define DEG_3 31 #define SEP_3 3 #define TYPE_4 4 /* x**63 + x + 1 */ #define BREAK_4 256 #define DEG_4 63 #define SEP_4 1 /* * Array versions of the above information to make code run faster -- * relies on fact that TYPE_i == i. */ #define MAX_TYPES 5 /* max number of types above */ static int const degrees[MAX_TYPES] = { DEG_0, DEG_1, DEG_2, DEG_3, DEG_4 }; static int const seps [MAX_TYPES] = { SEP_0, SEP_1, SEP_2, SEP_3, SEP_4 }; /* * Initially, everything is set up as if from: * * our_initstate(1, &randtbl, 128); * * Note that this initialization takes advantage of the fact that srandom() * advances the front and rear pointers 10*rand_deg times, and hence the * rear pointer which starts at 0 will also end up at zero; thus the zeroeth * element of the state information, which contains info about the current * position of the rear pointer is just * * MAX_TYPES * (rptr - state) + TYPE_3 == TYPE_3. */ static long randtbl[DEG_3 + 1] = { TYPE_3, 0x9a319039, 0x32d9c024, 0x9b663182, 0x5da1f342, 0xde3b81e0, 0xdf0a6fb5, 0xf103bc02, 0x48f340fb, 0x7449e56b, 0xbeb1dbb0, 0xab5c5918, 0x946554fd, 0x8c2e680f, 0xeb3d799f, 0xb11ee0b7, 0x2d436b86, 0xda672e2a, 0x1588ca88, 0xe369735d, 0x904f35f7, 0xd7158fd6, 0x6fa6f051, 0x616e6b96, 0xac94efdc, 0x36413f93, 0xc622c298, 0xf5a42ab8, 0x8a88d77b, 0xf5ad9d0e, 0x8999220b, 0x27fb47b9, }; /* * fptr and rptr are two pointers into the state info, a front and a rear * pointer. These two pointers are always rand_sep places apart, as they * cycle cyclically through the state information. (Yes, this does mean we * could get away with just one pointer, but the code for random() is more * efficient this way). The pointers are left positioned as they would be * from the call * * our_initstate(1, randtbl, 128); * * (The position of the rear pointer, rptr, is really 0 (as explained above * in the initialization of randtbl) because the state table pointer is set * to point to randtbl[1] (as explained below). */ static long* fptr = &randtbl[SEP_3 + 1]; static long* rptr = &randtbl[1]; /* * The following things are the pointer to the state information table, the * type of the current generator, the degree of the current polynomial being * used, and the separation between the two pointers. Note that for efficiency * of random(), we remember the first location of the state information, not * the zeroeth. Hence it is valid to access state[-1], which is used to * store the type of the R.N.G. Also, we remember the last location, since * this is more efficient than indexing every time to find the address of * the last element to see if the front and rear pointers have wrapped.
*/ static long *state = &randtbl[1]; static int rand_type = TYPE_3; static int rand_deg = DEG_3; static int rand_sep = SEP_3; static long* end_ptr = &randtbl[DEG_3 + 1]; /* * srandom: * * Initialize the random number generator based on the given seed. If the * type is the trivial no-state-information type, just remember the seed. * Otherwise, initializes state[] based on the given "seed" via a linear * congruential generator. Then, the pointers are set to known locations * that are exactly rand_sep places apart. Lastly, it cycles the state * information a given number of times to get rid of any initial dependencies * introduced by the L.C.R.N.G. Note that the initialization of randtbl[] * for default usage relies on values produced by this routine. */ long our_random(void); /*forward*/ void our_srandom(unsigned int x) { register int i; if (rand_type == TYPE_0) state[0] = x; else { state[0] = x; for (i = 1; i < rand_deg; i++) state[i] = 1103515245 * state[i - 1] + 12345; fptr = &state[rand_sep]; rptr = &state[0]; for (i = 0; i < 10 * rand_deg; i++) (void)our_random(); } } /* * our_initstate: * * Initialize the state information in the given array of n bytes for future * random number generation. Based on the number of bytes we are given, and * the break values for the different R.N.G.'s, we choose the best (largest) * one we can and set things up for it. srandom() is then called to * initialize the state information. * * Note that on return from srandom(), we set state[-1] to be the type * multiplexed with the current value of the rear pointer; this is so * successive calls to our_initstate() won't lose this information and will be * able to restart with our_setstate(). * * Note: the first thing we do is save the current state, if any, just like * our_setstate() so that it doesn't matter when our_initstate is called. * * Returns a pointer to the old state. */ char * our_initstate(seed, arg_state, n) unsigned int seed; /* seed for R.N.G. */ char *arg_state; /* pointer to state array */ int n; /* # bytes of state info */ { register char *ostate = (char *)(&state[-1]); if (rand_type == TYPE_0) state[-1] = rand_type; else state[-1] = MAX_TYPES * (rptr - state) + rand_type; if (n < BREAK_0) { #ifdef DEBUG (void)fprintf(stderr, "random: not enough state (%d bytes); ignored.\n", n); #endif return(0); } if (n < BREAK_1) { rand_type = TYPE_0; rand_deg = DEG_0; rand_sep = SEP_0; } else if (n < BREAK_2) { rand_type = TYPE_1; rand_deg = DEG_1; rand_sep = SEP_1; } else if (n < BREAK_3) { rand_type = TYPE_2; rand_deg = DEG_2; rand_sep = SEP_2; } else if (n < BREAK_4) { rand_type = TYPE_3; rand_deg = DEG_3; rand_sep = SEP_3; } else { rand_type = TYPE_4; rand_deg = DEG_4; rand_sep = SEP_4; } state = &(((long *)arg_state)[1]); /* first location */ end_ptr = &state[rand_deg]; /* must set end_ptr before srandom */ our_srandom(seed); if (rand_type == TYPE_0) state[-1] = rand_type; else state[-1] = MAX_TYPES*(rptr - state) + rand_type; return(ostate); } /* * our_setstate: * * Restore the state from the given state array. * * Note: it is important that we also remember the locations of the pointers * in the current state information, and restore the locations of the pointers * from the old state information. This is done by multiplexing the pointer * location into the zeroeth word of the state information. * * Note that due to the order in which things are done, it is OK to call * our_setstate() with the same state as the current state. * * Returns a pointer to the old state information. 
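 * An illustrative usage sketch (assumed caller-side code, not part of this file;
 * "seed" is a hypothetical caller-side variable):
 *   static char newState[128];
 *   char* oldState = our_initstate(seed, newState, sizeof newState);
 *   ... calls to our_random() now draw from newState ...
 *   our_setstate(oldState);  (switch back to the previous state array)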
*/ char * our_setstate(arg_state) char *arg_state; { register long *new_state = (long *)arg_state; register int type = new_state[0] % MAX_TYPES; register int rear = new_state[0] / MAX_TYPES; char *ostate = (char *)(&state[-1]); if (rand_type == TYPE_0) state[-1] = rand_type; else state[-1] = MAX_TYPES * (rptr - state) + rand_type; switch(type) { case TYPE_0: case TYPE_1: case TYPE_2: case TYPE_3: case TYPE_4: rand_type = type; rand_deg = degrees[type]; rand_sep = seps[type]; break; default: #ifdef DEBUG (void)fprintf(stderr, "random: state info corrupted; not changed.\n"); #endif break; } state = &new_state[1]; if (rand_type != TYPE_0) { rptr = &state[rear]; fptr = &state[(rear + rand_sep) % rand_deg]; } end_ptr = &state[rand_deg]; /* set end_ptr too */ return(ostate); } /* * random: * * If we are using the trivial TYPE_0 R.N.G., just do the old linear * congruential bit. Otherwise, we do our fancy trinomial stuff, which is * the same in all the other cases due to all the global variables that have * been set up. The basic operation is to add the number at the rear pointer * into the one at the front pointer. Then both pointers are advanced to * the next location cyclically in the table. The value returned is the sum * generated, reduced to 31 bits by throwing away the "least random" low bit. * * Note: the code takes advantage of the fact that both the front and * rear pointers can't wrap on the same call by not testing the rear * pointer if the front one has wrapped. * * Returns a 31-bit random number. */ long our_random() { long i; if (rand_type == TYPE_0) { i = state[0] = (state[0] * 1103515245 + 12345) & 0x7fffffff; } else { /* Make copies of "rptr" and "fptr" before working with them, in case we're being called concurrently by multiple threads: */ long* rp = rptr; long* fp = fptr; /* Make sure "rp" and "fp" are separated by the correct distance (again, allowing for concurrent access): */ if (!(fp == rp+SEP_3 || fp+DEG_3 == rp+SEP_3)) { /* A rare case that should occur only if we're being called concurrently by multiple threads. */ /* Restore the proper separation between the pointers: */ if (rp <= fp) rp = fp-SEP_3; else rp = fp+DEG_3-SEP_3; } *fp += *rp; i = (*fp >> 1) & 0x7fffffff; /* chucking least random bit */ if (++fp >= end_ptr) { fp = state; ++rp; } else if (++rp >= end_ptr) { rp = state; } /* Restore "rptr" and "fptr" from our working copies: */ rptr = rp; fptr = fp; } return i; } #endif u_int32_t our_random32() { /* Return a 32-bit random number. Because "our_random()" returns a 31-bit random number, we call it a second time, to generate the high bit. 
(Actually, to increase the likelihood of randomness, we take the middle 16 bits of two successive calls to "our_random()") */ long random_1 = our_random(); u_int32_t random16_1 = (u_int32_t)(random_1&0x00FFFF00); long random_2 = our_random(); u_int32_t random16_2 = (u_int32_t)(random_2&0x00FFFF00); return (random16_1<<8) | (random16_2>>8); } #ifdef USE_OUR_BZERO #ifndef __bzero void __bzero (to, count) char *to; int count; { while (count-- > 0) { *to++ = 0; } } #endif #endif live/groupsock/Makefile.tail000444 001751 000000 00000003506 12265042432 016347 0ustar00rsfwheel000000 000000 ##### End of variables to change NAME = libgroupsock ALL = $(NAME).$(LIB_SUFFIX) all: $(ALL) .$(C).$(OBJ): $(C_COMPILER) -c $(C_FLAGS) $< .$(CPP).$(OBJ): $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $< GROUPSOCK_LIB_OBJS = GroupsockHelper.$(OBJ) GroupEId.$(OBJ) inet.$(OBJ) Groupsock.$(OBJ) NetInterface.$(OBJ) NetAddress.$(OBJ) IOHandlers.$(OBJ) GroupsockHelper.$(CPP): include/GroupsockHelper.hh include/GroupsockHelper.hh: include/NetAddress.hh include/NetAddress.hh: include/NetCommon.h GroupEId.$(CPP): include/GroupEId.hh include/GroupEId.hh: include/NetAddress.hh inet.$(C): include/NetCommon.h Groupsock.$(CPP): include/Groupsock.hh include/GroupsockHelper.hh include/TunnelEncaps.hh include/Groupsock.hh: include/groupsock_version.hh include/NetInterface.hh include/GroupEId.hh include/NetInterface.hh: include/NetAddress.hh include/TunnelEncaps.hh: include/NetAddress.hh NetInterface.$(CPP): include/NetInterface.hh include/GroupsockHelper.hh NetAddress.$(CPP): include/NetAddress.hh include/GroupsockHelper.hh IOHandlers.$(CPP): include/IOHandlers.hh include/TunnelEncaps.hh libgroupsock.$(LIB_SUFFIX): $(GROUPSOCK_LIB_OBJS) \ $(PLATFORM_SPECIFIC_LIB_OBJS) $(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) \ $(GROUPSOCK_LIB_OBJS) clean: -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~ install: install1 $(INSTALL2) install1: libgroupsock.$(LIB_SUFFIX) install -d $(DESTDIR)$(PREFIX)/include/groupsock $(DESTDIR)$(LIBDIR) install -m 644 include/*.hh include/*.h $(DESTDIR)$(PREFIX)/include/groupsock install -m 644 libgroupsock.$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR) install_shared_libraries: libgroupsock.$(LIB_SUFFIX) ln -s libgroupsock.$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/libgroupsock.$(SHORT_LIB_SUFFIX) ln -s libgroupsock.$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/libgroupsock.so ##### Any additional, platform-specific rules come here: live/groupsock/GroupsockHelper.cpp000444 001751 000000 00000062017 12265042432 017601 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "mTunnel" multicast access service // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
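// A usage note for the "NoReuse" helper defined below (an illustrative sketch;
// "env" and "ourPort" are hypothetical caller-side names). "NoReuse" is a scoped
// object: while one is alive, the next socket-setup call will fail if its port is
// already in use, instead of silently sharing that port:
//
//   {
//     NoReuse dummy(env); // clears the 'reuse' flag until "dummy" goes out of scope
//     int sock = setupDatagramSocket(env, ourPort);
//     if (sock < 0) { /* the port was probably already in use */ }
//   }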
// Helper routines to implement 'group sockets' // Implementation #include "GroupsockHelper.hh" #if defined(__WIN32__) || defined(_WIN32) #include extern "C" int initializeWinsockIfNecessary(); #else #include #include #include #define initializeWinsockIfNecessary() 1 #endif #include // By default, use INADDR_ANY for the sending and receiving interfaces: netAddressBits SendingInterfaceAddr = INADDR_ANY; netAddressBits ReceivingInterfaceAddr = INADDR_ANY; static void socketErr(UsageEnvironment& env, char const* errorMsg) { env.setResultErrMsg(errorMsg); } NoReuse::NoReuse(UsageEnvironment& env) : fEnv(env) { groupsockPriv(fEnv)->reuseFlag = 0; } NoReuse::~NoReuse() { groupsockPriv(fEnv)->reuseFlag = 1; reclaimGroupsockPriv(fEnv); } _groupsockPriv* groupsockPriv(UsageEnvironment& env) { if (env.groupsockPriv == NULL) { // We need to create it _groupsockPriv* result = new _groupsockPriv; result->socketTable = NULL; result->reuseFlag = 1; // default value => allow reuse of socket numbers env.groupsockPriv = result; } return (_groupsockPriv*)(env.groupsockPriv); } void reclaimGroupsockPriv(UsageEnvironment& env) { _groupsockPriv* priv = (_groupsockPriv*)(env.groupsockPriv); if (priv->socketTable == NULL && priv->reuseFlag == 1/*default value*/) { // We can delete the structure (to save space); it will get created again, if needed: delete priv; env.groupsockPriv = NULL; } } static int createSocket(int type) { // Call "socket()" to create a (IPv4) socket of the specified type. // But also set it to have the 'close on exec' property (if we can) int sock; #ifdef SOCK_CLOEXEC sock = socket(AF_INET, type|SOCK_CLOEXEC, 0); if (sock != -1 || errno != EINVAL) return sock; // An "errno" of EINVAL likely means that the system wasn't happy with the SOCK_CLOEXEC; fall through and try again without it: #endif sock = socket(AF_INET, type, 0); #ifdef FD_CLOEXEC if (sock != -1) fcntl(sock, F_SETFD, FD_CLOEXEC); #endif return sock; } int setupDatagramSocket(UsageEnvironment& env, Port port) { if (!initializeWinsockIfNecessary()) { socketErr(env, "Failed to initialize 'winsock': "); return -1; } int newSocket = createSocket(SOCK_DGRAM); if (newSocket < 0) { socketErr(env, "unable to create datagram socket: "); return newSocket; } int reuseFlag = groupsockPriv(env)->reuseFlag; reclaimGroupsockPriv(env); if (setsockopt(newSocket, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuseFlag, sizeof reuseFlag) < 0) { socketErr(env, "setsockopt(SO_REUSEADDR) error: "); closeSocket(newSocket); return -1; } #if defined(__WIN32__) || defined(_WIN32) // Windoze doesn't properly handle SO_REUSEPORT or IP_MULTICAST_LOOP #else #ifdef SO_REUSEPORT if (setsockopt(newSocket, SOL_SOCKET, SO_REUSEPORT, (const char*)&reuseFlag, sizeof reuseFlag) < 0) { socketErr(env, "setsockopt(SO_REUSEPORT) error: "); closeSocket(newSocket); return -1; } #endif #ifdef IP_MULTICAST_LOOP const u_int8_t loop = 1; if (setsockopt(newSocket, IPPROTO_IP, IP_MULTICAST_LOOP, (const char*)&loop, sizeof loop) < 0) { socketErr(env, "setsockopt(IP_MULTICAST_LOOP) error: "); closeSocket(newSocket); return -1; } #endif #endif // Note: Windoze requires binding, even if the port number is 0 netAddressBits addr = INADDR_ANY; #if defined(__WIN32__) || defined(_WIN32) #else if (port.num() != 0 || ReceivingInterfaceAddr != INADDR_ANY) { #endif if (port.num() == 0) addr = ReceivingInterfaceAddr; MAKE_SOCKADDR_IN(name, addr, port.num()); if (bind(newSocket, (struct sockaddr*)&name, sizeof name) != 0) { char tmpBuffer[100]; sprintf(tmpBuffer, "bind() error (port number: %d): ", 
ntohs(port.num())); socketErr(env, tmpBuffer); closeSocket(newSocket); return -1; } #if defined(__WIN32__) || defined(_WIN32) #else } #endif // Set the sending interface for multicasts, if it's not the default: if (SendingInterfaceAddr != INADDR_ANY) { struct in_addr addr; addr.s_addr = SendingInterfaceAddr; if (setsockopt(newSocket, IPPROTO_IP, IP_MULTICAST_IF, (const char*)&addr, sizeof addr) < 0) { socketErr(env, "error setting outgoing multicast interface: "); closeSocket(newSocket); return -1; } } return newSocket; } Boolean makeSocketNonBlocking(int sock) { #if defined(__WIN32__) || defined(_WIN32) unsigned long arg = 1; return ioctlsocket(sock, FIONBIO, &arg) == 0; #elif defined(VXWORKS) int arg = 1; return ioctl(sock, FIONBIO, (int)&arg) == 0; #else int curFlags = fcntl(sock, F_GETFL, 0); return fcntl(sock, F_SETFL, curFlags|O_NONBLOCK) >= 0; #endif } Boolean makeSocketBlocking(int sock) { #if defined(__WIN32__) || defined(_WIN32) unsigned long arg = 0; return ioctlsocket(sock, FIONBIO, &arg) == 0; #elif defined(VXWORKS) int arg = 0; return ioctl(sock, FIONBIO, (int)&arg) == 0; #else int curFlags = fcntl(sock, F_GETFL, 0); return fcntl(sock, F_SETFL, curFlags&(~O_NONBLOCK)) >= 0; #endif } int setupStreamSocket(UsageEnvironment& env, Port port, Boolean makeNonBlocking) { if (!initializeWinsockIfNecessary()) { socketErr(env, "Failed to initialize 'winsock': "); return -1; } int newSocket = createSocket(SOCK_STREAM); if (newSocket < 0) { socketErr(env, "unable to create stream socket: "); return newSocket; } int reuseFlag = groupsockPriv(env)->reuseFlag; reclaimGroupsockPriv(env); if (setsockopt(newSocket, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuseFlag, sizeof reuseFlag) < 0) { socketErr(env, "setsockopt(SO_REUSEADDR) error: "); closeSocket(newSocket); return -1; } // SO_REUSEPORT doesn't really make sense for TCP sockets, so we // normally don't set them. However, if you really want to do this // #define REUSE_FOR_TCP #ifdef REUSE_FOR_TCP #if defined(__WIN32__) || defined(_WIN32) // Windoze doesn't properly handle SO_REUSEPORT #else #ifdef SO_REUSEPORT if (setsockopt(newSocket, SOL_SOCKET, SO_REUSEPORT, (const char*)&reuseFlag, sizeof reuseFlag) < 0) { socketErr(env, "setsockopt(SO_REUSEPORT) error: "); closeSocket(newSocket); return -1; } #endif #endif #endif // Note: Windoze requires binding, even if the port number is 0 #if defined(__WIN32__) || defined(_WIN32) #else if (port.num() != 0 || ReceivingInterfaceAddr != INADDR_ANY) { #endif MAKE_SOCKADDR_IN(name, ReceivingInterfaceAddr, port.num()); if (bind(newSocket, (struct sockaddr*)&name, sizeof name) != 0) { char tmpBuffer[100]; sprintf(tmpBuffer, "bind() error (port number: %d): ", ntohs(port.num())); socketErr(env, tmpBuffer); closeSocket(newSocket); return -1; } #if defined(__WIN32__) || defined(_WIN32) #else } #endif if (makeNonBlocking) { if (!makeSocketNonBlocking(newSocket)) { socketErr(env, "failed to make non-blocking: "); closeSocket(newSocket); return -1; } } return newSocket; } int readSocket(UsageEnvironment& env, int socket, unsigned char* buffer, unsigned bufferSize, struct sockaddr_in& fromAddress) { SOCKLEN_T addressSize = sizeof fromAddress; int bytesRead = recvfrom(socket, (char*)buffer, bufferSize, 0, (struct sockaddr*)&fromAddress, &addressSize); if (bytesRead < 0) { //##### HACK to work around bugs in Linux and Windows: int err = env.getErrno(); if (err == 111 /*ECONNREFUSED (Linux)*/ #if defined(__WIN32__) || defined(_WIN32) // What a piece of crap Windows is. 
Sometimes // recvfrom() returns -1, but with an 'errno' of 0. // This appears not to be a real error; just treat // it as if it were a read of zero bytes, and hope // we don't have to do anything else to 'reset' // this alleged error: || err == 0 || err == EWOULDBLOCK #else || err == EAGAIN #endif || err == 113 /*EHOSTUNREACH (Linux)*/) { // Why does Linux return this for datagram sock? fromAddress.sin_addr.s_addr = 0; return 0; } //##### END HACK socketErr(env, "recvfrom() error: "); } else if (bytesRead == 0) { // "recvfrom()" on a stream socket can return 0 if the remote end has closed the connection. Treat this as an error: return -1; } return bytesRead; } Boolean writeSocket(UsageEnvironment& env, int socket, struct in_addr address, Port port, u_int8_t ttlArg, unsigned char* buffer, unsigned bufferSize) { do { if (ttlArg != 0) { // Before sending, set the socket's TTL: #if defined(__WIN32__) || defined(_WIN32) #define TTL_TYPE int #else #define TTL_TYPE u_int8_t #endif TTL_TYPE ttl = (TTL_TYPE)ttlArg; if (setsockopt(socket, IPPROTO_IP, IP_MULTICAST_TTL, (const char*)&ttl, sizeof ttl) < 0) { socketErr(env, "setsockopt(IP_MULTICAST_TTL) error: "); break; } } MAKE_SOCKADDR_IN(dest, address.s_addr, port.num()); int bytesSent = sendto(socket, (char*)buffer, bufferSize, 0, (struct sockaddr*)&dest, sizeof dest); if (bytesSent != (int)bufferSize) { char tmpBuf[100]; sprintf(tmpBuf, "writeSocket(%d), sendTo() error: wrote %d bytes instead of %u: ", socket, bytesSent, bufferSize); socketErr(env, tmpBuf); break; } return True; } while (0); return False; } static unsigned getBufferSize(UsageEnvironment& env, int bufOptName, int socket) { unsigned curSize; SOCKLEN_T sizeSize = sizeof curSize; if (getsockopt(socket, SOL_SOCKET, bufOptName, (char*)&curSize, &sizeSize) < 0) { socketErr(env, "getBufferSize() error: "); return 0; } return curSize; } unsigned getSendBufferSize(UsageEnvironment& env, int socket) { return getBufferSize(env, SO_SNDBUF, socket); } unsigned getReceiveBufferSize(UsageEnvironment& env, int socket) { return getBufferSize(env, SO_RCVBUF, socket); } static unsigned setBufferTo(UsageEnvironment& env, int bufOptName, int socket, unsigned requestedSize) { SOCKLEN_T sizeSize = sizeof requestedSize; setsockopt(socket, SOL_SOCKET, bufOptName, (char*)&requestedSize, sizeSize); // Get and return the actual, resulting buffer size: return getBufferSize(env, bufOptName, socket); } unsigned setSendBufferTo(UsageEnvironment& env, int socket, unsigned requestedSize) { return setBufferTo(env, SO_SNDBUF, socket, requestedSize); } unsigned setReceiveBufferTo(UsageEnvironment& env, int socket, unsigned requestedSize) { return setBufferTo(env, SO_RCVBUF, socket, requestedSize); } static unsigned increaseBufferTo(UsageEnvironment& env, int bufOptName, int socket, unsigned requestedSize) { // First, get the current buffer size. If it's already at least // as big as what we're requesting, do nothing. 
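// (The loop below repeatedly moves the request halfway toward the current size until a setsockopt() call succeeds - e.g., a 2000000-byte request against a 262144-byte current size is retried at 1131072, then 696608, and so on - and finally re-reads whatever size the kernel actually granted.)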
unsigned curSize = getBufferSize(env, bufOptName, socket); // Next, try to increase the buffer to the requested size, // or to some smaller size, if that's not possible: while (requestedSize > curSize) { SOCKLEN_T sizeSize = sizeof requestedSize; if (setsockopt(socket, SOL_SOCKET, bufOptName, (char*)&requestedSize, sizeSize) >= 0) { // success return requestedSize; } requestedSize = (requestedSize+curSize)/2; } return getBufferSize(env, bufOptName, socket); } unsigned increaseSendBufferTo(UsageEnvironment& env, int socket, unsigned requestedSize) { return increaseBufferTo(env, SO_SNDBUF, socket, requestedSize); } unsigned increaseReceiveBufferTo(UsageEnvironment& env, int socket, unsigned requestedSize) { return increaseBufferTo(env, SO_RCVBUF, socket, requestedSize); } static void clearMulticastAllSocketOption(int socket) { #ifdef IP_MULTICAST_ALL // This option is defined in modern versions of Linux to overcome a bug in the Linux kernel's default behavior. // When set to 0, it ensures that we receive only packets that were sent to the specified IP multicast address, // even if some other process on the same system has joined a different multicast group with the same port number. int multicastAll = 0; (void)setsockopt(socket, IPPROTO_IP, IP_MULTICAST_ALL, (void*)&multicastAll, sizeof multicastAll); // Ignore the call's result. Should it fail, we'll still receive packets (just perhaps more than intended) #endif } Boolean socketJoinGroup(UsageEnvironment& env, int socket, netAddressBits groupAddress){ if (!IsMulticastAddress(groupAddress)) return True; // ignore this case struct ip_mreq imr; imr.imr_multiaddr.s_addr = groupAddress; imr.imr_interface.s_addr = ReceivingInterfaceAddr; if (setsockopt(socket, IPPROTO_IP, IP_ADD_MEMBERSHIP, (const char*)&imr, sizeof (struct ip_mreq)) < 0) { #if defined(__WIN32__) || defined(_WIN32) if (env.getErrno() != 0) { // That piece-of-shit toy operating system (Windows) sometimes lies // about setsockopt() failing! #endif socketErr(env, "setsockopt(IP_ADD_MEMBERSHIP) error: "); return False; #if defined(__WIN32__) || defined(_WIN32) } #endif } clearMulticastAllSocketOption(socket); return True; } Boolean socketLeaveGroup(UsageEnvironment&, int socket, netAddressBits groupAddress) { if (!IsMulticastAddress(groupAddress)) return True; // ignore this case struct ip_mreq imr; imr.imr_multiaddr.s_addr = groupAddress; imr.imr_interface.s_addr = ReceivingInterfaceAddr; if (setsockopt(socket, IPPROTO_IP, IP_DROP_MEMBERSHIP, (const char*)&imr, sizeof (struct ip_mreq)) < 0) { return False; } return True; } // The source-specific join/leave operations require special setsockopt() // commands, and a special structure (ip_mreq_source). 
If the include files // didn't define these, we do so here: #if !defined(IP_ADD_SOURCE_MEMBERSHIP) struct ip_mreq_source { struct in_addr imr_multiaddr; /* IP multicast address of group */ struct in_addr imr_sourceaddr; /* IP address of source */ struct in_addr imr_interface; /* local IP address of interface */ }; #endif #ifndef IP_ADD_SOURCE_MEMBERSHIP #ifdef LINUX #define IP_ADD_SOURCE_MEMBERSHIP 39 #define IP_DROP_SOURCE_MEMBERSHIP 40 #else #define IP_ADD_SOURCE_MEMBERSHIP 25 #define IP_DROP_SOURCE_MEMBERSHIP 26 #endif #endif Boolean socketJoinGroupSSM(UsageEnvironment& env, int socket, netAddressBits groupAddress, netAddressBits sourceFilterAddr) { if (!IsMulticastAddress(groupAddress)) return True; // ignore this case struct ip_mreq_source imr; #ifdef __ANDROID__ imr.imr_multiaddr = groupAddress; imr.imr_sourceaddr = sourceFilterAddr; imr.imr_interface = ReceivingInterfaceAddr; #else imr.imr_multiaddr.s_addr = groupAddress; imr.imr_sourceaddr.s_addr = sourceFilterAddr; imr.imr_interface.s_addr = ReceivingInterfaceAddr; #endif if (setsockopt(socket, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP, (const char*)&imr, sizeof (struct ip_mreq_source)) < 0) { socketErr(env, "setsockopt(IP_ADD_SOURCE_MEMBERSHIP) error: "); return False; } clearMulticastAllSocketOption(socket); return True; } Boolean socketLeaveGroupSSM(UsageEnvironment& /*env*/, int socket, netAddressBits groupAddress, netAddressBits sourceFilterAddr) { if (!IsMulticastAddress(groupAddress)) return True; // ignore this case struct ip_mreq_source imr; #ifdef __ANDROID__ imr.imr_multiaddr = groupAddress; imr.imr_sourceaddr = sourceFilterAddr; imr.imr_interface = ReceivingInterfaceAddr; #else imr.imr_multiaddr.s_addr = groupAddress; imr.imr_sourceaddr.s_addr = sourceFilterAddr; imr.imr_interface.s_addr = ReceivingInterfaceAddr; #endif if (setsockopt(socket, IPPROTO_IP, IP_DROP_SOURCE_MEMBERSHIP, (const char*)&imr, sizeof (struct ip_mreq_source)) < 0) { return False; } return True; } static Boolean getSourcePort0(int socket, portNumBits& resultPortNum/*host order*/) { sockaddr_in test; test.sin_port = 0; SOCKLEN_T len = sizeof test; if (getsockname(socket, (struct sockaddr*)&test, &len) < 0) return False; resultPortNum = ntohs(test.sin_port); return True; } Boolean getSourcePort(UsageEnvironment& env, int socket, Port& port) { portNumBits portNum = 0; if (!getSourcePort0(socket, portNum) || portNum == 0) { // Hack - call bind(), then try again: MAKE_SOCKADDR_IN(name, INADDR_ANY, 0); bind(socket, (struct sockaddr*)&name, sizeof name); if (!getSourcePort0(socket, portNum) || portNum == 0) { socketErr(env, "getsockname() error: "); return False; } } port = Port(portNum); return True; } static Boolean badAddressForUs(netAddressBits addr) { // Check for some possible erroneous addresses: netAddressBits nAddr = htonl(addr); return (nAddr == 0x7F000001 /* 127.0.0.1 */ || nAddr == 0 || nAddr == (netAddressBits)(~0)); } Boolean loopbackWorks = 1; netAddressBits ourIPAddress(UsageEnvironment& env) { static netAddressBits ourAddress = 0; int sock = -1; struct in_addr testAddr; if (ReceivingInterfaceAddr != INADDR_ANY) { // Hack: If we were told to receive on a specific interface address, then // define this to be our ip address: ourAddress = ReceivingInterfaceAddr; } if (ourAddress == 0) { // We need to find our source address struct sockaddr_in fromAddr; fromAddr.sin_addr.s_addr = 0; // Get our address by sending a (0-TTL) multicast packet, // receiving it, and looking at the source address used. 
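// (A multicast packet sent with TTL 0 is never forwarded off the local host, so this probe exercises only the loopback path.)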
// (This is kinda bogus, but it provides the best guarantee // that other nodes will think our address is the same as we do.) do { loopbackWorks = 0; // until we learn otherwise testAddr.s_addr = our_inet_addr("228.67.43.91"); // arbitrary Port testPort(15947); // ditto sock = setupDatagramSocket(env, testPort); if (sock < 0) break; if (!socketJoinGroup(env, sock, testAddr.s_addr)) break; unsigned char testString[] = "hostIdTest"; unsigned testStringLength = sizeof testString; if (!writeSocket(env, sock, testAddr, testPort, 0, testString, testStringLength)) break; // Block until the socket is readable (with a 5-second timeout): fd_set rd_set; FD_ZERO(&rd_set); FD_SET((unsigned)sock, &rd_set); const unsigned numFds = sock+1; struct timeval timeout; timeout.tv_sec = 5; timeout.tv_usec = 0; int result = select(numFds, &rd_set, NULL, NULL, &timeout); if (result <= 0) break; unsigned char readBuffer[20]; int bytesRead = readSocket(env, sock, readBuffer, sizeof readBuffer, fromAddr); if (bytesRead != (int)testStringLength || strncmp((char*)readBuffer, (char*)testString, testStringLength) != 0) { break; } // We use this packet's source address, if it's good: loopbackWorks = !badAddressForUs(fromAddr.sin_addr.s_addr); } while (0); if (sock >= 0) { socketLeaveGroup(env, sock, testAddr.s_addr); closeSocket(sock); } if (!loopbackWorks) do { // We couldn't find our address using multicast loopback, // so try instead to look it up directly - by first getting our host name, and then resolving this host name char hostname[100]; hostname[0] = '\0'; int result = gethostname(hostname, sizeof hostname); if (result != 0 || hostname[0] == '\0') { env.setResultErrMsg("initial gethostname() failed"); break; } // Try to resolve "hostname" to an IP address: NetAddressList addresses(hostname); NetAddressList::Iterator iter(addresses); NetAddress const* address; // Take the first address that's not bad: netAddressBits addr = 0; while ((address = iter.nextAddress()) != NULL) { netAddressBits a = *(netAddressBits*)(address->data()); if (!badAddressForUs(a)) { addr = a; break; } } // Assign the address that we found to "fromAddr" (as if the 'loopback' method had worked), to simplify the code below: fromAddr.sin_addr.s_addr = addr; } while (0); // Make sure we have a good address: netAddressBits from = fromAddr.sin_addr.s_addr; if (badAddressForUs(from)) { char tmp[100]; sprintf(tmp, "This computer has an invalid IP address: %s", AddressString(from).val()); env.setResultMsg(tmp); from = 0; } ourAddress = from; // Use our newly-discovered IP address, and the current time, // to initialize the random number generator's seed: struct timeval timeNow; gettimeofday(&timeNow, NULL); unsigned seed = ourAddress^timeNow.tv_sec^timeNow.tv_usec; our_srandom(seed); } return ourAddress; } netAddressBits chooseRandomIPv4SSMAddress(UsageEnvironment& env) { // First, a hack to ensure that our random number generator is seeded: (void) ourIPAddress(env); // Choose a random address in the range [232.0.1.0, 232.255.255.255) // i.e., [0xE8000100, 0xE8FFFFFF) netAddressBits const first = 0xE8000100, lastPlus1 = 0xE8FFFFFF; netAddressBits const range = lastPlus1 - first; return ntohl(first + ((netAddressBits)our_random())%range); } char const* timestampString() { struct timeval tvNow; gettimeofday(&tvNow, NULL); #if !defined(_WIN32_WCE) static char timeString[9]; // holds hh:mm:ss plus trailing '\0' char const* ctimeResult = ctime((time_t*)&tvNow.tv_sec); if (ctimeResult == NULL) { sprintf(timeString, "??:??:??"); } else { char const* from = 
&ctimeResult[11]; int i; for (i = 0; i < 8; ++i) { timeString[i] = from[i]; } timeString[i] = '\0'; } #else // WinCE apparently doesn't have "ctime()", so instead, construct // a timestamp string just using the integer and fractional parts // of "tvNow": static char timeString[50]; sprintf(timeString, "%lu.%06ld", tvNow.tv_sec, tvNow.tv_usec); #endif return (char const*)&timeString; } #if defined(__WIN32__) || defined(_WIN32) // For Windoze, we need to implement our own gettimeofday() // used to make sure that static variables in gettimeofday() aren't initialized simultaneously by multiple threads static LONG initializeLock_gettimeofday = 0; #if !defined(_WIN32_WCE) #include #endif int gettimeofday(struct timeval* tp, int* /*tz*/) { static LARGE_INTEGER tickFrequency, epochOffset; static Boolean isInitialized = False; LARGE_INTEGER tickNow; #if !defined(_WIN32_WCE) QueryPerformanceCounter(&tickNow); #else tickNow.QuadPart = GetTickCount(); #endif if (!isInitialized) { if(1 == InterlockedIncrement(&initializeLock_gettimeofday)) { #if !defined(_WIN32_WCE) // For our first call, use "ftime()", so that we get a time with a proper epoch. // For subsequent calls, use "QueryPerformanceCounter()", because it's more fine-grained. struct timeb tb; ftime(&tb); tp->tv_sec = tb.time; tp->tv_usec = 1000*tb.millitm; // Also get our counter frequency: QueryPerformanceFrequency(&tickFrequency); #else /* FILETIME of Jan 1 1970 00:00:00. */ const LONGLONG epoch = 116444736000000000LL; FILETIME fileTime; LARGE_INTEGER time; GetSystemTimeAsFileTime(&fileTime); time.HighPart = fileTime.dwHighDateTime; time.LowPart = fileTime.dwLowDateTime; // convert from the 100ns-unit FILETIME (counted from the epoch above) to a Unix timestamp in seconds; there are 1000*1000*10 100ns units per second tp->tv_sec = (long)((time.QuadPart - epoch) / 10000000L); /* GetSystemTimeAsFileTime has only seconds resolution here; that's why the WinCE version of gettimeofday() is not 100% accurate. Microsecond accuracy would be calculated like this: // convert 100ns units to usec tp->tv_usec = (long)((time.QuadPart - epoch)%10000000L) / 10L; */ tp->tv_usec = 0; // resolution of GetTickCount() is always milliseconds tickFrequency.QuadPart = 1000; #endif // compute an offset to add to subsequent counter times, so we get a proper epoch: epochOffset.QuadPart = tp->tv_sec * tickFrequency.QuadPart + (tp->tv_usec * tickFrequency.QuadPart) / 1000000L - tickNow.QuadPart; // next caller can use ticks for time calculation isInitialized = True; return 0; } else { InterlockedDecrement(&initializeLock_gettimeofday); // wait until first caller has initialized static values while(!isInitialized){ Sleep(1); } } } // adjust our tick count so that we get a proper epoch: tickNow.QuadPart += epochOffset.QuadPart; tp->tv_sec = (long)(tickNow.QuadPart / tickFrequency.QuadPart); tp->tv_usec = (long)(((tickNow.QuadPart % tickFrequency.QuadPart) * 1000000L) / tickFrequency.QuadPart); return 0; } #endif live/groupsock/Makefile.head000440 001751 000000 00000000217 12265042432 016307 0ustar00rsfwheel000000 000000 INCLUDES = -Iinclude -I../UsageEnvironment/include PREFIX = /usr/local LIBDIR = $(PREFIX)/lib ##### Change the following for your environment: live/groupsock/Groupsock.cpp000444 001751 000000 00000045321 12265042432 016440 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .)
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // 'Group sockets' // Implementation #include "Groupsock.hh" #include "GroupsockHelper.hh" //##### Eventually fix the following #include; we shouldn't know about tunnels #include "TunnelEncaps.hh" #ifndef NO_SSTREAM #include #endif #include ///////// OutputSocket ////////// OutputSocket::OutputSocket(UsageEnvironment& env) : Socket(env, 0 /* let kernel choose port */), fSourcePort(0), fLastSentTTL(0) { } OutputSocket::OutputSocket(UsageEnvironment& env, Port port) : Socket(env, port), fSourcePort(0), fLastSentTTL(0) { } OutputSocket::~OutputSocket() { } Boolean OutputSocket::write(netAddressBits address, Port port, u_int8_t ttl, unsigned char* buffer, unsigned bufferSize) { if (ttl == fLastSentTTL) { // Optimization: So we don't do a 'set TTL' system call again ttl = 0; } else { fLastSentTTL = ttl; } struct in_addr destAddr; destAddr.s_addr = address; if (!writeSocket(env(), socketNum(), destAddr, port, ttl, buffer, bufferSize)) return False; if (sourcePortNum() == 0) { // Now that we've sent a packet, we can find out what the // kernel chose as our ephemeral source port number: if (!getSourcePort(env(), socketNum(), fSourcePort)) { if (DebugLevel >= 1) env() << *this << ": failed to get source port: " << env().getResultMsg() << "\n"; return False; } } return True; } // By default, we don't do reads: Boolean OutputSocket ::handleRead(unsigned char* /*buffer*/, unsigned /*bufferMaxSize*/, unsigned& /*bytesRead*/, struct sockaddr_in& /*fromAddress*/) { return True; } ///////// destRecord ////////// destRecord ::destRecord(struct in_addr const& addr, Port const& port, u_int8_t ttl, destRecord* next) : fNext(next), fGroupEId(addr, port.num(), ttl), fPort(port) { } destRecord::~destRecord() { delete fNext; } ///////// Groupsock ////////// NetInterfaceTrafficStats Groupsock::statsIncoming; NetInterfaceTrafficStats Groupsock::statsOutgoing; NetInterfaceTrafficStats Groupsock::statsRelayedIncoming; NetInterfaceTrafficStats Groupsock::statsRelayedOutgoing; // Constructor for a source-independent multicast group Groupsock::Groupsock(UsageEnvironment& env, struct in_addr const& groupAddr, Port port, u_int8_t ttl) : OutputSocket(env, port), deleteIfNoMembers(False), isSlave(False), fIncomingGroupEId(groupAddr, port.num(), ttl), fDests(NULL), fTTL(ttl) { addDestination(groupAddr, port); if (!socketJoinGroup(env, socketNum(), groupAddr.s_addr)) { if (DebugLevel >= 1) { env << *this << ": failed to join group: " << env.getResultMsg() << "\n"; } } // Make sure we can get our source address: if (ourIPAddress(env) == 0) { if (DebugLevel >= 0) { // this is a fatal error env << "Unable to determine our source address: " << env.getResultMsg() << "\n"; } } if (DebugLevel >= 2) env << *this << ": created\n"; } // Constructor for a source-specific multicast group Groupsock::Groupsock(UsageEnvironment& env, struct in_addr const& groupAddr, struct in_addr const& sourceFilterAddr, Port port) : OutputSocket(env, port), deleteIfNoMembers(False), isSlave(False), fIncomingGroupEId(groupAddr, 
sourceFilterAddr, port.num()), fDests(NULL), fTTL(255) { addDestination(groupAddr, port); // First try a SSM join. If that fails, try a regular join: if (!socketJoinGroupSSM(env, socketNum(), groupAddr.s_addr, sourceFilterAddr.s_addr)) { if (DebugLevel >= 3) { env << *this << ": SSM join failed: " << env.getResultMsg(); env << " - trying regular join instead\n"; } if (!socketJoinGroup(env, socketNum(), groupAddr.s_addr)) { if (DebugLevel >= 1) { env << *this << ": failed to join group: " << env.getResultMsg() << "\n"; } } } if (DebugLevel >= 2) env << *this << ": created\n"; } Groupsock::~Groupsock() { if (isSSM()) { if (!socketLeaveGroupSSM(env(), socketNum(), groupAddress().s_addr, sourceFilterAddress().s_addr)) { socketLeaveGroup(env(), socketNum(), groupAddress().s_addr); } } else { socketLeaveGroup(env(), socketNum(), groupAddress().s_addr); } delete fDests; if (DebugLevel >= 2) env() << *this << ": deleting\n"; } void Groupsock::changeDestinationParameters(struct in_addr const& newDestAddr, Port newDestPort, int newDestTTL) { if (fDests == NULL) return; struct in_addr destAddr = fDests->fGroupEId.groupAddress(); if (newDestAddr.s_addr != 0) { if (newDestAddr.s_addr != destAddr.s_addr && IsMulticastAddress(newDestAddr.s_addr)) { // If the new destination is a multicast address, then we assume that // we want to join it also. (If this is not in fact the case, then // call "multicastSendOnly()" afterwards.) socketLeaveGroup(env(), socketNum(), destAddr.s_addr); socketJoinGroup(env(), socketNum(), newDestAddr.s_addr); } destAddr.s_addr = newDestAddr.s_addr; } portNumBits destPortNum = fDests->fGroupEId.portNum(); if (newDestPort.num() != 0) { if (newDestPort.num() != destPortNum && IsMulticastAddress(destAddr.s_addr)) { // Also bind to the new port number: changePort(newDestPort); // And rejoin the multicast group: socketJoinGroup(env(), socketNum(), destAddr.s_addr); } destPortNum = newDestPort.num(); fDests->fPort = newDestPort; } u_int8_t destTTL = ttl(); if (newDestTTL != ~0) destTTL = (u_int8_t)newDestTTL; fDests->fGroupEId = GroupEId(destAddr, destPortNum, destTTL); } void Groupsock::addDestination(struct in_addr const& addr, Port const& port) { // Check whether this destination is already known: for (destRecord* dests = fDests; dests != NULL; dests = dests->fNext) { if (addr.s_addr == dests->fGroupEId.groupAddress().s_addr && port.num() == dests->fPort.num()) { return; } } fDests = new destRecord(addr, port, ttl(), fDests); } void Groupsock::removeDestination(struct in_addr const& addr, Port const& port) { for (destRecord** destsPtr = &fDests; *destsPtr != NULL; destsPtr = &((*destsPtr)->fNext)) { if (addr.s_addr == (*destsPtr)->fGroupEId.groupAddress().s_addr && port.num() == (*destsPtr)->fPort.num()) { // Remove the record pointed to by *destsPtr : destRecord* next = (*destsPtr)->fNext; (*destsPtr)->fNext = NULL; delete (*destsPtr); *destsPtr = next; return; } } } void Groupsock::removeAllDestinations() { delete fDests; fDests = NULL; } void Groupsock::multicastSendOnly() { // We disable this code for now, because - on some systems - leaving the multicast group seems to cause sent packets // to not be received by other applications (at least, on the same host). 
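// The disabled block below is kept as a sketch of the intended behavior: leave the incoming multicast group, and every destination group, so that the socket is thereafter used only for sending: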
#if 0 socketLeaveGroup(env(), socketNum(), fIncomingGroupEId.groupAddress().s_addr); for (destRecord* dests = fDests; dests != NULL; dests = dests->fNext) { socketLeaveGroup(env(), socketNum(), dests->fGroupEId.groupAddress().s_addr); } #endif } Boolean Groupsock::output(UsageEnvironment& env, u_int8_t ttlToSend, unsigned char* buffer, unsigned bufferSize, DirectedNetInterface* interfaceNotToFwdBackTo) { do { // First, do the datagram send, to each destination: Boolean writeSuccess = True; for (destRecord* dests = fDests; dests != NULL; dests = dests->fNext) { if (!write(dests->fGroupEId.groupAddress().s_addr, dests->fPort, ttlToSend, buffer, bufferSize)) { writeSuccess = False; break; } } if (!writeSuccess) break; statsOutgoing.countPacket(bufferSize); statsGroupOutgoing.countPacket(bufferSize); // Then, forward to our members: int numMembers = 0; if (!members().IsEmpty()) { numMembers = outputToAllMembersExcept(interfaceNotToFwdBackTo, ttlToSend, buffer, bufferSize, ourIPAddress(env)); if (numMembers < 0) break; } if (DebugLevel >= 3) { env << *this << ": wrote " << bufferSize << " bytes, ttl " << (unsigned)ttlToSend; if (numMembers > 0) { env << "; relayed to " << numMembers << " members"; } env << "\n"; } return True; } while (0); if (DebugLevel >= 0) { // this is a fatal error env.setResultMsg("Groupsock write failed: ", env.getResultMsg()); } return False; } Boolean Groupsock::handleRead(unsigned char* buffer, unsigned bufferMaxSize, unsigned& bytesRead, struct sockaddr_in& fromAddress) { // Read data from the socket, and relay it across any attached tunnels //##### later make this code more general - independent of tunnels bytesRead = 0; int maxBytesToRead = bufferMaxSize - TunnelEncapsulationTrailerMaxSize; int numBytes = readSocket(env(), socketNum(), buffer, maxBytesToRead, fromAddress); if (numBytes < 0) { if (DebugLevel >= 0) { // this is a fatal error env().setResultMsg("Groupsock read failed: ", env().getResultMsg()); } return False; } // If we're a SSM group, make sure the source address matches: if (isSSM() && fromAddress.sin_addr.s_addr != sourceFilterAddress().s_addr) { return True; } // We'll handle this data. // Also write it (with the encapsulation trailer) to each member, // unless the packet was originally sent by us to begin with. 
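// ("wasLoopedBackFromUs()", defined below, checks whether the packet's source address and source port are our own - i.e., whether this is a multicast loopback copy of a packet that this very socket sent.)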
bytesRead = numBytes; int numMembers = 0; if (!wasLoopedBackFromUs(env(), fromAddress)) { statsIncoming.countPacket(numBytes); statsGroupIncoming.countPacket(numBytes); numMembers = outputToAllMembersExcept(NULL, ttl(), buffer, bytesRead, fromAddress.sin_addr.s_addr); if (numMembers > 0) { statsRelayedIncoming.countPacket(numBytes); statsGroupRelayedIncoming.countPacket(numBytes); } } if (DebugLevel >= 3) { env() << *this << ": read " << bytesRead << " bytes from " << AddressString(fromAddress).val(); if (numMembers > 0) { env() << "; relayed to " << numMembers << " members"; } env() << "\n"; } return True; } Boolean Groupsock::wasLoopedBackFromUs(UsageEnvironment& env, struct sockaddr_in& fromAddress) { if (fromAddress.sin_addr.s_addr == ourIPAddress(env)) { if (fromAddress.sin_port == sourcePortNum()) { #ifdef DEBUG_LOOPBACK_CHECKING if (DebugLevel >= 3) { env() << *this << ": got looped-back packet\n"; } #endif return True; } } return False; } int Groupsock::outputToAllMembersExcept(DirectedNetInterface* exceptInterface, u_int8_t ttlToFwd, unsigned char* data, unsigned size, netAddressBits sourceAddr) { // Don't forward TTL-0 packets if (ttlToFwd == 0) return 0; DirectedNetInterfaceSet::Iterator iter(members()); unsigned numMembers = 0; DirectedNetInterface* interf; while ((interf = iter.next()) != NULL) { // Check whether we've asked to exclude this interface: if (interf == exceptInterface) continue; // Check that the packet's source address makes it OK to // be relayed across this interface: UsageEnvironment& saveEnv = env(); // because the following call may delete "this" if (!interf->SourceAddrOKForRelaying(saveEnv, sourceAddr)) { if (strcmp(saveEnv.getResultMsg(), "") != 0) { // Treat this as a fatal error return -1; } else { continue; } } if (numMembers == 0) { // We know that we're going to forward to at least one // member, so fill in the tunnel encapsulation trailer. // (Note: Allow for it not being 4-byte-aligned.) 
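// (If the trailer's position within "data" is not 4-byte aligned, the code below first builds the trailer in an aligned temporary buffer, then memmove()s it into place at the end of the packet.)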
TunnelEncapsulationTrailer* trailerInPacket = (TunnelEncapsulationTrailer*)&data[size]; TunnelEncapsulationTrailer* trailer; Boolean misaligned = ((uintptr_t)trailerInPacket & 3) != 0; unsigned trailerOffset; u_int8_t tunnelCmd; if (isSSM()) { // add an 'auxiliary address' before the trailer trailerOffset = TunnelEncapsulationTrailerAuxSize; tunnelCmd = TunnelDataAuxCmd; } else { trailerOffset = 0; tunnelCmd = TunnelDataCmd; } unsigned trailerSize = TunnelEncapsulationTrailerSize + trailerOffset; unsigned tmpTr[TunnelEncapsulationTrailerMaxSize]; if (misaligned) { trailer = (TunnelEncapsulationTrailer*)&tmpTr; } else { trailer = trailerInPacket; } trailer += trailerOffset; if (fDests != NULL) { trailer->address() = fDests->fGroupEId.groupAddress().s_addr; trailer->port() = fDests->fPort; // structure copy, outputs in network order } trailer->ttl() = ttlToFwd; trailer->command() = tunnelCmd; if (isSSM()) { trailer->auxAddress() = sourceFilterAddress().s_addr; } if (misaligned) { memmove(trailerInPacket, trailer-trailerOffset, trailerSize); } size += trailerSize; } interf->write(data, size); ++numMembers; } return numMembers; } UsageEnvironment& operator<<(UsageEnvironment& s, const Groupsock& g) { UsageEnvironment& s1 = s << timestampString() << " Groupsock(" << g.socketNum() << ": " << AddressString(g.groupAddress()).val() << ", " << g.port() << ", "; if (g.isSSM()) { return s1 << "SSM source: " << AddressString(g.sourceFilterAddress()).val() << ")"; } else { return s1 << (unsigned)(g.ttl()) << ")"; } } ////////// GroupsockLookupTable ////////// // A hash table used to index Groupsocks by socket number. static HashTable*& getSocketTable(UsageEnvironment& env) { _groupsockPriv* priv = groupsockPriv(env); if (priv->socketTable == NULL) { // We need to create it priv->socketTable = HashTable::create(ONE_WORD_HASH_KEYS); } return priv->socketTable; } static Boolean unsetGroupsockBySocket(Groupsock const* groupsock) { do { if (groupsock == NULL) break; int sock = groupsock->socketNum(); // Make sure "sock" is in bounds: if (sock < 0) break; HashTable*& sockets = getSocketTable(groupsock->env()); Groupsock* gs = (Groupsock*)sockets->Lookup((char*)(long)sock); if (gs == NULL || gs != groupsock) break; sockets->Remove((char*)(long)sock); if (sockets->IsEmpty()) { // We can also delete the table (to reclaim space): delete sockets; sockets = NULL; reclaimGroupsockPriv(gs->env()); } return True; } while (0); return False; } static Boolean setGroupsockBySocket(UsageEnvironment& env, int sock, Groupsock* groupsock) { do { // Make sure the "sock" parameter is in bounds: if (sock < 0) { char buf[100]; sprintf(buf, "trying to use bad socket (%d)", sock); env.setResultMsg(buf); break; } HashTable* sockets = getSocketTable(env); // Make sure we're not replacing an existing Groupsock (although that shouldn't happen) Boolean alreadyExists = (sockets->Lookup((char*)(long)sock) != 0); if (alreadyExists) { char buf[100]; sprintf(buf, "Attempting to replace an existing socket (%d)", sock); env.setResultMsg(buf); break; } sockets->Add((char*)(long)sock, groupsock); return True; } while (0); return False; } static Groupsock* getGroupsockBySocket(UsageEnvironment& env, int sock) { do { // Make sure the "sock" parameter is in bounds: if (sock < 0) break; HashTable* sockets = getSocketTable(env); return (Groupsock*)sockets->Lookup((char*)(long)sock); } while (0); return NULL; } Groupsock* GroupsockLookupTable::Fetch(UsageEnvironment& env, netAddressBits groupAddress, Port port, u_int8_t ttl, Boolean& isNew) { isNew =
False; Groupsock* groupsock; do { groupsock = (Groupsock*) fTable.Lookup(groupAddress, (~0), port); if (groupsock == NULL) { // we need to create one: groupsock = AddNew(env, groupAddress, (~0), port, ttl); if (groupsock == NULL) break; isNew = True; } } while (0); return groupsock; } Groupsock* GroupsockLookupTable::Fetch(UsageEnvironment& env, netAddressBits groupAddress, netAddressBits sourceFilterAddr, Port port, Boolean& isNew) { isNew = False; Groupsock* groupsock; do { groupsock = (Groupsock*) fTable.Lookup(groupAddress, sourceFilterAddr, port); if (groupsock == NULL) { // we need to create one: groupsock = AddNew(env, groupAddress, sourceFilterAddr, port, 0); if (groupsock == NULL) break; isNew = True; } } while (0); return groupsock; } Groupsock* GroupsockLookupTable::Lookup(netAddressBits groupAddress, Port port) { return (Groupsock*) fTable.Lookup(groupAddress, (~0), port); } Groupsock* GroupsockLookupTable::Lookup(netAddressBits groupAddress, netAddressBits sourceFilterAddr, Port port) { return (Groupsock*) fTable.Lookup(groupAddress, sourceFilterAddr, port); } Groupsock* GroupsockLookupTable::Lookup(UsageEnvironment& env, int sock) { return getGroupsockBySocket(env, sock); } Boolean GroupsockLookupTable::Remove(Groupsock const* groupsock) { unsetGroupsockBySocket(groupsock); return fTable.Remove(groupsock->groupAddress().s_addr, groupsock->sourceFilterAddress().s_addr, groupsock->port()); } Groupsock* GroupsockLookupTable::AddNew(UsageEnvironment& env, netAddressBits groupAddress, netAddressBits sourceFilterAddress, Port port, u_int8_t ttl) { Groupsock* groupsock; do { struct in_addr groupAddr; groupAddr.s_addr = groupAddress; if (sourceFilterAddress == netAddressBits(~0)) { // regular, ISM groupsock groupsock = new Groupsock(env, groupAddr, port, ttl); } else { // SSM groupsock struct in_addr sourceFilterAddr; sourceFilterAddr.s_addr = sourceFilterAddress; groupsock = new Groupsock(env, groupAddr, sourceFilterAddr, port); } if (groupsock == NULL || groupsock->socketNum() < 0) break; if (!setGroupsockBySocket(env, groupsock->socketNum(), groupsock)) break; fTable.Add(groupAddress, sourceFilterAddress, port, (void*)groupsock); } while (0); return groupsock; } GroupsockLookupTable::Iterator::Iterator(GroupsockLookupTable& groupsocks) : fIter(AddressPortLookupTable::Iterator(groupsocks.fTable)) { } Groupsock* GroupsockLookupTable::Iterator::next() { return (Groupsock*) fIter.next(); }; live/groupsock/GroupEId.cpp000444 001751 000000 00000005507 12265042432 016144 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. 
All rights reserved // "Group Endpoint Id" // Implementation #include "GroupEId.hh" #include "strDup.hh" #include ////////// Scope ////////// void Scope::assign(u_int8_t ttl, const char* publicKey) { fTTL = ttl; fPublicKey = strDup(publicKey == NULL ? "nokey" : publicKey); } void Scope::clean() { delete[] fPublicKey; fPublicKey = NULL; } Scope::Scope(u_int8_t ttl, const char* publicKey) { assign(ttl, publicKey); } Scope::Scope(const Scope& orig) { assign(orig.ttl(), orig.publicKey()); } Scope& Scope::operator=(const Scope& rightSide) { if (&rightSide != this) { if (publicKey() == NULL || strcmp(publicKey(), rightSide.publicKey()) != 0) { clean(); assign(rightSide.ttl(), rightSide.publicKey()); } else { // need to assign TTL only fTTL = rightSide.ttl(); } } return *this; } Scope::~Scope() { clean(); } unsigned Scope::publicKeySize() const { return fPublicKey == NULL ? 0 : strlen(fPublicKey); } ////////// GroupEId ////////// GroupEId::GroupEId(struct in_addr const& groupAddr, portNumBits portNum, Scope const& scope, unsigned numSuccessiveGroupAddrs) { struct in_addr sourceFilterAddr; sourceFilterAddr.s_addr = ~0; // indicates no source filter init(groupAddr, sourceFilterAddr, portNum, scope, numSuccessiveGroupAddrs); } GroupEId::GroupEId(struct in_addr const& groupAddr, struct in_addr const& sourceFilterAddr, portNumBits portNum, unsigned numSuccessiveGroupAddrs) { init(groupAddr, sourceFilterAddr, portNum, 255, numSuccessiveGroupAddrs); } GroupEId::GroupEId() { } Boolean GroupEId::isSSM() const { return fSourceFilterAddress.s_addr != netAddressBits(~0); } void GroupEId::init(struct in_addr const& groupAddr, struct in_addr const& sourceFilterAddr, portNumBits portNum, Scope const& scope, unsigned numSuccessiveGroupAddrs) { fGroupAddress = groupAddr; fSourceFilterAddress = sourceFilterAddr; fNumSuccessiveGroupAddrs = numSuccessiveGroupAddrs; fPortNum = portNum; fScope = scope; } live/groupsock/NetAddress.cpp000444 001751 000000 00000021220 12265042432 016510 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "mTunnel" multicast access service // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
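// A minimal name-lookup sketch (illustrative only; the host name is hypothetical): //   NetAddressList addresses("media.example.com"); //   if (addresses.numAddresses() == 0) { /* lookup failed */ } //   NetAddressList::Iterator iter(addresses); //   NetAddress const* addr; //   while ((addr = iter.nextAddress()) != NULL) { //     netAddressBits a = *(netAddressBits*)(addr->data()); // an IPv4 address, in network byte order //   }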
// Network Addresses // Implementation #include "NetAddress.hh" #include "GroupsockHelper.hh" #include #include #if defined(__WIN32__) || defined(_WIN32) #define USE_GETHOSTBYNAME 1 /*because at least some Windows don't have getaddrinfo()*/ #else #ifndef INADDR_NONE #define INADDR_NONE 0xFFFFFFFF #endif #endif ////////// NetAddress ////////// NetAddress::NetAddress(u_int8_t const* data, unsigned length) { assign(data, length); } NetAddress::NetAddress(unsigned length) { fData = new u_int8_t[length]; if (fData == NULL) { fLength = 0; return; } for (unsigned i = 0; i < length; ++i) fData[i] = 0; fLength = length; } NetAddress::NetAddress(NetAddress const& orig) { assign(orig.data(), orig.length()); } NetAddress& NetAddress::operator=(NetAddress const& rightSide) { if (&rightSide != this) { clean(); assign(rightSide.data(), rightSide.length()); } return *this; } NetAddress::~NetAddress() { clean(); } void NetAddress::assign(u_int8_t const* data, unsigned length) { fData = new u_int8_t[length]; if (fData == NULL) { fLength = 0; return; } for (unsigned i = 0; i < length; ++i) fData[i] = data[i]; fLength = length; } void NetAddress::clean() { delete[] fData; fData = NULL; fLength = 0; } ////////// NetAddressList ////////// NetAddressList::NetAddressList(char const* hostname) : fNumAddresses(0), fAddressArray(NULL) { // First, check whether "hostname" is an IP address string: netAddressBits addr = our_inet_addr((char*)hostname); if (addr != INADDR_NONE) { // Yes, it was an IP address string. Return a 1-element list with this address: fNumAddresses = 1; fAddressArray = new NetAddress*[fNumAddresses]; if (fAddressArray == NULL) return; fAddressArray[0] = new NetAddress((u_int8_t*)&addr, sizeof (netAddressBits)); return; } // "hostname" is not an IP address string; try resolving it as a real host name instead: #if defined(USE_GETHOSTBYNAME) || defined(VXWORKS) struct hostent* host; #if defined(VXWORKS) char hostentBuf[512]; host = (struct hostent*)resolvGetHostByName((char*)hostname, (char*)&hostentBuf, sizeof hostentBuf); #else host = gethostbyname((char*)hostname); #endif if (host == NULL || host->h_length != 4 || host->h_addr_list == NULL) return; // no luck u_int8_t const** const hAddrPtr = (u_int8_t const**)host->h_addr_list; // First, count the number of addresses: u_int8_t const** hAddrPtr1 = hAddrPtr; while (*hAddrPtr1 != NULL) { ++fNumAddresses; ++hAddrPtr1; } // Next, set up the list: fAddressArray = new NetAddress*[fNumAddresses]; if (fAddressArray == NULL) return; for (unsigned i = 0; i < fNumAddresses; ++i) { fAddressArray[i] = new NetAddress(hAddrPtr[i], host->h_length); } #else // Use "getaddrinfo()" (rather than the older, deprecated "gethostbyname()"): struct addrinfo addrinfoHints; memset(&addrinfoHints, 0, sizeof addrinfoHints); addrinfoHints.ai_family = AF_INET; // For now, we're interested in IPv4 addresses only struct addrinfo* addrinfoResultPtr = NULL; int result = getaddrinfo(hostname, NULL, &addrinfoHints, &addrinfoResultPtr); if (result != 0 || addrinfoResultPtr == NULL) return; // no luck // First, count the number of addresses: const struct addrinfo* p = addrinfoResultPtr; while (p != NULL) { if (p->ai_addrlen >= 4) ++fNumAddresses; // sanity check: count only addresses that are large enough p = p->ai_next; } // Next, set up the list: fAddressArray = new NetAddress*[fNumAddresses]; if (fAddressArray == NULL) return; unsigned i = 0; p = addrinfoResultPtr; while (p != NULL) { if (p->ai_addrlen < 4) { p = p->ai_next; continue; } // sanity check: skip (and advance past) addresses that are too small fAddressArray[i++] = new NetAddress((u_int8_t
const*)&(((struct sockaddr_in*)p->ai_addr)->sin_addr.s_addr), 4); p = p->ai_next; } // Finally, free the data that we had allocated by calling "getaddrinfo()": freeaddrinfo(addrinfoResultPtr); #endif } NetAddressList::NetAddressList(NetAddressList const& orig) { assign(orig.numAddresses(), orig.fAddressArray); } NetAddressList& NetAddressList::operator=(NetAddressList const& rightSide) { if (&rightSide != this) { clean(); assign(rightSide.numAddresses(), rightSide.fAddressArray); } return *this; } NetAddressList::~NetAddressList() { clean(); } void NetAddressList::assign(unsigned numAddresses, NetAddress** addressArray) { fAddressArray = new NetAddress*[numAddresses]; if (fAddressArray == NULL) { fNumAddresses = 0; return; } for (unsigned i = 0; i < numAddresses; ++i) { fAddressArray[i] = new NetAddress(*addressArray[i]); } fNumAddresses = numAddresses; } void NetAddressList::clean() { while (fNumAddresses-- > 0) { delete fAddressArray[fNumAddresses]; } delete[] fAddressArray; fAddressArray = NULL; } NetAddress const* NetAddressList::firstAddress() const { if (fNumAddresses == 0) return NULL; return fAddressArray[0]; } ////////// NetAddressList::Iterator ////////// NetAddressList::Iterator::Iterator(NetAddressList const& addressList) : fAddressList(addressList), fNextIndex(0) {} NetAddress const* NetAddressList::Iterator::nextAddress() { if (fNextIndex >= fAddressList.numAddresses()) return NULL; // no more return fAddressList.fAddressArray[fNextIndex++]; } ////////// Port ////////// Port::Port(portNumBits num /* in host byte order */) { fPortNum = htons(num); } UsageEnvironment& operator<<(UsageEnvironment& s, const Port& p) { return s << ntohs(p.num()); } ////////// AddressPortLookupTable ////////// AddressPortLookupTable::AddressPortLookupTable() : fTable(HashTable::create(3)) { // three-word keys are used } AddressPortLookupTable::~AddressPortLookupTable() { delete fTable; } void* AddressPortLookupTable::Add(netAddressBits address1, netAddressBits address2, Port port, void* value) { int key[3]; key[0] = (int)address1; key[1] = (int)address2; key[2] = (int)port.num(); return fTable->Add((char*)key, value); } void* AddressPortLookupTable::Lookup(netAddressBits address1, netAddressBits address2, Port port) { int key[3]; key[0] = (int)address1; key[1] = (int)address2; key[2] = (int)port.num(); return fTable->Lookup((char*)key); } Boolean AddressPortLookupTable::Remove(netAddressBits address1, netAddressBits address2, Port port) { int key[3]; key[0] = (int)address1; key[1] = (int)address2; key[2] = (int)port.num(); return fTable->Remove((char*)key); } AddressPortLookupTable::Iterator::Iterator(AddressPortLookupTable& table) : fIter(HashTable::Iterator::create(*(table.fTable))) { } AddressPortLookupTable::Iterator::~Iterator() { delete fIter; } void* AddressPortLookupTable::Iterator::next() { char const* key; // dummy return fIter->next(key); } ////////// isMulticastAddress() implementation ////////// Boolean IsMulticastAddress(netAddressBits address) { // Note: We return False for addresses in the range 224.0.0.0 // through 224.0.0.255, because these are non-routable // Note: IPv4-specific ##### netAddressBits addressInNetworkOrder = htonl(address); return addressInNetworkOrder > 0xE00000FF && addressInNetworkOrder <= 0xEFFFFFFF; } ////////// AddressString implementation ////////// AddressString::AddressString(struct sockaddr_in const& addr) { init(addr.sin_addr.s_addr); } AddressString::AddressString(struct in_addr const& addr) { init(addr.s_addr); } 
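// (For example, given an "in_addr" whose address is 232.0.1.1, the resulting "AddressString"'s "val()" returns the C string "232.0.1.1".)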
AddressString::AddressString(netAddressBits addr) { init(addr); } void AddressString::init(netAddressBits addr) { fVal = new char[16]; // large enough for "abc.def.ghi.jkl" netAddressBits addrNBO = htonl(addr); // make sure we have a value in a known byte order: big endian sprintf(fVal, "%u.%u.%u.%u", (addrNBO>>24)&0xFF, (addrNBO>>16)&0xFF, (addrNBO>>8)&0xFF, addrNBO&0xFF); } AddressString::~AddressString() { delete[] fVal; } live/groupsock/IOHandlers.cpp000444 001751 000000 00000003734 12265042432 016456 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "mTunnel" multicast access service // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // IO event handlers // Implementation #include "IOHandlers.hh" #include "TunnelEncaps.hh" //##### TEMP: Use a single buffer, sized for UDP tunnels: //##### This assumes that the I/O handlers are non-reentrant static unsigned const maxPacketLength = 50*1024; // bytes // This is usually overkill, because UDP packets are usually no larger // than the typical Ethernet MTU (1500 bytes). However, I've seen // reports of Windows Media Servers sending UDP packets as large as // 27 kBytes. These will probably undergo lots of IP-level // fragmentation, but that occurs below us. We just have to hope that // fragments don't get lost. static unsigned const ioBufferSize = maxPacketLength + TunnelEncapsulationTrailerMaxSize; static unsigned char ioBuffer[ioBufferSize]; void socketReadHandler(Socket* sock, int /*mask*/) { unsigned bytesRead; struct sockaddr_in fromAddress; UsageEnvironment& saveEnv = sock->env(); // because handleRead(), if it fails, may delete "sock" if (!sock->handleRead(ioBuffer, ioBufferSize, bytesRead, fromAddress)) { saveEnv.reportBackgroundError(); } } live/groupsock/COPYING000755 001751 000000 00000000000 12265042432 016247 2../COPYINGustar00rsfwheel000000 000000 live/groupsock/NetInterface.cpp000444 001751 000000 00000011057 12265042432 017032 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "mTunnel" multicast access service // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
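// A minimal sketch of how a "Socket" subclass is typically serviced by the event loop (illustrative only; "socketReadHandler()" is the function from "IOHandlers.cpp" above, and "env" and "gs" are assumed to be an existing "UsageEnvironment*" and "Groupsock*"): //   env->taskScheduler().turnOnBackgroundReadHandling(gs->socketNum(), //       (TaskScheduler::BackgroundHandlerProc*)&socketReadHandler, gs); //   env->taskScheduler().doEventLoop(); // "socketReadHandler()" now runs whenever the socket becomes readable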
// Network Interfaces // Implementation #include "NetInterface.hh" #include "GroupsockHelper.hh" #ifndef NO_SSTREAM #include #endif #include ////////// NetInterface ////////// UsageEnvironment* NetInterface::DefaultUsageEnvironment = NULL; NetInterface::NetInterface() { } NetInterface::~NetInterface() { } ////////// DirectedNetInterface ////////// DirectedNetInterface::DirectedNetInterface() { } DirectedNetInterface::~DirectedNetInterface() { } ////////// DirectedNetInterfaceSet ////////// DirectedNetInterfaceSet::DirectedNetInterfaceSet() : fTable(HashTable::create(ONE_WORD_HASH_KEYS)) { } DirectedNetInterfaceSet::~DirectedNetInterfaceSet() { delete fTable; } DirectedNetInterface* DirectedNetInterfaceSet::Add(DirectedNetInterface const* interf) { return (DirectedNetInterface*) fTable->Add((char*)interf, (void*)interf); } Boolean DirectedNetInterfaceSet::Remove(DirectedNetInterface const* interf) { return fTable->Remove((char*)interf); } DirectedNetInterfaceSet::Iterator:: Iterator(DirectedNetInterfaceSet& interfaces) : fIter(HashTable::Iterator::create(*(interfaces.fTable))) { } DirectedNetInterfaceSet::Iterator::~Iterator() { delete fIter; } DirectedNetInterface* DirectedNetInterfaceSet::Iterator::next() { char const* key; // dummy return (DirectedNetInterface*) fIter->next(key); }; ////////// Socket ////////// int Socket::DebugLevel = 1; // default value Socket::Socket(UsageEnvironment& env, Port port) : fEnv(DefaultUsageEnvironment != NULL ? *DefaultUsageEnvironment : env), fPort(port) { fSocketNum = setupDatagramSocket(fEnv, port); } void Socket::reset() { closeSocket(fSocketNum); fSocketNum = -1; } Socket::~Socket() { reset(); } Boolean Socket::changePort(Port newPort) { int oldSocketNum = fSocketNum; unsigned oldReceiveBufferSize = getReceiveBufferSize(fEnv, fSocketNum); unsigned oldSendBufferSize = getSendBufferSize(fEnv, fSocketNum); closeSocket(fSocketNum); fSocketNum = setupDatagramSocket(fEnv, newPort); if (fSocketNum < 0) { fEnv.taskScheduler().turnOffBackgroundReadHandling(oldSocketNum); return False; } setReceiveBufferTo(fEnv, fSocketNum, oldReceiveBufferSize); setSendBufferTo(fEnv, fSocketNum, oldSendBufferSize); if (fSocketNum != oldSocketNum) { // the socket number has changed, so move any event handling for it: fEnv.taskScheduler().moveSocketHandling(oldSocketNum, fSocketNum); } return True; } UsageEnvironment& operator<<(UsageEnvironment& s, const Socket& sock) { return s << timestampString() << " Socket(" << sock.socketNum() << ")"; } ////////// SocketLookupTable ////////// SocketLookupTable::SocketLookupTable() : fTable(HashTable::create(ONE_WORD_HASH_KEYS)) { } SocketLookupTable::~SocketLookupTable() { delete fTable; } Socket* SocketLookupTable::Fetch(UsageEnvironment& env, Port port, Boolean& isNew) { isNew = False; Socket* sock; do { sock = (Socket*) fTable->Lookup((char*)(long)(port.num())); if (sock == NULL) { // we need to create one: sock = CreateNew(env, port); if (sock == NULL || sock->socketNum() < 0) break; fTable->Add((char*)(long)(port.num()), (void*)sock); isNew = True; } return sock; } while (0); delete sock; return NULL; } Boolean SocketLookupTable::Remove(Socket const* sock) { return fTable->Remove( (char*)(long)(sock->port().num()) ); } ////////// NetInterfaceTrafficStats ////////// NetInterfaceTrafficStats::NetInterfaceTrafficStats() { fTotNumPackets = fTotNumBytes = 0.0; } void NetInterfaceTrafficStats::countPacket(unsigned packetSize) { fTotNumPackets += 1.0; fTotNumBytes += packetSize; } Boolean NetInterfaceTrafficStats::haveSeenTraffic() const { return
fTotNumPackets != 0.0; } live/groupsock/include/groupsock_version.hh000444 001751 000000 00000000446 12265042432 021504 0ustar00rsfwheel000000 000000 // Version information for the "groupsock" library // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. #ifndef _GROUPSOCK_VERSION_HH #define _GROUPSOCK_VERSION_HH #define GROUPSOCK_LIBRARY_VERSION_STRING "2014.01.13" #define GROUPSOCK_LIBRARY_VERSION_INT 1389571200 #endif live/groupsock/include/NetCommon.h000444 001751 000000 00000006152 12265042432 017452 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ /* "groupsock" interface * Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. * Common include files, typically used for networking */ #ifndef _NET_COMMON_H #define _NET_COMMON_H #if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_WCE) /* Windows */ #if defined(WINNT) || defined(_WINNT) || defined(__BORLANDC__) || defined(__MINGW32__) || defined(_WIN32_WCE) || defined (_MSC_VER) #define _MSWSOCK_ #include #include #endif #include #include #include #define closeSocket closesocket #ifdef EWOULDBLOCK #undef EWOULDBLOCK #endif #ifdef EINPROGRESS #undef EINPROGRESS #endif #ifdef EAGAIN #undef EAGAIN #endif #ifdef EINTR #undef EINTR #endif #define EWOULDBLOCK WSAEWOULDBLOCK #define EINPROGRESS WSAEWOULDBLOCK #define EAGAIN WSAEWOULDBLOCK #define EINTR WSAEINTR #if defined(_WIN32_WCE) #define NO_STRSTREAM 1 #endif /* Definitions of size-specific types: */ typedef __int64 int64_t; typedef unsigned __int64 u_int64_t; typedef unsigned u_int32_t; typedef unsigned short u_int16_t; typedef unsigned char u_int8_t; // For "uintptr_t" and "intptr_t", we assume that if they're not already defined, then this must be // an old, 32-bit version of Windows: #if !defined(_MSC_STDINT_H_) && !defined(_UINTPTR_T_DEFINED) && !defined(_UINTPTR_T_DECLARED) && !defined(_UINTPTR_T) typedef unsigned uintptr_t; #endif #if !defined(_MSC_STDINT_H_) && !defined(_INTPTR_T_DEFINED) && !defined(_INTPTR_T_DECLARED) && !defined(_INTPTR_T) typedef int intptr_t; #endif #elif defined(VXWORKS) /* VxWorks */ #include #include #include #include #include #include #include typedef unsigned int u_int32_t; typedef unsigned short u_int16_t; typedef unsigned char u_int8_t; #else /* Unix */ #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(_QNX4) #include #include #endif #define closeSocket close #ifdef SOLARIS #define u_int64_t uint64_t #define u_int32_t uint32_t #define u_int16_t uint16_t #define u_int8_t uint8_t #endif #endif #ifndef SOCKLEN_T #define SOCKLEN_T int #endif #endif live/groupsock/include/Groupsock.hh000444 001751 000000 00000015060 12265042432 017675 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it 
under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "mTunnel" multicast access service // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // 'Group sockets' // C++ header #ifndef _GROUPSOCK_HH #define _GROUPSOCK_HH #ifndef _GROUPSOCK_VERSION_HH #include "groupsock_version.hh" #endif #ifndef _NET_INTERFACE_HH #include "NetInterface.hh" #endif #ifndef _GROUPEID_HH #include "GroupEId.hh" #endif // An "OutputSocket" is (by default) used only to send packets. // No packets are received on it (unless a subclass arranges this) class OutputSocket: public Socket { public: OutputSocket(UsageEnvironment& env); virtual ~OutputSocket(); Boolean write(netAddressBits address, Port port, u_int8_t ttl, unsigned char* buffer, unsigned bufferSize); protected: OutputSocket(UsageEnvironment& env, Port port); portNumBits sourcePortNum() const {return fSourcePort.num();} private: // redefined virtual function virtual Boolean handleRead(unsigned char* buffer, unsigned bufferMaxSize, unsigned& bytesRead, struct sockaddr_in& fromAddress); private: Port fSourcePort; u_int8_t fLastSentTTL; }; class destRecord { public: destRecord(struct in_addr const& addr, Port const& port, u_int8_t ttl, destRecord* next); virtual ~destRecord(); public: destRecord* fNext; GroupEId fGroupEId; Port fPort; }; // A "Groupsock" is used to both send and receive packets. // As the name suggests, it was originally designed to send/receive // multicast, but it can send/receive unicast as well. class Groupsock: public OutputSocket { public: Groupsock(UsageEnvironment& env, struct in_addr const& groupAddr, Port port, u_int8_t ttl); // used for a 'source-independent multicast' group Groupsock(UsageEnvironment& env, struct in_addr const& groupAddr, struct in_addr const& sourceFilterAddr, Port port); // used for a 'source-specific multicast' group virtual ~Groupsock(); void changeDestinationParameters(struct in_addr const& newDestAddr, Port newDestPort, int newDestTTL); // By default, the destination address, port and ttl for // outgoing packets are those that were specified in // the constructor. This works OK for multicast sockets, // but for unicast we usually want the destination port // number, at least, to be different from the source port. // (If a parameter is 0 (or ~0 for ttl), then no change made.) // As a special case, we also allow multiple destinations (addresses & ports) // (This can be used to implement multi-unicast.) 
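  // For example, 'multi-unicast' to two receivers could be set up with the
  // member functions declared just below (an illustrative sketch, not from
  // the original header; the addresses and port numbers are hypothetical):
  //   struct in_addr dest1, dest2;
  //   dest1.s_addr = our_inet_addr("192.168.0.10");
  //   dest2.s_addr = our_inet_addr("192.168.0.11");
  //   gsock->addDestination(dest1, Port(6666));
  //   gsock->addDestination(dest2, Port(6668));
  //   // each subsequent call to gsock->output() then sends to both destinations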
void addDestination(struct in_addr const& addr, Port const& port); void removeDestination(struct in_addr const& addr, Port const& port); void removeAllDestinations(); struct in_addr const& groupAddress() const { return fIncomingGroupEId.groupAddress(); } struct in_addr const& sourceFilterAddress() const { return fIncomingGroupEId.sourceFilterAddress(); } Boolean isSSM() const { return fIncomingGroupEId.isSSM(); } u_int8_t ttl() const { return fTTL; } void multicastSendOnly(); // send, but don't receive any multicast packets Boolean output(UsageEnvironment& env, u_int8_t ttl, unsigned char* buffer, unsigned bufferSize, DirectedNetInterface* interfaceNotToFwdBackTo = NULL); DirectedNetInterfaceSet& members() { return fMembers; } Boolean deleteIfNoMembers; Boolean isSlave; // for tunneling static NetInterfaceTrafficStats statsIncoming; static NetInterfaceTrafficStats statsOutgoing; static NetInterfaceTrafficStats statsRelayedIncoming; static NetInterfaceTrafficStats statsRelayedOutgoing; NetInterfaceTrafficStats statsGroupIncoming; // *not* static NetInterfaceTrafficStats statsGroupOutgoing; // *not* static NetInterfaceTrafficStats statsGroupRelayedIncoming; // *not* static NetInterfaceTrafficStats statsGroupRelayedOutgoing; // *not* static Boolean wasLoopedBackFromUs(UsageEnvironment& env, struct sockaddr_in& fromAddress); public: // redefined virtual functions virtual Boolean handleRead(unsigned char* buffer, unsigned bufferMaxSize, unsigned& bytesRead, struct sockaddr_in& fromAddress); private: int outputToAllMembersExcept(DirectedNetInterface* exceptInterface, u_int8_t ttlToFwd, unsigned char* data, unsigned size, netAddressBits sourceAddr); private: GroupEId fIncomingGroupEId; destRecord* fDests; u_int8_t fTTL; DirectedNetInterfaceSet fMembers; }; UsageEnvironment& operator<<(UsageEnvironment& s, const Groupsock& g); // A data structure for looking up a 'groupsock' // by (multicast address, port), or by socket number class GroupsockLookupTable { public: Groupsock* Fetch(UsageEnvironment& env, netAddressBits groupAddress, Port port, u_int8_t ttl, Boolean& isNew); // Creates a new Groupsock if none already exists Groupsock* Fetch(UsageEnvironment& env, netAddressBits groupAddress, netAddressBits sourceFilterAddr, Port port, Boolean& isNew); // Creates a new Groupsock if none already exists Groupsock* Lookup(netAddressBits groupAddress, Port port); // Returns NULL if none already exists Groupsock* Lookup(netAddressBits groupAddress, netAddressBits sourceFilterAddr, Port port); // Returns NULL if none already exists Groupsock* Lookup(UsageEnvironment& env, int sock); // Returns NULL if none already exists Boolean Remove(Groupsock const* groupsock); // Used to iterate through the groupsocks in the table class Iterator { public: Iterator(GroupsockLookupTable& groupsocks); Groupsock* next(); // NULL iff none private: AddressPortLookupTable::Iterator fIter; }; private: Groupsock* AddNew(UsageEnvironment& env, netAddressBits groupAddress, netAddressBits sourceFilterAddress, Port port, u_int8_t ttl); private: friend class Iterator; AddressPortLookupTable fTable; }; #endif live/groupsock/include/GroupsockHelper.hh000444 001751 000000 00000011133 12265042432 021032 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "mTunnel" multicast access service // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Helper routines to implement 'group sockets' // C++ header #ifndef _GROUPSOCK_HELPER_HH #define _GROUPSOCK_HELPER_HH #ifndef _NET_ADDRESS_HH #include "NetAddress.hh" #endif int setupDatagramSocket(UsageEnvironment& env, Port port); int setupStreamSocket(UsageEnvironment& env, Port port, Boolean makeNonBlocking = True); int readSocket(UsageEnvironment& env, int socket, unsigned char* buffer, unsigned bufferSize, struct sockaddr_in& fromAddress); Boolean writeSocket(UsageEnvironment& env, int socket, struct in_addr address, Port port, u_int8_t ttlArg, unsigned char* buffer, unsigned bufferSize); unsigned getSendBufferSize(UsageEnvironment& env, int socket); unsigned getReceiveBufferSize(UsageEnvironment& env, int socket); unsigned setSendBufferTo(UsageEnvironment& env, int socket, unsigned requestedSize); unsigned setReceiveBufferTo(UsageEnvironment& env, int socket, unsigned requestedSize); unsigned increaseSendBufferTo(UsageEnvironment& env, int socket, unsigned requestedSize); unsigned increaseReceiveBufferTo(UsageEnvironment& env, int socket, unsigned requestedSize); Boolean makeSocketNonBlocking(int sock); Boolean makeSocketBlocking(int sock); Boolean socketJoinGroup(UsageEnvironment& env, int socket, netAddressBits groupAddress); Boolean socketLeaveGroup(UsageEnvironment&, int socket, netAddressBits groupAddress); // source-specific multicast join/leave Boolean socketJoinGroupSSM(UsageEnvironment& env, int socket, netAddressBits groupAddress, netAddressBits sourceFilterAddr); Boolean socketLeaveGroupSSM(UsageEnvironment&, int socket, netAddressBits groupAddress, netAddressBits sourceFilterAddr); Boolean getSourcePort(UsageEnvironment& env, int socket, Port& port); netAddressBits ourIPAddress(UsageEnvironment& env); // in network order // IP addresses of our sending and receiving interfaces. (By default, these // are INADDR_ANY (i.e., 0), specifying the default interface.) extern netAddressBits SendingInterfaceAddr; extern netAddressBits ReceivingInterfaceAddr; // Allocates a randomly-chosen IPv4 SSM (multicast) address: netAddressBits chooseRandomIPv4SSMAddress(UsageEnvironment& env); // Returns a simple "hh:mm:ss" string, for use in debugging output (e.g.) char const* timestampString(); #ifdef HAVE_SOCKADDR_LEN #define SET_SOCKADDR_SIN_LEN(var) var.sin_len = sizeof var #else #define SET_SOCKADDR_SIN_LEN(var) #endif #define MAKE_SOCKADDR_IN(var,adr,prt) /*adr,prt must be in network order*/\ struct sockaddr_in var;\ var.sin_family = AF_INET;\ var.sin_addr.s_addr = (adr);\ var.sin_port = (prt);\ SET_SOCKADDR_SIN_LEN(var); // By default, we create sockets with the SO_REUSE_* flag set. // If, instead, you want to create sockets without the SO_REUSE_* flags, // Then enclose the creation code with: // { // NoReuse dummy; // ... 
// } class NoReuse { public: NoReuse(UsageEnvironment& env); ~NoReuse(); private: UsageEnvironment& fEnv; }; // Define the "UsageEnvironment"-specific "groupsockPriv" structure: struct _groupsockPriv { // There should be only one of these allocated HashTable* socketTable; int reuseFlag; }; _groupsockPriv* groupsockPriv(UsageEnvironment& env); // allocates it if necessary void reclaimGroupsockPriv(UsageEnvironment& env); #if defined(__WIN32__) || defined(_WIN32) // For Windoze, we need to implement our own gettimeofday() extern int gettimeofday(struct timeval*, int*); #endif // The following are implemented in inet.c: extern "C" netAddressBits our_inet_addr(char const*); extern "C" void our_srandom(int x); extern "C" long our_random(); extern "C" u_int32_t our_random32(); // because "our_random()" returns a 31-bit number #endif live/groupsock/include/IOHandlers.hh000444 001751 000000 00000002142 12265042432 017706 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "mTunnel" multicast access service // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // IO event handlers // C++ header #ifndef _IO_HANDLERS_HH #define _IO_HANDLERS_HH #ifndef _NET_INTERFACE_HH #include "NetInterface.hh" #endif // Handles incoming data on sockets: void socketReadHandler(Socket* sock, int mask); #endif live/groupsock/include/NetAddress.hh000444 001751 000000 00000010667 12265042432 017765 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "mTunnel" multicast access service // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Network Addresses // C++ header #ifndef _NET_ADDRESS_HH #define _NET_ADDRESS_HH #ifndef _HASH_TABLE_HH #include "HashTable.hh" #endif #ifndef _NET_COMMON_H #include "NetCommon.h" #endif #ifndef _USAGE_ENVIRONMENT_HH #include "UsageEnvironment.hh" #endif // Definition of a type representing a low-level network address. // At present, this is 32-bits, for IPv4. Later, generalize it, // to allow for IPv6. 
typedef u_int32_t netAddressBits; class NetAddress { public: NetAddress(u_int8_t const* data, unsigned length = 4 /* default: 32 bits */); NetAddress(unsigned length = 4); // sets address data to all-zeros NetAddress(NetAddress const& orig); NetAddress& operator=(NetAddress const& rightSide); virtual ~NetAddress(); unsigned length() const { return fLength; } u_int8_t const* data() const // always in network byte order { return fData; } private: void assign(u_int8_t const* data, unsigned length); void clean(); unsigned fLength; u_int8_t* fData; }; class NetAddressList { public: NetAddressList(char const* hostname); NetAddressList(NetAddressList const& orig); NetAddressList& operator=(NetAddressList const& rightSide); virtual ~NetAddressList(); unsigned numAddresses() const { return fNumAddresses; } NetAddress const* firstAddress() const; // Used to iterate through the addresses in a list: class Iterator { public: Iterator(NetAddressList const& addressList); NetAddress const* nextAddress(); // NULL iff none private: NetAddressList const& fAddressList; unsigned fNextIndex; }; private: void assign(netAddressBits numAddresses, NetAddress** addressArray); void clean(); friend class Iterator; unsigned fNumAddresses; NetAddress** fAddressArray; }; typedef u_int16_t portNumBits; class Port { public: Port(portNumBits num /* in host byte order */); portNumBits num() const // in network byte order { return fPortNum; } private: portNumBits fPortNum; // stored in network byte order #ifdef IRIX portNumBits filler; // hack to overcome a bug in IRIX C++ compiler #endif }; UsageEnvironment& operator<<(UsageEnvironment& s, const Port& p); // A generic table for looking up objects by (address1, address2, port) class AddressPortLookupTable { public: AddressPortLookupTable(); virtual ~AddressPortLookupTable(); void* Add(netAddressBits address1, netAddressBits address2, Port port, void* value); // Returns the old value if different, otherwise 0 Boolean Remove(netAddressBits address1, netAddressBits address2, Port port); void* Lookup(netAddressBits address1, netAddressBits address2, Port port); // Returns 0 if not found // Used to iterate through the entries in the table class Iterator { public: Iterator(AddressPortLookupTable& table); virtual ~Iterator(); void* next(); // NULL iff none private: HashTable::Iterator* fIter; }; private: friend class Iterator; HashTable* fTable; }; Boolean IsMulticastAddress(netAddressBits address); // A mechanism for displaying an IPv4 address in ASCII. This is intended to replace "inet_ntoa()", which is not thread-safe. class AddressString { public: AddressString(struct sockaddr_in const& addr); AddressString(struct in_addr const& addr); AddressString(netAddressBits addr); // "addr" is assumed to be in host byte order here virtual ~AddressString(); char const* val() const { return fVal; } private: void init(netAddressBits addr); // used to implement each of the constructors private: char* fVal; // The result ASCII string: allocated by the constructor; deleted by the destructor }; #endif live/groupsock/include/NetInterface.hh000444 001751 000000 00000007404 12265042432 020273 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "mTunnel" multicast access service // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Network Interfaces // C++ header #ifndef _NET_INTERFACE_HH #define _NET_INTERFACE_HH #ifndef _NET_ADDRESS_HH #include "NetAddress.hh" #endif class NetInterface { public: virtual ~NetInterface(); static UsageEnvironment* DefaultUsageEnvironment; // if non-NULL, used for each new interfaces protected: NetInterface(); // virtual base class }; class DirectedNetInterface: public NetInterface { public: virtual ~DirectedNetInterface(); virtual Boolean write(unsigned char* data, unsigned numBytes) = 0; virtual Boolean SourceAddrOKForRelaying(UsageEnvironment& env, unsigned addr) = 0; protected: DirectedNetInterface(); // virtual base class }; class DirectedNetInterfaceSet { public: DirectedNetInterfaceSet(); virtual ~DirectedNetInterfaceSet(); DirectedNetInterface* Add(DirectedNetInterface const* interf); // Returns the old value if different, otherwise 0 Boolean Remove(DirectedNetInterface const* interf); Boolean IsEmpty() { return fTable->IsEmpty(); } // Used to iterate through the interfaces in the set class Iterator { public: Iterator(DirectedNetInterfaceSet& interfaces); virtual ~Iterator(); DirectedNetInterface* next(); // NULL iff none private: HashTable::Iterator* fIter; }; private: friend class Iterator; HashTable* fTable; }; class Socket: public NetInterface { public: virtual ~Socket(); void reset(); // closes the socket, and sets "fSocketNum" to -1 virtual Boolean handleRead(unsigned char* buffer, unsigned bufferMaxSize, unsigned& bytesRead, struct sockaddr_in& fromAddress) = 0; // Returns False on error; resultData == NULL if data ignored int socketNum() const { return fSocketNum; } Port port() const { return fPort; } UsageEnvironment& env() const { return fEnv; } static int DebugLevel; protected: Socket(UsageEnvironment& env, Port port); // virtual base class Boolean changePort(Port newPort); // will also cause socketNum() to change private: int fSocketNum; UsageEnvironment& fEnv; Port fPort; }; UsageEnvironment& operator<<(UsageEnvironment& s, const Socket& sock); // A data structure for looking up a Socket by port: class SocketLookupTable { public: virtual ~SocketLookupTable(); Socket* Fetch(UsageEnvironment& env, Port port, Boolean& isNew); // Creates a new Socket if none already exists Boolean Remove(Socket const* sock); protected: SocketLookupTable(); // abstract base class virtual Socket* CreateNew(UsageEnvironment& env, Port port) = 0; private: HashTable* fTable; }; // A data structure for counting traffic: class NetInterfaceTrafficStats { public: NetInterfaceTrafficStats(); void countPacket(unsigned packetSize); float totNumPackets() const {return fTotNumPackets;} float totNumBytes() const {return fTotNumBytes;} Boolean haveSeenTraffic() const; private: float fTotNumPackets; float fTotNumBytes; }; #endif live/groupsock/include/TunnelEncaps.hh000444 001751 000000 00000006577 12265042432 020335 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the 
terms of the GNU Lesser General Public License as published by the Free Software
Foundation; either version 2.1 of the License, or (at your option) any later
version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along
with this library; if not, write to the Free Software Foundation, Inc., 51
Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "mTunnel" multicast access service
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Encapsulation trailer for tunnels
// C++ header

#ifndef _TUNNEL_ENCAPS_HH
#define _TUNNEL_ENCAPS_HH

#ifndef _NET_ADDRESS_HH
#include "NetAddress.hh"
#endif

typedef u_int16_t Cookie;

class TunnelEncapsulationTrailer {
  // The trailer is laid out as follows:
  // bytes 0-1: source 'cookie'
  // bytes 2-3: destination 'cookie'
  // bytes 4-7: address
  // bytes 8-9: port
  // byte 10: ttl
  // byte 11: command

  // Optionally, there may also be a 4-byte 'auxiliary address'
  // (e.g., for 'source-specific multicast' preceding this)
  // bytes -4 through -1: auxiliary address

public:
  Cookie& srcCookie() { return *(Cookie*)byteOffset(0); }
  Cookie& dstCookie() { return *(Cookie*)byteOffset(2); }
  u_int32_t& address() { return *(u_int32_t*)byteOffset(4); }
  Port& port() { return *(Port*)byteOffset(8); }
  u_int8_t& ttl() { return *(u_int8_t*)byteOffset(10); }
  u_int8_t& command() { return *(u_int8_t*)byteOffset(11); }
  u_int32_t& auxAddress() { return *(u_int32_t*)byteOffset(-4); }

private:
  inline char* byteOffset(int charIndex) { return ((char*)this) + charIndex; }
};

const unsigned TunnelEncapsulationTrailerSize = 12; // bytes
const unsigned TunnelEncapsulationTrailerAuxSize = 4; // bytes
const unsigned TunnelEncapsulationTrailerMaxSize
  = TunnelEncapsulationTrailerSize + TunnelEncapsulationTrailerAuxSize;

// Command codes:
// 0: unused
const u_int8_t TunnelDataCmd = 1;
const u_int8_t TunnelJoinGroupCmd = 2;
const u_int8_t TunnelLeaveGroupCmd = 3;
const u_int8_t TunnelTearDownCmd = 4;
const u_int8_t TunnelProbeCmd = 5;
const u_int8_t TunnelProbeAckCmd = 6;
const u_int8_t TunnelProbeNackCmd = 7;
const u_int8_t TunnelJoinRTPGroupCmd = 8;
const u_int8_t TunnelLeaveRTPGroupCmd = 9;
// 0x0A through 0x10: currently unused.
const u_int8_t TunnelExtensionFlag = 0x80; // a flag, not a cmd code
const u_int8_t TunnelDataAuxCmd = (TunnelExtensionFlag|TunnelDataCmd);
const u_int8_t TunnelJoinGroupAuxCmd = (TunnelExtensionFlag|TunnelJoinGroupCmd);
const u_int8_t TunnelLeaveGroupAuxCmd = (TunnelExtensionFlag|TunnelLeaveGroupCmd);
// Note: the TearDown, Probe, ProbeAck, ProbeNack cmds have no Aux version
// 0x84 through 0x87: currently unused.
const u_int8_t TunnelJoinRTPGroupAuxCmd = (TunnelExtensionFlag|TunnelJoinRTPGroupCmd); const u_int8_t TunnelLeaveRTPGroupAuxCmd = (TunnelExtensionFlag|TunnelLeaveRTPGroupCmd); // 0x8A through 0xFF: currently unused inline Boolean TunnelIsAuxCmd(u_int8_t cmd) { return (cmd&TunnelExtensionFlag) != 0; } #endif live/groupsock/include/GroupEId.hh000444 001751 000000 00000005376 12265042432 017410 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // "multikit" Multicast Application Shell // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // "Group Endpoint Id" // C++ header #ifndef _GROUPEID_HH #define _GROUPEID_HH #ifndef _BOOLEAN_HH #include "Boolean.hh" #endif #ifndef _NET_ADDRESS_HH #include "NetAddress.hh" #endif const u_int8_t MAX_TTL = 255; class Scope { public: Scope(u_int8_t ttl = 0, const char* publicKey = NULL); Scope(const Scope& orig); Scope& operator=(const Scope& rightSide); ~Scope(); u_int8_t ttl() const { return fTTL; } const char* publicKey() const { return fPublicKey; } unsigned publicKeySize() const; private: void assign(u_int8_t ttl, const char* publicKey); void clean(); u_int8_t fTTL; char* fPublicKey; }; class GroupEId { public: GroupEId(struct in_addr const& groupAddr, portNumBits portNum, Scope const& scope, unsigned numSuccessiveGroupAddrs = 1); // used for a 'source-independent multicast' group GroupEId(struct in_addr const& groupAddr, struct in_addr const& sourceFilterAddr, portNumBits portNum, unsigned numSuccessiveGroupAddrs = 1); // used for a 'source-specific multicast' group GroupEId(); // used only as a temp constructor prior to initialization struct in_addr const& groupAddress() const { return fGroupAddress; } struct in_addr const& sourceFilterAddress() const { return fSourceFilterAddress; } Boolean isSSM() const; unsigned numSuccessiveGroupAddrs() const { // could be >1 for hier encoding return fNumSuccessiveGroupAddrs; } portNumBits portNum() const { return fPortNum; } const Scope& scope() const { return fScope; } private: void init(struct in_addr const& groupAddr, struct in_addr const& sourceFilterAddr, portNumBits portNum, Scope const& scope, unsigned numSuccessiveGroupAddrs); private: struct in_addr fGroupAddress; struct in_addr fSourceFilterAddress; unsigned fNumSuccessiveGroupAddrs; portNumBits fPortNum; Scope fScope; }; #endif live/UsageEnvironment/include/000755 001751 000000 00000000000 12265042432 016655 5ustar00rsfwheel000000 000000 live/UsageEnvironment/HashTable.cpp000444 001751 000000 00000002623 12265042432 017572 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Generic Hash Table // Implementation #include "HashTable.hh" HashTable::HashTable() { } HashTable::~HashTable() { } HashTable::Iterator::Iterator() { } HashTable::Iterator::~Iterator() {} void* HashTable::RemoveNext() { Iterator* iter = Iterator::create(*this); char const* key; void* removedValue = iter->next(key); if (removedValue != 0) Remove(key); delete iter; return removedValue; } void* HashTable::getFirst() { Iterator* iter = Iterator::create(*this); char const* key; void* firstValue = iter->next(key); delete iter; return firstValue; } live/UsageEnvironment/Makefile.tail000444 001751 000000 00000002401 12265042432 017615 0ustar00rsfwheel000000 000000 ##### End of variables to change NAME = libUsageEnvironment USAGE_ENVIRONMENT_LIB = $(NAME).$(LIB_SUFFIX) ALL = $(USAGE_ENVIRONMENT_LIB) all: $(ALL) OBJS = UsageEnvironment.$(OBJ) HashTable.$(OBJ) strDup.$(OBJ) $(USAGE_ENVIRONMENT_LIB): $(OBJS) $(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) $(OBJS) .$(C).$(OBJ): $(C_COMPILER) -c $(C_FLAGS) $< .$(CPP).$(OBJ): $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $< UsageEnvironment.$(CPP): include/UsageEnvironment.hh include/UsageEnvironment.hh: include/UsageEnvironment_version.hh include/Boolean.hh include/strDup.hh HashTable.$(CPP): include/HashTable.hh include/HashTable.hh: include/Boolean.hh strDup.$(CPP): include/strDup.hh clean: -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~ install: install1 $(INSTALL2) install1: $(USAGE_ENVIRONMENT_LIB) install -d $(DESTDIR)$(PREFIX)/include/UsageEnvironment $(DESTDIR)$(LIBDIR) install -m 644 include/*.hh $(DESTDIR)$(PREFIX)/include/UsageEnvironment install -m 644 $(USAGE_ENVIRONMENT_LIB) $(DESTDIR)$(LIBDIR) install_shared_libraries: $(USAGE_ENVIRONMENT_LIB) ln -s $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).$(SHORT_LIB_SUFFIX) ln -s $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).so ##### Any additional, platform-specific rules come here: live/UsageEnvironment/Makefile.head000440 001751 000000 00000000210 12265042432 017555 0ustar00rsfwheel000000 000000 INCLUDES = -Iinclude -I../groupsock/include PREFIX = /usr/local LIBDIR = $(PREFIX)/lib ##### Change the following for your environment: live/UsageEnvironment/strDup.cpp000444 001751 000000 00000002537 12265042432 017224 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along
with this library; if not, write to the Free Software Foundation, Inc., 51
Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A C++ equivalent to the standard C routine "strdup()".
// This generates a char* that can be deleted using "delete[]"
// Implementation

#include "strDup.hh"
#include "string.h"

char* strDup(char const* str) {
  if (str == NULL) return NULL;
  size_t len = strlen(str) + 1;
  char* copy = new char[len];

  if (copy != NULL) {
    memcpy(copy, str, len);
  }
  return copy;
}

char* strDupSize(char const* str) {
  if (str == NULL) return NULL;
  size_t len = strlen(str) + 1;
  char* copy = new char[len];

  return copy;
}
live/UsageEnvironment/COPYING000755 001751 000000 00000000000 12265042432 017524 2../COPYINGustar00rsfwheel000000 000000 live/UsageEnvironment/UsageEnvironment.cpp000444 001751 000000 00000003767 12265042432 021236 0ustar00rsfwheel000000 000000 
/**********
This library is free software; you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free Software
Foundation; either version 2.1 of the License, or (at your option) any later
version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along
with this library; if not, write to the Free Software Foundation, Inc., 51
Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Usage Environment
// Implementation

#include "UsageEnvironment.hh"

void UsageEnvironment::reclaim() {
  // We delete ourselves only if we have no remaining state:
  if (liveMediaPriv == NULL && groupsockPriv == NULL) delete this;
}

UsageEnvironment::UsageEnvironment(TaskScheduler& scheduler)
  : liveMediaPriv(NULL), groupsockPriv(NULL), fScheduler(scheduler) {
}

UsageEnvironment::~UsageEnvironment() {
}

// By default, we handle 'should not occur'-type library errors by calling abort().  Subclasses can redefine this, if desired.
// (If your runtime library doesn't define the "abort()" function, then define your own (e.g., that does nothing).)
void UsageEnvironment::internalError() {
  abort();
}

TaskScheduler::TaskScheduler() {
}

TaskScheduler::~TaskScheduler() {
}

void TaskScheduler::rescheduleDelayedTask(TaskToken& task,
					  int64_t microseconds, TaskFunc* proc,
					  void* clientData) {
  unscheduleDelayedTask(task);
  task = scheduleDelayedTask(microseconds, proc, clientData);
}

// By default, we handle 'should not occur'-type library errors by calling abort().  Subclasses can redefine this, if desired.
void TaskScheduler::internalError() {
  abort();
}
live/UsageEnvironment/include/Boolean.hh000444 001751 000000 00000002363 12265042432 020557 0ustar00rsfwheel000000 000000 
/********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ #ifndef _BOOLEAN_HH #define _BOOLEAN_HH #if defined(__BORLANDC__) || (!defined(USE_LIVE555_BOOLEAN) && defined(_MSC_VER) && _MSC_VER >= 1400) // Use the "bool" type defined by the Borland compiler, and MSVC++ 8.0, Visual Studio 2005 and higher typedef bool Boolean; #define False false #define True true #else typedef unsigned char Boolean; #ifndef __MSHTML_LIBRARY_DEFINED__ #ifndef False const Boolean False = 0; #endif #ifndef True const Boolean True = 1; #endif #endif #endif #endif live/UsageEnvironment/include/HashTable.hh000444 001751 000000 00000004737 12265042432 021042 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Generic Hash Table // C++ header #ifndef _HASH_TABLE_HH #define _HASH_TABLE_HH #ifndef _BOOLEAN_HH #include "Boolean.hh" #endif class HashTable { public: virtual ~HashTable(); // The following must be implemented by a particular // implementation (subclass): static HashTable* create(int keyType); virtual void* Add(char const* key, void* value) = 0; // Returns the old value if different, otherwise 0 virtual Boolean Remove(char const* key) = 0; virtual void* Lookup(char const* key) const = 0; // Returns 0 if not found virtual unsigned numEntries() const = 0; Boolean IsEmpty() const { return numEntries() == 0; } // Used to iterate through the members of the table: class Iterator { public: // The following must be implemented by a particular // implementation (subclass): static Iterator* create(HashTable const& hashTable); virtual ~Iterator(); virtual void* next(char const*& key) = 0; // returns 0 if none protected: Iterator(); // abstract base class }; // A shortcut that can be used to successively remove each of // the entries in the table (e.g., so that their values can be // deleted, if they happen to be pointers to allocated memory). void* RemoveNext(); // Returns the first entry in the table. // (This is useful for deleting each entry in the table, if the entry's destructor also removes itself from the table.) 
void* getFirst(); protected: HashTable(); // abstract base class }; // Warning: The following are deliberately the same as in // Tcl's hash table implementation int const STRING_HASH_KEYS = 0; int const ONE_WORD_HASH_KEYS = 1; #endif live/UsageEnvironment/include/UsageEnvironment.hh000444 001751 000000 00000014506 12265042432 022473 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Usage Environment // C++ header #ifndef _USAGE_ENVIRONMENT_HH #define _USAGE_ENVIRONMENT_HH #ifndef _USAGEENVIRONMENT_VERSION_HH #include "UsageEnvironment_version.hh" #endif #ifndef _NETCOMMON_H #include "NetCommon.h" #endif #ifndef _BOOLEAN_HH #include "Boolean.hh" #endif #ifndef _STRDUP_HH // "strDup()" is used often, so include this here, so everyone gets it: #include "strDup.hh" #endif #ifndef NULL #define NULL 0 #endif #ifdef __BORLANDC__ #define _setmode setmode #define _O_BINARY O_BINARY #endif class TaskScheduler; // forward // An abstract base class, subclassed for each use of the library class UsageEnvironment { public: void reclaim(); // task scheduler: TaskScheduler& taskScheduler() const {return fScheduler;} // result message handling: typedef char const* MsgString; virtual MsgString getResultMsg() const = 0; virtual void setResultMsg(MsgString msg) = 0; virtual void setResultMsg(MsgString msg1, MsgString msg2) = 0; virtual void setResultMsg(MsgString msg1, MsgString msg2, MsgString msg3) = 0; virtual void setResultErrMsg(MsgString msg, int err = 0) = 0; // like setResultMsg(), except that an 'errno' message is appended. (If "err == 0", the "getErrno()" code is used instead.) virtual void appendToResultMsg(MsgString msg) = 0; virtual void reportBackgroundError() = 0; // used to report a (previously set) error message within // a background event virtual void internalError(); // used to 'handle' a 'should not occur'-type error condition within the library. 
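  // For example, an error path in a subclass or client might report a failure
  // as follows (an illustrative sketch, not from the original header):
  //   if (connect(sock, ...) < 0) {
  //     env.setResultErrMsg("connect() failed: ");
  //     env.reportBackgroundError(); // or: env << env.getResultMsg() << "\n";
  //   }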
// 'errno' virtual int getErrno() const = 0; // 'console' output: virtual UsageEnvironment& operator<<(char const* str) = 0; virtual UsageEnvironment& operator<<(int i) = 0; virtual UsageEnvironment& operator<<(unsigned u) = 0; virtual UsageEnvironment& operator<<(double d) = 0; virtual UsageEnvironment& operator<<(void* p) = 0; // a pointer to additional, optional, client-specific state void* liveMediaPriv; void* groupsockPriv; protected: UsageEnvironment(TaskScheduler& scheduler); // abstract base class virtual ~UsageEnvironment(); // we are deleted only by reclaim() private: TaskScheduler& fScheduler; }; typedef void TaskFunc(void* clientData); typedef void* TaskToken; typedef u_int32_t EventTriggerId; class TaskScheduler { public: virtual ~TaskScheduler(); virtual TaskToken scheduleDelayedTask(int64_t microseconds, TaskFunc* proc, void* clientData) = 0; // Schedules a task to occur (after a delay) when we next // reach a scheduling point. // (Does not delay if "microseconds" <= 0) // Returns a token that can be used in a subsequent call to // unscheduleDelayedTask() virtual void unscheduleDelayedTask(TaskToken& prevTask) = 0; // (Has no effect if "prevTask" == NULL) // Sets "prevTask" to NULL afterwards. virtual void rescheduleDelayedTask(TaskToken& task, int64_t microseconds, TaskFunc* proc, void* clientData); // Combines "unscheduleDelayedTask()" with "scheduleDelayedTask()" // (setting "task" to the new task token). // For handling socket operations in the background (from the event loop): typedef void BackgroundHandlerProc(void* clientData, int mask); // Possible bits to set in "mask". (These are deliberately defined // the same as those in Tcl, to make a Tcl-based subclass easy.) #define SOCKET_READABLE (1<<1) #define SOCKET_WRITABLE (1<<2) #define SOCKET_EXCEPTION (1<<3) virtual void setBackgroundHandling(int socketNum, int conditionSet, BackgroundHandlerProc* handlerProc, void* clientData) = 0; void disableBackgroundHandling(int socketNum) { setBackgroundHandling(socketNum, 0, NULL, NULL); } virtual void moveSocketHandling(int oldSocketNum, int newSocketNum) = 0; // Changes any socket handling for "oldSocketNum" so that occurs with "newSocketNum" instead. virtual void doEventLoop(char* watchVariable = NULL) = 0; // Causes further execution to take place within the event loop. // Delayed tasks, background I/O handling, and other events are handled, sequentially (as a single thread of control). // (If "watchVariable" is not NULL, then we return from this routine when *watchVariable != 0) virtual EventTriggerId createEventTrigger(TaskFunc* eventHandlerProc) = 0; // Creates a 'trigger' for an event, which - if it occurs - will be handled (from the event loop) using "eventHandlerProc". // (Returns 0 iff no such trigger can be created (e.g., because of implementation limits on the number of triggers).) virtual void deleteEventTrigger(EventTriggerId eventTriggerId) = 0; virtual void triggerEvent(EventTriggerId eventTriggerId, void* clientData = NULL) = 0; // Causes the (previously-registered) handler function for the specified event to be handled (from the event loop). // The handler function is called with "clientData" as parameter. // Note: This function (unlike other library functions) may be called from an external thread - to signal an external event. 
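  // For example, a separate worker thread can signal the event loop as follows
  // (an illustrative sketch, not from the original header; the names are
  // hypothetical):
  //   EventTriggerId myTriggerId = scheduler.createEventTrigger(myHandlerFunc);
  //   ...
  //   scheduler.triggerEvent(myTriggerId, myClientData); // may be called from another thread
  //   // "myHandlerFunc(myClientData)" then gets called later, from within the event loop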
// The following two functions are deprecated, and are provided for backwards-compatibility only: void turnOnBackgroundReadHandling(int socketNum, BackgroundHandlerProc* handlerProc, void* clientData) { setBackgroundHandling(socketNum, SOCKET_READABLE, handlerProc, clientData); } void turnOffBackgroundReadHandling(int socketNum) { disableBackgroundHandling(socketNum); } virtual void internalError(); // used to 'handle' a 'should not occur'-type error condition within the library. protected: TaskScheduler(); // abstract base class }; #endif live/UsageEnvironment/include/UsageEnvironment_version.hh000444 001751 000000 00000000511 12265042432 024227 0ustar00rsfwheel000000 000000 // Version information for the "UsageEnvironment" library // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. #ifndef _USAGEENVIRONMENT_VERSION_HH #define _USAGEENVIRONMENT_VERSION_HH #define USAGEENVIRONMENT_LIBRARY_VERSION_STRING "2014.01.13" #define USAGEENVIRONMENT_LIBRARY_VERSION_INT 1389571200 #endif live/UsageEnvironment/include/strDup.hh000444 001751 000000 00000002362 12265042432 020460 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ #ifndef _STRDUP_HH #define _STRDUP_HH // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // A C++ equivalent to the standard C routine "strdup()". // This generates a char* that can be deleted using "delete[]" // Header char* strDup(char const* str); // Note: strDup(NULL) returns NULL char* strDupSize(char const* str); // Like "strDup()", except that it *doesn't* copy the original. // (Instead, it just allocates a string of the same size as the original.) #endif live/WindowsAudioInputDevice/WindowsAudioInputDevice_noMixer.hh000444 001751 000000 00000003632 12265042432 025315 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Windows implementation of a generic audio input device // This version does not use Windows' built-in software mixer. // C++ header // // To use this, call "AudioInputDevice::createNew()". 
// You can also call "AudioInputDevice::getPortNames()" to get a list // of port names. #ifndef _WINDOWS_AUDIO_INPUT_DEVICE_NOMIXER_HH #define _WINDOWS_AUDIO_INPUT_DEVICE_NOMIXER_HH #ifndef _WINDOWS_AUDIO_INPUT_DEVICE_COMMON_HH #include "WindowsAudioInputDevice_common.hh" #endif class WindowsAudioInputDevice: public WindowsAudioInputDevice_common { private: friend class AudioInputDevice; WindowsAudioInputDevice(UsageEnvironment& env, int inputPortNumber, unsigned char bitsPerSample, unsigned char numChannels, unsigned samplingFrequency, unsigned granularityInMS, Boolean& success); // called only by createNew() virtual ~WindowsAudioInputDevice(); static void initializeIfNecessary(); private: // redefined virtual functions: virtual Boolean setInputPort(int portIndex); private: static unsigned numAudioInputPorts; static class AudioInputPort* ourAudioInputPorts; }; #endif live/WindowsAudioInputDevice/WindowsAudioInputDevice_common.cpp000444 001751 000000 00000025256 12265042432 025355 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 2001-2004 Live Networks, Inc. All rights reserved. // Windows implementation of a generic audio input device // Base class for both library versions: // One that uses Windows' built-in software mixer; another that doesn't. 
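// For example, a client of this class might enumerate the available input
// ports, and then open one (an illustrative sketch, not from the original
// file; the parameter values are arbitrary):
//   AudioPortNames* portNames = AudioInputDevice::getPortNames();
//   for (unsigned i = 0; i < portNames->numPorts; ++i) {
//     fprintf(stderr, "port %u: %s\n", i, portNames->portName[i]);
//   }
//   AudioInputDevice* mic = AudioInputDevice::createNew(env, 0 /*port index*/,
//       16 /*bits-per-sample*/, 2 /*channels*/, 44100 /*Hz*/, 20 /*ms granularity*/);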
// Implementation

#include "WindowsAudioInputDevice_common.hh"
#include <stdio.h>

////////// WindowsAudioInputDevice_common implementation //////////

unsigned WindowsAudioInputDevice_common::_bitsPerSample = 16;

WindowsAudioInputDevice_common
::WindowsAudioInputDevice_common(UsageEnvironment& env, int inputPortNumber,
				 unsigned char bitsPerSample,
				 unsigned char numChannels,
				 unsigned samplingFrequency,
				 unsigned granularityInMS)
  : AudioInputDevice(env, bitsPerSample, numChannels, samplingFrequency, granularityInMS),
    fCurPortIndex(-1), fHaveStarted(False) {
  _bitsPerSample = bitsPerSample;
}

WindowsAudioInputDevice_common::~WindowsAudioInputDevice_common() {
}

Boolean WindowsAudioInputDevice_common::initialSetInputPort(int portIndex) {
  if (!setInputPort(portIndex)) {
    char errMsgPrefix[100];
    sprintf(errMsgPrefix, "Failed to set audio input port number to %d: ", portIndex);
    char* errMsgSuffix = strDup(envir().getResultMsg());
    envir().setResultMsg(errMsgPrefix, errMsgSuffix);
    delete[] errMsgSuffix;
    return False;
  } else {
    return True;
  }
}

void WindowsAudioInputDevice_common::doGetNextFrame() {
  if (!fHaveStarted) {
    // Before reading the first audio data, flush any existing data:
    while (readHead != NULL) releaseHeadBuffer();
    fHaveStarted = True;
  }
  fTotalPollingDelay = 0;
  audioReadyPoller1();
}

void WindowsAudioInputDevice_common::doStopGettingFrames() {
  // Turn off the audio poller:
  envir().taskScheduler().unscheduleDelayedTask(nextTask());
  nextTask() = NULL;
}

double WindowsAudioInputDevice_common::getAverageLevel() const {
  // If the input audio queue is empty, return the previous level,
  // otherwise use the input queue to recompute "averageLevel":
  if (readHead != NULL) {
    double levelTotal = 0.0;
    unsigned totNumSamples = 0;
    WAVEHDR* curHdr = readHead;
    while (1) {
      short* samplePtr = (short*)(curHdr->lpData);
      unsigned numSamples = blockSize/2;
      totNumSamples += numSamples;

      while (numSamples-- > 0) {
	short sample = *samplePtr++;
	if (sample < 0) sample = -sample;
	levelTotal += (unsigned short)sample;
      }

      if (curHdr == readTail) break;
      curHdr = curHdr->lpNext;
    }
    averageLevel = levelTotal/(totNumSamples*(double)0x8000);
  }
  return averageLevel;
}

void WindowsAudioInputDevice_common::audioReadyPoller(void* clientData) {
  WindowsAudioInputDevice_common* inputDevice = (WindowsAudioInputDevice_common*)clientData;
  inputDevice->audioReadyPoller1();
}

void WindowsAudioInputDevice_common::audioReadyPoller1() {
  if (readHead != NULL) {
    onceAudioIsReady();
  } else {
    unsigned const maxPollingDelay = (100 + fGranularityInMS)*1000;
    if (fTotalPollingDelay > maxPollingDelay) {
      // We've waited too long for the audio device - assume it's down:
      handleClosure(this);
      return;
    }

    // Try again after a short delay:
    unsigned const uSecondsToDelay = fGranularityInMS*1000;
    fTotalPollingDelay += uSecondsToDelay;
    nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecondsToDelay,
							     (TaskFunc*)audioReadyPoller, this);
  }
}

void WindowsAudioInputDevice_common::onceAudioIsReady() {
  fFrameSize = readFromBuffers(fTo, fMaxSize, fPresentationTime);
  if (fFrameSize == 0) {
    // The source is no longer readable
    handleClosure(this);
    return;
  }
  fDurationInMicroseconds = 1000000/fSamplingFrequency;

  // Call our own 'after getting' function.
Because we sometimes get here // after returning from a delay, we can call this directly, without risking // infinite recursion afterGetting(this); } static void CALLBACK waveInCallback(HWAVEIN /*hwi*/, UINT uMsg, DWORD /*dwInstance*/, DWORD dwParam1, DWORD /*dwParam2*/) { switch (uMsg) { case WIM_DATA: WAVEHDR* hdr = (WAVEHDR*)dwParam1; WindowsAudioInputDevice_common::waveInProc(hdr); break; } } Boolean WindowsAudioInputDevice_common::openWavInPort(int index, unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS) { uSecsPerByte = (8*1e6)/(_bitsPerSample*numChannels*samplingFrequency); // Configure the port, based on the specified parameters: WAVEFORMATEX wfx; wfx.wFormatTag = WAVE_FORMAT_PCM; wfx.nChannels = numChannels; wfx.nSamplesPerSec = samplingFrequency; wfx.wBitsPerSample = _bitsPerSample; wfx.nBlockAlign = (numChannels*_bitsPerSample)/8; wfx.nAvgBytesPerSec = samplingFrequency*wfx.nBlockAlign; wfx.cbSize = 0; blockSize = (wfx.nAvgBytesPerSec*granularityInMS)/1000; // Use a 10-second input buffer, to allow for CPU competition from video, etc., // and also for some audio cards that buffer as much as 5 seconds of audio. unsigned const bufferSeconds = 10; numBlocks = (bufferSeconds*1000)/granularityInMS; if (!waveIn_open(index, wfx)) return False; // Set this process's priority high. I'm not sure how much this is really needed, // but the "rat" code does this: SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS); return True; } Boolean WindowsAudioInputDevice_common::waveIn_open(unsigned uid, WAVEFORMATEX& wfx) { if (shWaveIn != NULL) return True; // already open do { waveIn_reset(); if (waveInOpen(&shWaveIn, uid, &wfx, (DWORD)waveInCallback, 0, CALLBACK_FUNCTION) != MMSYSERR_NOERROR) break; // Allocate read buffers, and headers: readData = new unsigned char[numBlocks*blockSize]; if (readData == NULL) break; readHdrs = new WAVEHDR[numBlocks]; if (readHdrs == NULL) break; readHead = readTail = NULL; readTimes = new struct timeval[numBlocks]; if (readTimes == NULL) break; // Initialize headers: for (unsigned i = 0; i < numBlocks; ++i) { readHdrs[i].lpData = (char*)&readData[i*blockSize]; readHdrs[i].dwBufferLength = blockSize; readHdrs[i].dwFlags = 0; if (waveInPrepareHeader(shWaveIn, &readHdrs[i], sizeof (WAVEHDR)) != MMSYSERR_NOERROR) break; if (waveInAddBuffer(shWaveIn, &readHdrs[i], sizeof (WAVEHDR)) != MMSYSERR_NOERROR) break; } if (waveInStart(shWaveIn) != MMSYSERR_NOERROR) break; #ifdef UNICODE hAudioReady = CreateEvent(NULL, TRUE, FALSE, L"waveIn Audio Ready"); #else hAudioReady = CreateEvent(NULL, TRUE, FALSE, "waveIn Audio Ready"); #endif return True; } while (0); waveIn_reset(); return False; } void WindowsAudioInputDevice_common::waveIn_close() { if (shWaveIn == NULL) return; // already closed waveInStop(shWaveIn); waveInReset(shWaveIn); for (unsigned i = 0; i < numBlocks; ++i) { if (readHdrs[i].dwFlags & WHDR_PREPARED) { waveInUnprepareHeader(shWaveIn, &readHdrs[i], sizeof (WAVEHDR)); } } waveInClose(shWaveIn); waveIn_reset(); } void WindowsAudioInputDevice_common::waveIn_reset() { shWaveIn = NULL; delete[] readData; readData = NULL; bytesUsedAtReadHead = 0; delete[] readHdrs; readHdrs = NULL; readHead = readTail = NULL; delete[] readTimes; readTimes = NULL; hAudioReady = NULL; } unsigned WindowsAudioInputDevice_common::readFromBuffers(unsigned char* to, unsigned numBytesWanted, struct timeval& creationTime) { // Begin by computing the creation time of (the first bytes of) this returned audio data: if (readHead != NULL) { int hdrIndex = readHead - 
readHdrs; creationTime = readTimes[hdrIndex]; // Adjust this time to allow for any data that's already been read from this buffer: if (bytesUsedAtReadHead > 0) { creationTime.tv_usec += (unsigned)(uSecsPerByte*bytesUsedAtReadHead); creationTime.tv_sec += creationTime.tv_usec/1000000; creationTime.tv_usec %= 1000000; } } // Then, read from each available buffer, until we have the data that we want: unsigned numBytesRead = 0; while (readHead != NULL && numBytesRead < numBytesWanted) { unsigned thisRead = min(readHead->dwBytesRecorded - bytesUsedAtReadHead, numBytesWanted - numBytesRead); memmove(&to[numBytesRead], &readHead->lpData[bytesUsedAtReadHead], thisRead); numBytesRead += thisRead; bytesUsedAtReadHead += thisRead; if (bytesUsedAtReadHead == readHead->dwBytesRecorded) { // We're finished with the block; give it back to the device: releaseHeadBuffer(); } } return numBytesRead; } void WindowsAudioInputDevice_common::releaseHeadBuffer() { WAVEHDR* toRelease = readHead; if (readHead == NULL) return; readHead = readHead->lpNext; if (readHead == NULL) readTail = NULL; toRelease->lpNext = NULL; toRelease->dwBytesRecorded = 0; toRelease->dwFlags &= ~WHDR_DONE; waveInAddBuffer(shWaveIn, toRelease, sizeof (WAVEHDR)); bytesUsedAtReadHead = 0; } void WindowsAudioInputDevice_common::waveInProc(WAVEHDR* hdr) { unsigned hdrIndex = hdr - readHdrs; // Record the time that the data arrived: int dontCare; gettimeofday(&readTimes[hdrIndex], &dontCare); // Add the block to the tail of the queue: hdr->lpNext = NULL; if (readTail != NULL) { readTail->lpNext = hdr; readTail = hdr; } else { readHead = readTail = hdr; } SetEvent(hAudioReady); } HWAVEIN WindowsAudioInputDevice_common::shWaveIn = NULL; unsigned WindowsAudioInputDevice_common::blockSize = 0; unsigned WindowsAudioInputDevice_common::numBlocks = 0; unsigned char* WindowsAudioInputDevice_common::readData = NULL; DWORD WindowsAudioInputDevice_common::bytesUsedAtReadHead = 0; double WindowsAudioInputDevice_common::uSecsPerByte = 0.0; double WindowsAudioInputDevice_common::averageLevel = 0.0; WAVEHDR* WindowsAudioInputDevice_common::readHdrs = NULL; WAVEHDR* WindowsAudioInputDevice_common::readHead = NULL; WAVEHDR* WindowsAudioInputDevice_common::readTail = NULL; struct timeval* WindowsAudioInputDevice_common::readTimes = NULL; HANDLE WindowsAudioInputDevice_common::hAudioReady = NULL; live/WindowsAudioInputDevice/WindowsAudioInputDevice_mixer.cpp000444 001751 000000 00000037442 12265042432 025211 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 2001-2004 Live Networks, Inc. All rights reserved. // Windows implementation of a generic audio input device // This version uses Windows' built-in software mixer. 
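// Note on port numbering (an illustrative explanation, not from the original
// file): the input port index used by "setInputPort()" below is a single index
// that runs across all mixers, in the order returned by
// "AudioInputDevice::getPortNames()". E.g., if mixer 0 exposes three ports and
// mixer 1 exposes two, then:
//   AudioInputDevice::createNew(env, 4, 16, 1, 8000, 20); // hypothetical call
// selects the second port of mixer 1.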
// Implementation #include <WindowsAudioInputDevice_mixer.hh> ////////// Mixer and AudioInputPort definition ////////// class AudioInputPort { public: int tag; DWORD dwComponentType; char name[MIXER_LONG_NAME_CHARS]; }; class Mixer { public: Mixer(); virtual ~Mixer(); void open(unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS); void open(); // open with default parameters void getPortsInfo(); Boolean enableInputPort(unsigned portIndex, char const*& errReason, MMRESULT& errCode); void close(); unsigned index; HMIXER hMixer; // valid when open DWORD dwRecLineID; // valid when open unsigned numPorts; AudioInputPort* ports; char name[MAXPNAMELEN]; }; ////////// AudioInputDevice (remaining) implementation ////////// AudioInputDevice* AudioInputDevice::createNew(UsageEnvironment& env, int inputPortNumber, unsigned char bitsPerSample, unsigned char numChannels, unsigned samplingFrequency, unsigned granularityInMS) { Boolean success; WindowsAudioInputDevice* newSource = new WindowsAudioInputDevice(env, inputPortNumber, bitsPerSample, numChannels, samplingFrequency, granularityInMS, success); if (!success) {delete newSource; newSource = NULL;} return newSource; } AudioPortNames* AudioInputDevice::getPortNames() { WindowsAudioInputDevice::initializeIfNecessary(); AudioPortNames* portNames = new AudioPortNames; portNames->numPorts = WindowsAudioInputDevice::numInputPortsTotal; portNames->portName = new char*[WindowsAudioInputDevice::numInputPortsTotal]; // If there's only one mixer, print only the port name. // If there are two or more mixers, also include the mixer name // (to disambiguate port names that may be the same name in different mixers) char portNameBuffer[2*MAXPNAMELEN+10/*slop*/]; char mixerNameBuffer[MAXPNAMELEN]; char const* portNameFmt; if (WindowsAudioInputDevice::numMixers <= 1) { portNameFmt = "%s"; } else { portNameFmt = "%s (%s)"; } unsigned curPortNum = 0; for (unsigned i = 0; i < WindowsAudioInputDevice::numMixers; ++i) { Mixer& mixer = WindowsAudioInputDevice::ourMixers[i]; if (WindowsAudioInputDevice::numMixers <= 1) { mixerNameBuffer[0] = '\0'; } else { strncpy(mixerNameBuffer, mixer.name, sizeof mixerNameBuffer); #if 0 // Hack: Simplify the mixer name, by truncating after the first space character: for (int k = 0; k < sizeof mixerNameBuffer && mixerNameBuffer[k] != '\0'; ++k) { if (mixerNameBuffer[k] == ' ') { mixerNameBuffer[k] = '\0'; break; } } #endif } for (unsigned j = 0; j < mixer.numPorts; ++j) { sprintf(portNameBuffer, portNameFmt, mixer.ports[j].name, mixerNameBuffer); portNames->portName[curPortNum++] = strDup(portNameBuffer); } } return portNames; } ////////// WindowsAudioInputDevice implementation ////////// WindowsAudioInputDevice ::WindowsAudioInputDevice(UsageEnvironment& env, int inputPortNumber, unsigned char bitsPerSample, unsigned char numChannels, unsigned samplingFrequency, unsigned granularityInMS, Boolean& success) : WindowsAudioInputDevice_common(env, inputPortNumber, bitsPerSample, numChannels, samplingFrequency, granularityInMS), fCurMixerId(-1) { success = initialSetInputPort(inputPortNumber); } WindowsAudioInputDevice::~WindowsAudioInputDevice() { if (fCurMixerId >= 0) ourMixers[fCurMixerId].close(); delete[] ourMixers; ourMixers = NULL; numMixers = numInputPortsTotal = 0; } void WindowsAudioInputDevice::initializeIfNecessary() { if (ourMixers != NULL) return; // we've already been initialized numMixers = mixerGetNumDevs(); ourMixers = new Mixer[numMixers]; // Initialize each mixer: numInputPortsTotal = 0; for (unsigned i = 0; i < numMixers; ++i) {
Mixer& mixer = ourMixers[i]; mixer.index = i; mixer.open(); if (mixer.hMixer != NULL) { // This device has a valid mixer. Get information about its ports: mixer.getPortsInfo(); mixer.close(); if (mixer.numPorts == 0) continue; numInputPortsTotal += mixer.numPorts; } else { mixer.ports = NULL; mixer.numPorts = 0; } } } Boolean WindowsAudioInputDevice::setInputPort(int portIndex) { initializeIfNecessary(); if (portIndex < 0 || portIndex >= (int)numInputPortsTotal) { // bad index envir().setResultMsg("Bad input port index\n"); return False; } // Find the mixer and port that corresponds to "portIndex": int newMixerId, portWithinMixer, portIndexCount = 0; for (newMixerId = 0; newMixerId < (int)numMixers; ++newMixerId) { int prevPortIndexCount = portIndexCount; portIndexCount += ourMixers[newMixerId].numPorts; if (portIndexCount > portIndex) { // it's with this mixer portWithinMixer = portIndex - prevPortIndexCount; break; } } // Check that this mixer is allowed: if (allowedDeviceNames != NULL) { int i; for (i = 0; allowedDeviceNames[i] != NULL; ++i) { if (strncmp(ourMixers[newMixerId].name, allowedDeviceNames[i], strlen(allowedDeviceNames[i])) == 0) { // The allowed device name is a prefix of this mixer's name break; // this mixer is allowed } } if (allowedDeviceNames[i] == NULL) { // this mixer is not on the allowed list envir().setResultMsg("Access to this audio device is not allowed\n"); return False; } } if (newMixerId != fCurMixerId) { // The mixer has changed, so close the old one and open the new one: if (fCurMixerId >= 0) ourMixers[fCurMixerId].close(); fCurMixerId = newMixerId; ourMixers[fCurMixerId].open(fNumChannels, fSamplingFrequency, fGranularityInMS); } if (portIndex != fCurPortIndex) { // Change the input port: fCurPortIndex = portIndex; char const* errReason; MMRESULT errCode; if (!ourMixers[newMixerId].enableInputPort(portWithinMixer, errReason, errCode)) { char resultMsg[100]; sprintf(resultMsg, "Failed to enable input port: %s failed (0x%08x)\n", errReason, errCode); envir().setResultMsg(resultMsg); return False; } // Later, may also need to transfer 'gain' to new port ##### } return True; } unsigned WindowsAudioInputDevice::numMixers = 0; Mixer* WindowsAudioInputDevice::ourMixers = NULL; unsigned WindowsAudioInputDevice::numInputPortsTotal = 0; ////////// Mixer and AudioInputPort implementation ////////// Mixer::Mixer() : hMixer(NULL), dwRecLineID(0), numPorts(0), ports(NULL) { } Mixer::~Mixer() { delete[] ports; } void Mixer::open(unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS) { HMIXER newHMixer = NULL; do { MIXERCAPS mc; if (mixerGetDevCaps(index, &mc, sizeof mc) != MMSYSERR_NOERROR) break; #ifdef UNICODE // Copy the mixer name: wcstombs(name, mc.szPname, MAXPNAMELEN); #else strncpy(name, mc.szPname, MAXPNAMELEN); #endif // Find the correct line for this mixer: unsigned i, uWavIn; unsigned nWavIn = waveInGetNumDevs(); for (i = 0; i < nWavIn; ++i) { WAVEINCAPS wic; if (waveInGetDevCaps(i, &wic, sizeof wic) != MMSYSERR_NOERROR) continue; MIXERLINE ml; ml.cbStruct = sizeof ml; ml.Target.dwType = MIXERLINE_TARGETTYPE_WAVEIN; #ifdef UNICODE wcsncpy(ml.Target.szPname, wic.szPname, MAXPNAMELEN); #else strncpy(ml.Target.szPname, wic.szPname, MAXPNAMELEN); #endif ml.Target.vDriverVersion = wic.vDriverVersion; ml.Target.wMid = wic.wMid; ml.Target.wPid = wic.wPid; if (mixerGetLineInfo((HMIXEROBJ)index, &ml, MIXER_GETLINEINFOF_TARGETTYPE/*|MIXER_OBJECTF_MIXER*/) == MMSYSERR_NOERROR) { // this is the right line uWavIn = i; dwRecLineID = ml.dwLineID; break; 
} } if (i >= nWavIn) break; // error: we couldn't find the right line if (mixerOpen(&newHMixer, index, (unsigned long)NULL, (unsigned long)NULL, MIXER_OBJECTF_MIXER) != MMSYSERR_NOERROR) break; if (newHMixer == NULL) break; // Sanity check: re-call "mixerGetDevCaps()" using the mixer device handle: if (mixerGetDevCaps((UINT)newHMixer, &mc, sizeof mc) != MMSYSERR_NOERROR) break; if (mc.cDestinations < 1) break; // error: this mixer has no destinations if (!WindowsAudioInputDevice_common::openWavInPort(uWavIn, numChannels, samplingFrequency, granularityInMS)) break; hMixer = newHMixer; return; } while (0); // An error occurred: close(); } void Mixer::open() { open(1, 8000, 20); } void Mixer::getPortsInfo() { MIXERCAPS mc; mixerGetDevCaps((UINT)hMixer, &mc, sizeof mc); MIXERLINE mlt; unsigned i; for (i = 0; i < mc.cDestinations; ++i) { memset(&mlt, 0, sizeof mlt); mlt.cbStruct = sizeof mlt; mlt.dwDestination = i; if (mixerGetLineInfo((HMIXEROBJ)hMixer, &mlt, MIXER_GETLINEINFOF_DESTINATION) != MMSYSERR_NOERROR) continue; if (mlt.dwLineID == dwRecLineID) break; // this is the destination we're interested in } ports = new AudioInputPort[mlt.cConnections]; numPorts = mlt.cConnections; for (i = 0; i < numPorts; ++i) { MIXERLINE mlc; memcpy(&mlc, &mlt, sizeof mlc); mlc.dwSource = i; mixerGetLineInfo((HMIXEROBJ)hMixer, &mlc, MIXER_GETLINEINFOF_SOURCE/*|MIXER_OBJECTF_HMIXER*/); ports[i].tag = mlc.dwLineID; ports[i].dwComponentType = mlc.dwComponentType; #ifdef UNICODE wcstombs(ports[i].name, mlc.szName, MIXER_LONG_NAME_CHARS); #else strncpy(ports[i].name, mlc.szName, MIXER_LONG_NAME_CHARS); #endif } // Make the microphone the first port in the list: for (i = 1; i < numPorts; ++i) { #ifdef OLD_MICROPHONE_TESTING_CODE if (_strnicmp("mic", ports[i].name, 3) == 0 || _strnicmp("mik", ports[i].name, 3) == 0) { #else if (ports[i].dwComponentType == MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE) { #endif AudioInputPort tmp = ports[0]; ports[0] = ports[i]; ports[i] = tmp; } } } Boolean Mixer::enableInputPort(unsigned portIndex, char const*& errReason, MMRESULT& errCode) { errReason = NULL; // unless there's an error AudioInputPort& port = ports[portIndex]; MIXERCONTROL mc; mc.cMultipleItems = 1; // in case it doesn't get set below MIXERLINECONTROLS mlc; #if 0 // the following doesn't seem to be needed, and can fail: mlc.cbStruct = sizeof mlc; mlc.pamxctrl = &mc; mlc.cbmxctrl = sizeof (MIXERCONTROL); mlc.dwLineID = port.tag; mlc.dwControlType = MIXERCONTROL_CONTROLTYPE_VOLUME; if ((errCode = mixerGetLineControls((HMIXEROBJ)hMixer, &mlc, MIXER_GETLINECONTROLSF_ONEBYTYPE/*|MIXER_OBJECTF_HMIXER*/)) != MMSYSERR_NOERROR) { errReason = "mixerGetLineControls()"; return False; } #endif MIXERLINE ml; memset(&ml, 0, sizeof (MIXERLINE)); ml.cbStruct = sizeof (MIXERLINE); ml.dwLineID = port.tag; if ((errCode = mixerGetLineInfo((HMIXEROBJ)hMixer, &ml, MIXER_GETLINEINFOF_LINEID)) != MMSYSERR_NOERROR) { errReason = "mixerGetLineInfo()1"; return False; } #ifdef UNICODE wchar_t portname[MIXER_LONG_NAME_CHARS+1]; wcsncpy(portname, ml.szName, MIXER_LONG_NAME_CHARS); #else char portname[MIXER_LONG_NAME_CHARS+1]; strncpy(portname, ml.szName, MIXER_LONG_NAME_CHARS); #endif memset(&ml, 0, sizeof (MIXERLINE)); ml.cbStruct = sizeof (MIXERLINE); ml.dwLineID = dwRecLineID; if ((errCode = mixerGetLineInfo((HMIXEROBJ)hMixer, &ml, MIXER_GETLINEINFOF_LINEID/*|MIXER_OBJECTF_HMIXER*/)) != MMSYSERR_NOERROR) { errReason = "mixerGetLineInfo()2"; return False; } // Get Mixer/MUX control information (need control id to set and get control details) 
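// (For clarity: the query below first asks the record-destination line for a
// MIXERCONTROL_CONTROLTYPE_MUX ("single select") control; if that fails, it
// retries with MIXERCONTROL_CONTROLTYPE_MIXER ("multiple select").  Whichever
// query succeeds fills in "mc.dwControlID", which is what's needed to read and
// set the control's details; the 0xDEADBEEF sentinel left in "mc.dwControlID"
// marks the case where neither query succeeded, and is handled by a guessing
// loop further below.)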
mlc.cbStruct = sizeof mlc; mlc.dwLineID = ml.dwLineID; mlc.cControls = 1; mc.cbStruct = sizeof mc; // Needed???##### mc.dwControlID = 0xDEADBEEF; // For testing ##### mlc.pamxctrl = &mc; mlc.cbmxctrl = sizeof mc; mlc.dwControlType = MIXERCONTROL_CONTROLTYPE_MUX; // Single Select if ((errCode = mixerGetLineControls((HMIXEROBJ)hMixer, &mlc, MIXER_GETLINECONTROLSF_ONEBYTYPE/*|MIXER_OBJECTF_HMIXER*/)) != MMSYSERR_NOERROR) { mlc.dwControlType = MIXERCONTROL_CONTROLTYPE_MIXER; // Multiple Select mixerGetLineControls((HMIXEROBJ)hMixer, &mlc, MIXER_GETLINECONTROLSF_ONEBYTYPE/*|MIXER_OBJECTF_HMIXER*/); } unsigned matchLine = 0; if (mc.cMultipleItems > 1) { // Before getting control, we need to know which line to grab. // We figure this out by listing the lines, and comparing names: MIXERCONTROLDETAILS mcd; mcd.cbStruct = sizeof mcd; mcd.cChannels = ml.cChannels; mcd.cMultipleItems = mc.cMultipleItems; MIXERCONTROLDETAILS_LISTTEXT* mcdlText = new MIXERCONTROLDETAILS_LISTTEXT[mc.cMultipleItems]; mcd.cbDetails = sizeof (MIXERCONTROLDETAILS_LISTTEXT); mcd.paDetails = mcdlText; if (mc.dwControlID != 0xDEADBEEF) { // we know the control id for real mcd.dwControlID = mc.dwControlID; if ((errCode = mixerGetControlDetails((HMIXEROBJ)hMixer, &mcd, MIXER_GETCONTROLDETAILSF_LISTTEXT/*|MIXER_OBJECTF_HMIXER*/)) != MMSYSERR_NOERROR) { delete[] mcdlText; errReason = "mixerGetControlDetails()1"; return False; } } else { // Hack: We couldn't find a MUX or MIXER control, so try to guess the control id: for (mc.dwControlID = 0; mc.dwControlID < 32; ++mc.dwControlID) { mcd.dwControlID = mc.dwControlID; if ((errCode = mixerGetControlDetails((HMIXEROBJ)hMixer, &mcd, MIXER_GETCONTROLDETAILSF_LISTTEXT/*|MIXER_OBJECTF_HMIXER*/)) == MMSYSERR_NOERROR) break; } if (mc.dwControlID == 32) { // unable to guess mux/mixer control id delete[] mcdlText; errReason = "mixerGetControlDetails()2"; return False; } } #ifdef UNICODE for (unsigned i = 0; i < mcd.cMultipleItems; ++i) { if (wcscmp(mcdlText[i].szName, portname) == 0) { matchLine = i; break; } } #else for (unsigned i = 0; i < mcd.cMultipleItems; ++i) { if (strcmp(mcdlText[i].szName, portname) == 0) { matchLine = i; break; } } #endif delete[] mcdlText; } // Now get control itself: MIXERCONTROLDETAILS mcd; mcd.cbStruct = sizeof mcd; mcd.dwControlID = mc.dwControlID; mcd.cChannels = ml.cChannels; mcd.cMultipleItems = mc.cMultipleItems; MIXERCONTROLDETAILS_BOOLEAN* mcdbState = new MIXERCONTROLDETAILS_BOOLEAN[mc.cMultipleItems]; mcd.paDetails = mcdbState; mcd.cbDetails = sizeof (MIXERCONTROLDETAILS_BOOLEAN); if ((errCode = mixerGetControlDetails((HMIXEROBJ)hMixer, &mcd, MIXER_GETCONTROLDETAILSF_VALUE/*|MIXER_OBJECTF_HMIXER*/)) != MMSYSERR_NOERROR) { delete[] mcdbState; errReason = "mixerGetControlDetails()3"; return False; } for (unsigned j = 0; j < mcd.cMultipleItems; ++j) { mcdbState[j].fValue = (j == matchLine); } if ((errCode = mixerSetControlDetails((HMIXEROBJ)hMixer, &mcd, MIXER_OBJECTF_HMIXER)) != MMSYSERR_NOERROR) { delete[] mcdbState; errReason = "mixerSetControlDetails()"; return False; } delete[] mcdbState; return True; } void Mixer::close() { WindowsAudioInputDevice_common::waveIn_close(); if (hMixer != NULL) mixerClose(hMixer); hMixer = NULL; dwRecLineID = 0; } live/WindowsAudioInputDevice/WindowsAudioInputDevice_noMixer.cpp000444 001751 000000 00000013110 12265042432 025470 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free 
Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.live555.com/>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 2001-2004 Live Networks, Inc. All rights reserved. // Windows implementation of a generic audio input device // This version does not use Windows' built-in software mixer. // Implementation #include <WindowsAudioInputDevice_noMixer.hh> ////////// AudioInputPort definition ////////// class AudioInputPort { public: void open(unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS); void open(); // open with default parameters void close(); public: int index; char name[MAXPNAMELEN]; }; ////////// AudioInputDevice (remaining) implementation ////////// AudioInputDevice* AudioInputDevice::createNew(UsageEnvironment& env, int inputPortNumber, unsigned char bitsPerSample, unsigned char numChannels, unsigned samplingFrequency, unsigned granularityInMS) { Boolean success; WindowsAudioInputDevice* newSource = new WindowsAudioInputDevice(env, inputPortNumber, bitsPerSample, numChannels, samplingFrequency, granularityInMS, success); if (!success) {delete newSource; newSource = NULL;} return newSource; } AudioPortNames* AudioInputDevice::getPortNames() { WindowsAudioInputDevice::initializeIfNecessary(); AudioPortNames* portNames = new AudioPortNames; portNames->numPorts = WindowsAudioInputDevice::numAudioInputPorts; portNames->portName = new char*[WindowsAudioInputDevice::numAudioInputPorts]; for (unsigned i = 0; i < WindowsAudioInputDevice::numAudioInputPorts; ++i) { AudioInputPort& audioInputPort = WindowsAudioInputDevice::ourAudioInputPorts[i]; portNames->portName[i] = strDup(audioInputPort.name); } return portNames; } ////////// WindowsAudioInputDevice implementation ////////// WindowsAudioInputDevice ::WindowsAudioInputDevice(UsageEnvironment& env, int inputPortNumber, unsigned char bitsPerSample, unsigned char numChannels, unsigned samplingFrequency, unsigned granularityInMS, Boolean& success) : WindowsAudioInputDevice_common(env, inputPortNumber, bitsPerSample, numChannels, samplingFrequency, granularityInMS) { success = initialSetInputPort(inputPortNumber); } WindowsAudioInputDevice::~WindowsAudioInputDevice() { if (fCurPortIndex >= 0) ourAudioInputPorts[fCurPortIndex].close(); delete[] ourAudioInputPorts; ourAudioInputPorts = NULL; numAudioInputPorts = 0; } void WindowsAudioInputDevice::initializeIfNecessary() { if (ourAudioInputPorts != NULL) return; // we've already been initialized numAudioInputPorts = waveInGetNumDevs(); ourAudioInputPorts = new AudioInputPort[numAudioInputPorts]; // Initialize each audio input port for (unsigned i = 0; i < numAudioInputPorts; ++i) { AudioInputPort& port = ourAudioInputPorts[i]; port.index = i; port.open(); // to set the port name port.close(); } } Boolean WindowsAudioInputDevice::setInputPort(int portIndex) { initializeIfNecessary(); if (portIndex < 0 || portIndex >= (int)numAudioInputPorts) { // bad index envir().setResultMsg("Bad input port index\n"); return False; } // Check that this port is allowed: if (allowedDeviceNames != NULL) { int i; for (i = 0; allowedDeviceNames[i] != NULL; ++i) { if
(strncmp(ourAudioInputPorts[portIndex].name, allowedDeviceNames[i], strlen(allowedDeviceNames[i])) == 0) { // The allowed device name is a prefix of this port's name break; // this port is allowed } } if (allowedDeviceNames[i] == NULL) { // this port is not on the allowed list envir().setResultMsg("Access to this audio device is not allowed\n"); return False; } } if (portIndex != fCurPortIndex) { // The port has changed, so close the old one and open the new one: if (fCurPortIndex >= 0) ourAudioInputPorts[fCurPortIndex].close(); fCurPortIndex = portIndex; ourAudioInputPorts[fCurPortIndex].open(fNumChannels, fSamplingFrequency, fGranularityInMS); } fCurPortIndex = portIndex; return True; } unsigned WindowsAudioInputDevice::numAudioInputPorts = 0; AudioInputPort* WindowsAudioInputDevice::ourAudioInputPorts = NULL; ////////// AudioInputPort implementation ////////// void AudioInputPort::open(unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS) { do { // Get the port name: WAVEINCAPS wic; if (waveInGetDevCaps(index, &wic, sizeof wic) != MMSYSERR_NOERROR) { name[0] = '\0'; break; } #ifdef UNICODE // Copy the mixer name: wcstombs(name, wic.szPname, MAXPNAMELEN); #else strncpy(name, wic.szPname, MAXPNAMELEN); #endif if (!WindowsAudioInputDevice_common::openWavInPort(index, numChannels, samplingFrequency, granularityInMS)) break; return; } while (0); // An error occurred: close(); } void AudioInputPort::open() { open(1, 8000, 20); } void AudioInputPort::close() { WindowsAudioInputDevice_common::waveIn_close(); } live/WindowsAudioInputDevice/showAudioInputPorts.cpp000444 001751 000000 00000002507 12265042432 023235 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.live555.com/>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A program that prints out this computer's audio input ports #include "AudioInputDevice.hh" #include <stdio.h> int main(int argc, char** argv) { AudioPortNames* portNames = AudioInputDevice::getPortNames(); if (portNames == NULL) { fprintf(stderr, "AudioInputDevice::getPortNames() failed!\n"); exit(1); } printf("%d available audio input ports:\n", portNames->numPorts); for (unsigned i = 0; i < portNames->numPorts; ++i) { printf("%d\t%s\n", i, portNames->portName[i]); } return 0; } live/WindowsAudioInputDevice/WindowsAudioInputDevice_common.hh000444 001751 000000 00000006014 12265042432 025161 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.live555.com/>.)
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Windows implementation of a generic audio input device // Base class for both library versions: // One that uses Windows' built-in software mixer; another that doesn't. // C++ header #ifndef _WINDOWS_AUDIO_INPUT_DEVICE_COMMON_HH #define _WINDOWS_AUDIO_INPUT_DEVICE_COMMON_HH #ifndef _AUDIO_INPUT_DEVICE_HH #include "AudioInputDevice.hh" #endif class WindowsAudioInputDevice_common: public AudioInputDevice { public: static Boolean openWavInPort(int index, unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS); static void waveIn_close(); static void waveInProc(WAVEHDR* hdr); // Windows audio callback function protected: WindowsAudioInputDevice_common(UsageEnvironment& env, int inputPortNumber, unsigned char bitsPerSample, unsigned char numChannels, unsigned samplingFrequency, unsigned granularityInMS); // virtual base class virtual ~WindowsAudioInputDevice_common(); Boolean initialSetInputPort(int portIndex); protected: int fCurPortIndex; private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void doStopGettingFrames(); virtual double getAverageLevel() const; private: static void audioReadyPoller(void* clientData); void audioReadyPoller1(); void onceAudioIsReady(); // Audio input buffering: static Boolean waveIn_open(unsigned uid, WAVEFORMATEX& wfx); static void waveIn_reset(); // used to implement both of the above static unsigned readFromBuffers(unsigned char* to, unsigned numBytesWanted, struct timeval& creationTime); static void releaseHeadBuffer(); // from the input header queue private: static unsigned _bitsPerSample; static HWAVEIN shWaveIn; static unsigned blockSize, numBlocks; static unsigned char* readData; // buffer for incoming audio data static DWORD bytesUsedAtReadHead; // number of bytes that have already been read at head static double uSecsPerByte; // used to adjust the time for # bytes consumed since arrival static double averageLevel; static WAVEHDR *readHdrs, *readHead, *readTail; // input header queue static struct timeval* readTimes; static HANDLE hAudioReady; // audio ready event Boolean fHaveStarted; unsigned fTotalPollingDelay; // uSeconds }; #endif live/WindowsAudioInputDevice/WindowsAudioInputDevice_mixer.hh000444 001751 000000 00000003655 12265042432 025025 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.live555.com/>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Windows implementation of a generic audio input device // This version uses Windows' built-in software mixer. // C++ header // // To use this, call "AudioInputDevice::createNew()". // You can also call "AudioInputDevice::getPortNames()" to get a list // of port names. #ifndef _WINDOWS_AUDIO_INPUT_DEVICE_MIXER_HH #define _WINDOWS_AUDIO_INPUT_DEVICE_MIXER_HH #ifndef _WINDOWS_AUDIO_INPUT_DEVICE_COMMON_HH #include "WindowsAudioInputDevice_common.hh" #endif class WindowsAudioInputDevice: public WindowsAudioInputDevice_common { private: friend class AudioInputDevice; WindowsAudioInputDevice(UsageEnvironment& env, int inputPortNumber, unsigned char bitsPerSample, unsigned char numChannels, unsigned samplingFrequency, unsigned granularityInMS, Boolean& success); // called only by createNew() virtual ~WindowsAudioInputDevice(); static void initializeIfNecessary(); private: // redefined virtual functions: virtual Boolean setInputPort(int portIndex); private: static unsigned numMixers; static class Mixer* ourMixers; static unsigned numInputPortsTotal; int fCurMixerId; }; #endif live/WindowsAudioInputDevice/WindowsAudioInputDevice.mak000444 001751 000000 00000011000 12265042432 023761 0ustar00rsfwheel000000 000000 INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include ##### Change the following for your environment: # Comment out the following line to produce Makefiles that generate debuggable code: NODEBUG=1 # The following definition ensures that we are properly matching # the WinSock2 library file with the correct header files. # (will link with "ws2_32.lib" and include "winsock2.h" & "Ws2tcpip.h") TARGETOS = WINNT # If for some reason you wish to use WinSock1 instead, uncomment the # following two definitions. # (will link with "wsock32.lib" and include "winsock.h") #TARGETOS = WIN95 #APPVER = 4.0 !include <win32.mak> UI_OPTS = $(guilflags) $(guilibsdll) # Use the following to get a console (e.g., for debugging): CONSOLE_UI_OPTS = $(conlflags) $(conlibsdll) CPU=i386 TOOLS32 = I:\Program Files\DevStudio\Vc COMPILE_OPTS = $(INCLUDES) $(cdebug) $(cflags) $(cvarsdll) -I.
-I"$(TOOLS32)\include" C = c C_COMPILER = "$(TOOLS32)\bin\cl" C_FLAGS = $(COMPILE_OPTS) CPP = cpp CPLUSPLUS_COMPILER = $(C_COMPILER) CPLUSPLUS_FLAGS = $(COMPILE_OPTS) OBJ = obj LINK = $(link) -out: LIBRARY_LINK = lib -out: LINK_OPTS_0 = $(linkdebug) msvcirt.lib LIBRARY_LINK_OPTS = LINK_OPTS = $(LINK_OPTS_0) $(UI_OPTS) CONSOLE_LINK_OPTS = $(LINK_OPTS_0) $(CONSOLE_UI_OPTS) SERVICE_LINK_OPTS = kernel32.lib advapi32.lib shell32.lib -subsystem:console,$(APPVER) LIB_SUFFIX = lib LIBS_FOR_CONSOLE_APPLICATION = LIBS_FOR_GUI_APPLICATION = MULTIMEDIA_LIBS = winmm.lib EXE = .exe rc32 = "$(TOOLS32)\bin\rc" .rc.res: $(rc32) $< ##### End of variables to change WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB = libWindowsAudioInputDevice_noMixer.$(LIB_SUFFIX) WINDOWSAUDIOINPUTDEVICE_MIXER_LIB = libWindowsAudioInputDevice_mixer.$(LIB_SUFFIX) ALL = $(WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB) $(WINDOWSAUDIOINPUTDEVICE_MIXER_LIB) \ showAudioInputPorts_noMixer$(EXE) showAudioInputPorts_mixer$(EXE) all:: $(ALL) .$(C).$(OBJ): $(C_COMPILER) -c $(C_FLAGS) $< .$(CPP).$(OBJ): $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $< WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB_OBJS = WindowsAudioInputDevice_common.$(OBJ) WindowsAudioInputDevice_noMixer.$(OBJ) WINDOWSAUDIOINPUTDEVICE_MIXER_LIB_OBJS = WindowsAudioInputDevice_common.$(OBJ) WindowsAudioInputDevice_mixer.$(OBJ) WindowsAudioInputDevice_common.$(CPP): WindowsAudioInputDevice_common.hh WindowsAudioInputDevice_noMixer.$(CPP): WindowsAudioInputDevice_noMixer.hh WindowsAudioInputDevice_noMixer.hh: WindowsAudioInputDevice_common.hh WindowsAudioInputDevice_mixer.$(CPP): WindowsAudioInputDevice_mixer.hh WindowsAudioInputDevice_mixer.hh: WindowsAudioInputDevice_common.hh $(WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB): $(WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB_OBJS) \ $(PLATFORM_SPECIFIC_LIB_OBJS) $(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) \ $(WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB_OBJS) $(WINDOWSAUDIOINPUTDEVICE_MIXER_LIB): $(WINDOWSAUDIOINPUTDEVICE_MIXER_LIB_OBJS) \ $(PLATFORM_SPECIFIC_LIB_OBJS) $(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) \ $(WINDOWSAUDIOINPUTDEVICE_MIXER_LIB_OBJS) USAGE_ENVIRONMENT_DIR = ../UsageEnvironment USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(LIB_SUFFIX) BASIC_USAGE_ENVIRONMENT_DIR = ../BasicUsageEnvironment BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(LIB_SUFFIX) LIVEMEDIA_DIR = ../liveMedia LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(LIB_SUFFIX) GROUPSOCK_DIR = ../groupsock GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(LIB_SUFFIX) LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \ $(USAGE_ENVIRONMENT_LIB) $(BASIC_USAGE_ENVIRONMENT_LIB) LOCAL_LIBS_NOMIXER = $(WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB) $(LOCAL_LIBS) LOCAL_LIBS_MIXER = $(WINDOWSAUDIOINPUTDEVICE_MIXER_LIB) $(LOCAL_LIBS) MULTIMEDIA_LIBS = winmm.lib LIBS_NOMIXER = $(LOCAL_LIBS_NOMIXER) $(LIBS_FOR_CONSOLE_APPLICATION) $(MULTIMEDIA_LIBS) LIBS_MIXER = $(LOCAL_LIBS_MIXER) $(LIBS_FOR_CONSOLE_APPLICATION) $(MULTIMEDIA_LIBS) SHOW_AUDIO_INPUT_PORTS_OBJS = showAudioInputPorts.$(OBJ) showAudioInputPorts_noMixer$(EXE): $(SHOW_AUDIO_INPUT_PORTS_OBJS) $(LOCAL_LIBS_NOMIXER) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(SHOW_AUDIO_INPUT_PORTS_OBJS) $(LIBS_NOMIXER) showAudioInputPorts_mixer$(EXE): $(SHOW_AUDIO_INPUT_PORTS_OBJS) $(LOCAL_LIBS_MIXER) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(SHOW_AUDIO_INPUT_PORTS_OBJS) $(LIBS_MIXER) clean: -rm -rf *.$(OBJ) $(ALL) tcl2array$(EXE) core *.core *~ -rm -rf $(TCL_EMBEDDED_CPLUSPLUS_FILES) $(TK_EMBEDDED_CPLUSPLUS_FILES) $(MISC_EMBEDDED_CPLUSPLUS_FILES) ##### Any 
additional, platform-specific rules come here: live/testProgs/registerRTSPStream.cpp000400 001751 000000 00000006760 12265042432 020147 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.live555.com/>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A demonstration application that uses our custom RTSP "REGISTER" command to register a stream // (given by a "rtsp://" URL) with a RTSP client or proxy server // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" char const* programName; UsageEnvironment* env; Boolean requestStreamingViaTCP = False; char const* username = NULL; char const* password = NULL; void registerResponseHandler(RTSPClient* rtspClient, int resultCode, char* resultString) { Medium::close(rtspClient); // We're done: exit(0); } void usage() { *env << "usage: " << programName << " [-t] [-u <username> <password>] " "<remote-client-name-or-address> <remote-client-port-num> <rtsp-url-to-register>" " [proxy-URL-suffix]\n"; exit(1); } int main(int argc, char const** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Parse command-line options and arguments: // (Unfortunately we can't use getopt() here; Windoze doesn't have it) programName = argv[0]; while (argc > 2) { char const* const opt = argv[1]; if (opt[0] != '-') break; switch (opt[1]) { case 't': { // ask the remote client to access the stream via TCP instead of UDP requestStreamingViaTCP = True; break; } case 'u': { // specify a username and password if (argc < 4) usage(); // there's no argv[3] (for the "password") username = argv[2]; password = argv[3]; argv+=2; argc-=2; break; } default: { usage(); break; } } ++argv; --argc; } if (argc != 4 && argc != 5) usage(); char const* remoteClientNameOrAddress = argv[1]; portNumBits remoteClientPortNum; if (sscanf(argv[2], "%hu", &remoteClientPortNum) != 1 || remoteClientPortNum == 0 || remoteClientPortNum == 0xFFFF) usage(); char const* rtspURLToRegister = argv[3]; char const* proxyURLSuffix = argc == 5 ? argv[4] : NULL; Authenticator* ourAuthenticator = username == NULL ? NULL : new Authenticator(username, password); // We have the command-line arguments.
Send the command: RTSPRegisterSender::createNew(*env, remoteClientNameOrAddress, remoteClientPortNum, rtspURLToRegister, registerResponseHandler, ourAuthenticator, requestStreamingViaTCP, proxyURLSuffix, False/*reuseConnection*/, 1/*verbosityLevel*/, programName); // Note: This object will be deleted later, by the response handler env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } live/testProgs/testMP3Streamer.cpp000400 001751 000000 00000015252 12265042432 017434 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.live555.com/>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that streams a MP3 file via RTP/RTCP // main program #include "liveMedia.hh" #include "GroupsockHelper.hh" #include "BasicUsageEnvironment.hh" // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following: //#define STREAM_USING_ADUS 1 // To also reorder ADUs before streaming, uncomment the following: //#define INTERLEAVE_ADUS 1 // (For more information about ADUs and interleaving, // see <http://www.live555.com/rtp-mp3/>) // To stream using "source-specific multicast" (SSM), uncomment the following: //#define USE_SSM 1 #ifdef USE_SSM Boolean const isSSM = True; #else Boolean const isSSM = False; #endif // To set up an internal RTSP server, uncomment the following: //#define IMPLEMENT_RTSP_SERVER 1 // (Note that this RTSP server works for multicast only) #ifdef IMPLEMENT_RTSP_SERVER RTSPServer* rtspServer; #endif UsageEnvironment* env; // A structure to hold the state of the current session. // It is used in the "afterPlaying()" function to clean up the session. struct sessionState_t { FramedSource* source; RTPSink* sink; RTCPInstance* rtcpInstance; Groupsock* rtpGroupsock; Groupsock* rtcpGroupsock; } sessionState; char const* inputFileName = "test.mp3"; void play(); // forward int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create 'groupsocks' for RTP and RTCP: char const* destinationAddressStr #ifdef USE_SSM = "232.255.42.42"; #else = "239.255.42.42"; // Note: This is a multicast address. If you wish to stream using // unicast instead, then replace this string with the unicast address // of the (single) destination. (You may also need to make a similar // change to the receiver program.)
#endif const unsigned short rtpPortNum = 6666; const unsigned short rtcpPortNum = rtpPortNum+1; const unsigned char ttl = 1; // low, in case routers don't admin scope struct in_addr destinationAddress; destinationAddress.s_addr = our_inet_addr(destinationAddressStr); const Port rtpPort(rtpPortNum); const Port rtcpPort(rtcpPortNum); sessionState.rtpGroupsock = new Groupsock(*env, destinationAddress, rtpPort, ttl); sessionState.rtcpGroupsock = new Groupsock(*env, destinationAddress, rtcpPort, ttl); #ifdef USE_SSM sessionState.rtpGroupsock->multicastSendOnly(); sessionState.rtcpGroupsock->multicastSendOnly(); #endif // Create a 'MP3 RTP' sink from the RTP 'groupsock': #ifdef STREAM_USING_ADUS unsigned char rtpPayloadFormat = 96; // A dynamic payload format code sessionState.sink = MP3ADURTPSink::createNew(*env, sessionState.rtpGroupsock, rtpPayloadFormat); #else sessionState.sink = MPEG1or2AudioRTPSink::createNew(*env, sessionState.rtpGroupsock); #endif // Create (and start) a 'RTCP instance' for this RTP sink: const unsigned estimatedSessionBandwidth = 160; // in kbps; for RTCP b/w share const unsigned maxCNAMElen = 100; unsigned char CNAME[maxCNAMElen+1]; gethostname((char*)CNAME, maxCNAMElen); CNAME[maxCNAMElen] = '\0'; // just in case sessionState.rtcpInstance = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock, estimatedSessionBandwidth, CNAME, sessionState.sink, NULL /* we're a server */, isSSM); // Note: This starts RTCP running automatically #ifdef IMPLEMENT_RTSP_SERVER rtspServer = RTSPServer::createNew(*env); // Note that this (attempts to) start a server on the default RTSP server // port: 554. To use a different port number, add it as an extra // (optional) parameter to the "RTSPServer::createNew()" call above. if (rtspServer == NULL) { *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n"; exit(1); } ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName, "Session streamed by \"testMP3Streamer\"", isSSM); sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, sessionState.rtcpInstance)); rtspServer->addServerMediaSession(sms); char* url = rtspServer->rtspURL(sms); *env << "Play this stream using the URL \"" << url << "\"\n"; delete[] url; #endif play(); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* clientData); // forward void play() { // Open the file as a 'MP3 file source': sessionState.source = MP3FileSource::createNew(*env, inputFileName); if (sessionState.source == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a MP3 file source\n"; exit(1); } #ifdef STREAM_USING_ADUS // Add a filter that converts the source MP3s to ADUs: sessionState.source = ADUFromMP3Source::createNew(*env, sessionState.source); if (sessionState.source == NULL) { *env << "Unable to create a MP3->ADU filter for the source\n"; exit(1); } #ifdef INTERLEAVE_ADUS // Add another filter that interleaves the ADUs before packetizing them: unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own order... 
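// (Illustrative note: the interleave cycle gives the order in which each group
// of ADUs is transmitted; with the cycle {0,2,1,3}, ADUs 0,1,2,3 of each group
// of 4 are sent in the order 0,2,1,3, so that originally-adjacent ADUs no
// longer travel in adjacent packets, and a single packet loss damages only
// non-contiguous audio frames.)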
unsigned const interleaveCycleSize = (sizeof interleaveCycle)/(sizeof (unsigned char)); Interleaving interleaving(interleaveCycleSize, interleaveCycle); sessionState.source = MP3ADUinterleaver::createNew(*env, interleaving, sessionState.source); if (sessionState.source == NULL) { *env << "Unable to create an ADU interleaving filter for the source\n"; exit(1); } #endif #endif // Finally, start the streaming: *env << "Beginning streaming...\n"; sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL); } void afterPlaying(void* /*clientData*/) { *env << "...done streaming\n"; sessionState.sink->stopPlaying(); // End this loop by closing the current source: Medium::close(sessionState.source); // And start another loop: play(); } live/testProgs/Makefile.tail000444 001751 000000 00000017560 12265042432 016332 0ustar00rsfwheel000000 000000 ##### End of variables to change MULTICAST_STREAMER_APPS = testMP3Streamer$(EXE) testMPEG1or2VideoStreamer$(EXE) testMPEG1or2AudioVideoStreamer$(EXE) testMPEG2TransportStreamer$(EXE) testMPEG4VideoStreamer$(EXE) testH264VideoStreamer$(EXE) testH265VideoStreamer$(EXE) testDVVideoStreamer$(EXE) testWAVAudioStreamer$(EXE) testAMRAudioStreamer$(EXE) vobStreamer$(EXE) MULTICAST_RECEIVER_APPS = testMP3Receiver$(EXE) testMPEG1or2VideoReceiver$(EXE) testMPEG2TransportReceiver$(EXE) sapWatch$(EXE) MULTICAST_MISC_APPS = testRelay$(EXE) testReplicator$(EXE) MULTICAST_APPS = $(MULTICAST_STREAMER_APPS) $(MULTICAST_RECEIVER_APPS) $(MULTICAST_MISC_APPS) UNICAST_STREAMER_APPS = testOnDemandRTSPServer$(EXE) UNICAST_RECEIVER_APPS = testRTSPClient$(EXE) openRTSP$(EXE) playSIP$(EXE) UNICAST_APPS = $(UNICAST_STREAMER_APPS) $(UNICAST_RECEIVER_APPS) MISC_APPS = testMPEG1or2Splitter$(EXE) testMPEG1or2ProgramToTransportStream$(EXE) testH264VideoToTransportStream$(EXE) testH265VideoToTransportStream$(EXE) MPEG2TransportStreamIndexer$(EXE) testMPEG2TransportStreamTrickPlay$(EXE) registerRTSPStream$(EXE) PREFIX = /usr/local ALL = $(MULTICAST_APPS) $(UNICAST_APPS) $(MISC_APPS) all: $(ALL) extra: testGSMStreamer$(EXE) .$(C).$(OBJ): $(C_COMPILER) -c $(C_FLAGS) $< .$(CPP).$(OBJ): $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $< MP3_STREAMER_OBJS = testMP3Streamer.$(OBJ) MP3_RECEIVER_OBJS = testMP3Receiver.$(OBJ) RELAY_OBJS = testRelay.$(OBJ) REPLICATOR_OBJS = testReplicator.$(OBJ) MPEG_1OR2_SPLITTER_OBJS = testMPEG1or2Splitter.$(OBJ) MPEG_1OR2_VIDEO_STREAMER_OBJS = testMPEG1or2VideoStreamer.$(OBJ) MPEG_1OR2_VIDEO_RECEIVER_OBJS = testMPEG1or2VideoReceiver.$(OBJ) MPEG2_TRANSPORT_RECEIVER_OBJS = testMPEG2TransportReceiver.$(OBJ) MPEG_1OR2_AUDIO_VIDEO_STREAMER_OBJS = testMPEG1or2AudioVideoStreamer.$(OBJ) MPEG2_TRANSPORT_STREAMER_OBJS = testMPEG2TransportStreamer.$(OBJ) MPEG4_VIDEO_STREAMER_OBJS = testMPEG4VideoStreamer.$(OBJ) H264_VIDEO_STREAMER_OBJS = testH264VideoStreamer.$(OBJ) H265_VIDEO_STREAMER_OBJS = testH265VideoStreamer.$(OBJ) DV_VIDEO_STREAMER_OBJS = testDVVideoStreamer.$(OBJ) WAV_AUDIO_STREAMER_OBJS = testWAVAudioStreamer.$(OBJ) AMR_AUDIO_STREAMER_OBJS = testAMRAudioStreamer.$(OBJ) ON_DEMAND_RTSP_SERVER_OBJS = testOnDemandRTSPServer.$(OBJ) VOB_STREAMER_OBJS = vobStreamer.$(OBJ) TEST_RTSP_CLIENT_OBJS = testRTSPClient.$(OBJ) OPEN_RTSP_OBJS = openRTSP.$(OBJ) playCommon.$(OBJ) PLAY_SIP_OBJS = playSIP.$(OBJ) playCommon.$(OBJ) SAP_WATCH_OBJS = sapWatch.$(OBJ) MPEG_1OR2_PROGRAM_TO_TRANSPORT_STREAM_OBJS = testMPEG1or2ProgramToTransportStream.$(OBJ) H264_VIDEO_TO_TRANSPORT_STREAM_OBJS = testH264VideoToTransportStream.$(OBJ) H265_VIDEO_TO_TRANSPORT_STREAM_OBJS = 
testH265VideoToTransportStream.$(OBJ) MPEG2_TRANSPORT_STREAM_INDEXER_OBJS = MPEG2TransportStreamIndexer.$(OBJ) MPEG2_TRANSPORT_STREAM_TRICK_PLAY_OBJS = testMPEG2TransportStreamTrickPlay.$(OBJ) REGISTER_RTSP_STREAM_OBJS = registerRTSPStream.$(OBJ) GSM_STREAMER_OBJS = testGSMStreamer.$(OBJ) testGSMEncoder.$(OBJ) openRTSP.$(CPP): playCommon.hh playCommon.$(CPP): playCommon.hh playSIP.$(CPP): playCommon.hh USAGE_ENVIRONMENT_DIR = ../UsageEnvironment USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(libUsageEnvironment_LIB_SUFFIX) BASIC_USAGE_ENVIRONMENT_DIR = ../BasicUsageEnvironment BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(libBasicUsageEnvironment_LIB_SUFFIX) LIVEMEDIA_DIR = ../liveMedia LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(libliveMedia_LIB_SUFFIX) GROUPSOCK_DIR = ../groupsock GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(libgroupsock_LIB_SUFFIX) LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \ $(BASIC_USAGE_ENVIRONMENT_LIB) $(USAGE_ENVIRONMENT_LIB) LIBS = $(LOCAL_LIBS) $(LIBS_FOR_CONSOLE_APPLICATION) testMP3Streamer$(EXE): $(MP3_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MP3_STREAMER_OBJS) $(LIBS) testMP3Receiver$(EXE): $(MP3_RECEIVER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MP3_RECEIVER_OBJS) $(LIBS) testRelay$(EXE): $(RELAY_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(RELAY_OBJS) $(LIBS) testReplicator$(EXE): $(REPLICATOR_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(REPLICATOR_OBJS) $(LIBS) testMPEG1or2Splitter$(EXE): $(MPEG_1OR2_SPLITTER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG_1OR2_SPLITTER_OBJS) $(LIBS) testMPEG1or2VideoStreamer$(EXE): $(MPEG_1OR2_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG_1OR2_VIDEO_STREAMER_OBJS) $(LIBS) testMPEG1or2VideoReceiver$(EXE): $(MPEG_1OR2_VIDEO_RECEIVER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG_1OR2_VIDEO_RECEIVER_OBJS) $(LIBS) testMPEG1or2AudioVideoStreamer$(EXE): $(MPEG_1OR2_AUDIO_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG_1OR2_AUDIO_VIDEO_STREAMER_OBJS) $(LIBS) testMPEG2TransportStreamer$(EXE): $(MPEG2_TRANSPORT_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG2_TRANSPORT_STREAMER_OBJS) $(LIBS) testMPEG2TransportReceiver$(EXE): $(MPEG2_TRANSPORT_RECEIVER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG2_TRANSPORT_RECEIVER_OBJS) $(LIBS) testMPEG4VideoStreamer$(EXE): $(MPEG4_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG4_VIDEO_STREAMER_OBJS) $(LIBS) testH264VideoStreamer$(EXE): $(H264_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(H264_VIDEO_STREAMER_OBJS) $(LIBS) testH265VideoStreamer$(EXE): $(H265_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(H265_VIDEO_STREAMER_OBJS) $(LIBS) testDVVideoStreamer$(EXE): $(DV_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(DV_VIDEO_STREAMER_OBJS) $(LIBS) testWAVAudioStreamer$(EXE): $(WAV_AUDIO_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(WAV_AUDIO_STREAMER_OBJS) $(LIBS) testAMRAudioStreamer$(EXE): $(AMR_AUDIO_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(AMR_AUDIO_STREAMER_OBJS) $(LIBS) testOnDemandRTSPServer$(EXE): $(ON_DEMAND_RTSP_SERVER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(ON_DEMAND_RTSP_SERVER_OBJS) $(LIBS) vobStreamer$(EXE): $(VOB_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(VOB_STREAMER_OBJS) $(LIBS) testRTSPClient$(EXE): 
$(TEST_RTSP_CLIENT_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(TEST_RTSP_CLIENT_OBJS) $(LIBS) openRTSP$(EXE): $(OPEN_RTSP_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(OPEN_RTSP_OBJS) $(LIBS) playSIP$(EXE): $(PLAY_SIP_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(PLAY_SIP_OBJS) $(LIBS) sapWatch$(EXE): $(SAP_WATCH_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(SAP_WATCH_OBJS) $(LIBS) testMPEG1or2ProgramToTransportStream$(EXE): $(MPEG_1OR2_PROGRAM_TO_TRANSPORT_STREAM_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG_1OR2_PROGRAM_TO_TRANSPORT_STREAM_OBJS) $(LIBS) testH264VideoToTransportStream$(EXE): $(H264_VIDEO_TO_TRANSPORT_STREAM_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(H264_VIDEO_TO_TRANSPORT_STREAM_OBJS) $(LIBS) testH265VideoToTransportStream$(EXE): $(H265_VIDEO_TO_TRANSPORT_STREAM_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(H265_VIDEO_TO_TRANSPORT_STREAM_OBJS) $(LIBS) MPEG2TransportStreamIndexer$(EXE): $(MPEG2_TRANSPORT_STREAM_INDEXER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG2_TRANSPORT_STREAM_INDEXER_OBJS) $(LIBS) testMPEG2TransportStreamTrickPlay$(EXE): $(MPEG2_TRANSPORT_STREAM_TRICK_PLAY_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG2_TRANSPORT_STREAM_TRICK_PLAY_OBJS) $(LIBS) registerRTSPStream$(EXE): $(REGISTER_RTSP_STREAM_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(REGISTER_RTSP_STREAM_OBJS) $(LIBS) testGSMStreamer$(EXE): $(GSM_STREAMER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(GSM_STREAMER_OBJS) $(LIBS) clean: -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~ install: $(ALL) install -d $(DESTDIR)$(PREFIX)/bin install -m 755 $(ALL) $(DESTDIR)$(PREFIX)/bin ##### Any additional, platform-specific rules come here: live/testProgs/playSIP.cpp000444 001751 000000 00000015314 12265042432 015762 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.live555.com/>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A SIP client test program that opens a SIP URL argument, // and extracts the data from each incoming RTP stream.
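// (Illustrative note on the "getLine()" helper defined below: it tokenizes a
// buffer destructively, overwriting the '\r' and/or '\n' that ends the current
// line with '\0', and returning a pointer to the start of the next line, or
// NULL once no line terminator remains.  For example, given the buffer
// "a=1\r\nb=2", the first call NUL-terminates "a=1" and returns a pointer to
// "b=2".)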
#include "playCommon.hh" #include "SIPClient.hh" static char* getLine(char* startOfLine) { // returns the start of the next line, or NULL if none for (char* ptr = startOfLine; *ptr != '\0'; ++ptr) { if (*ptr == '\r' || *ptr == '\n') { // We found the end of the line *ptr++ = '\0'; if (*ptr == '\n') ++ptr; return ptr; } } return NULL; } SIPClient* ourSIPClient = NULL; Medium* createClient(UsageEnvironment& env, char const* /*url*/, int verbosityLevel, char const* applicationName) { // First, trim any directory prefixes from "applicationName": char const* suffix = &applicationName[strlen(applicationName)]; while (suffix != applicationName) { if (*suffix == '/' || *suffix == '\\') { applicationName = ++suffix; break; } --suffix; } extern unsigned char desiredAudioRTPPayloadFormat; extern char* mimeSubtype; return ourSIPClient = SIPClient::createNew(env, desiredAudioRTPPayloadFormat, mimeSubtype, verbosityLevel, applicationName); } // The following function is implemented, but is not used for "playSIP": void assignClient(Medium* /*client*/) { } void getOptions(RTSPClient::responseHandler* afterFunc) { ourSIPClient->envir().setResultMsg("NOT SUPPORTED IN CLIENT"); afterFunc(NULL, -1, strDup(ourSIPClient->envir().getResultMsg())); } void getSDPDescription(RTSPClient::responseHandler* afterFunc) { extern char* proxyServerName; if (proxyServerName != NULL) { // Tell the SIP client about the proxy: NetAddressList addresses(proxyServerName); if (addresses.numAddresses() == 0) { ourSIPClient->envir() << "Failed to find network address for \"" << proxyServerName << "\"\n"; } else { NetAddress address = *(addresses.firstAddress()); unsigned proxyServerAddress // later, allow for IPv6 ##### = *(unsigned*)(address.data()); extern unsigned short proxyServerPortNum; if (proxyServerPortNum == 0) proxyServerPortNum = 5060; // default ourSIPClient->setProxyServer(proxyServerAddress, proxyServerPortNum); } } extern unsigned short desiredPortNum; unsigned short clientStartPortNum = desiredPortNum; if (clientStartPortNum == 0) clientStartPortNum = 8000; // default ourSIPClient->setClientStartPortNum(clientStartPortNum); extern char const* streamURL; char const* username = ourAuthenticator == NULL ? NULL : ourAuthenticator->username(); char const* password = ourAuthenticator == NULL ? NULL : ourAuthenticator->password(); char* result; if (username != NULL && password != NULL) { result = ourSIPClient->inviteWithPassword(streamURL, username, password); } else { result = ourSIPClient->invite(streamURL); } int resultCode = result == NULL ?
-1 : 0; afterFunc(NULL, resultCode, strDup(result)); } void setupSubsession(MediaSubsession* subsession, Boolean /*streamUsingTCP*/, Boolean /*forceMulticastOnUnspecified*/,RTSPClient::responseHandler* afterFunc) { subsession->setSessionId("mumble"); // anything that's non-NULL will work ////////// BEGIN hack code that should really be implemented in SIPClient ////////// // Parse the "Transport:" header parameters: // We do not send audio, but we need port for RTCP char* serverAddressStr; portNumBits serverPortNum; unsigned char rtpChannelId, rtcpChannelId; rtpChannelId = rtcpChannelId = 0xff; serverPortNum = 0; serverAddressStr = NULL; char* sdp = strDup(ourSIPClient->getInviteSdpReply()); char* lineStart; char* nextLineStart = sdp; while (1) { lineStart = nextLineStart; if (lineStart == NULL) { break; } nextLineStart = getLine(lineStart); char* toTagStr = strDupSize(lineStart); if (sscanf(lineStart, "m=audio %[^/\r\n]", toTagStr) == 1) { sscanf(toTagStr, "%hu", &serverPortNum); } else if (sscanf(lineStart, "c=IN IP4 %[^/\r\n]", toTagStr) == 1) { serverAddressStr = strDup(toTagStr); } delete[] toTagStr; } if(sdp != NULL) { delete[] sdp; } delete[] subsession->connectionEndpointName(); subsession->connectionEndpointName() = serverAddressStr; subsession->serverPortNum = serverPortNum; subsession->rtpChannelId = rtpChannelId; subsession->rtcpChannelId = rtcpChannelId; // Set the RTP and RTCP sockets' destination address and port from the information in the SETUP response (if present): netAddressBits destAddress = subsession->connectionEndpointAddress(); if (destAddress != 0) { subsession->setDestinations(destAddress); } ////////// END hack code that should really be implemented in SIPClient ////////// afterFunc(NULL, 0, NULL); } void startPlayingSession(MediaSession* /*session*/, double /*start*/, double /*end*/, float /*scale*/, RTSPClient::responseHandler* afterFunc) { if (ourSIPClient->sendACK()) { //##### This isn't quite right, because we should really be allowing //##### for the possibility of this ACK getting lost, by retransmitting //##### it *each time* we get a 2xx response from the server. afterFunc(NULL, 0, NULL); } else { afterFunc(NULL, -1, strDup(ourSIPClient->envir().getResultMsg())); } } void startPlayingSession(MediaSession* /*session*/, const char* /*start*/, const char* /*end*/, float /*scale*/, RTSPClient::responseHandler* afterFunc) { startPlayingSession(NULL,(double)0,(double)0,0,afterFunc); } void tearDownSession(MediaSession* /*session*/, RTSPClient::responseHandler* afterFunc) { if (ourSIPClient == NULL || ourSIPClient->sendBYE()) { afterFunc(NULL, 0, NULL); } else { afterFunc(NULL, -1, strDup(ourSIPClient->envir().getResultMsg())); } } void setUserAgentString(char const* userAgentString) { ourSIPClient->setUserAgentString(userAgentString); } Boolean allowProxyServers = True; Boolean controlConnectionUsesTCP = False; Boolean supportCodecSelection = True; char const* clientProtocolName = "SIP"; live/testProgs/COPYING000755 001751 000000 00000000000 12265042432 016225 2../COPYINGustar00rsfwheel000000 000000 live/testProgs/testMP3Receiver.cpp000444 001751 000000 00000012523 12265042432 017424 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.live555.com/>.)
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that receives a RTP/RTCP multicast MP3 stream, // and outputs the resulting MP3 file stream to 'stdout' // main program #include "liveMedia.hh" #include "GroupsockHelper.hh" #include "BasicUsageEnvironment.hh" // To receive a stream of 'ADUs' rather than raw MP3 frames, uncomment this: //#define STREAM_USING_ADUS 1 // (For more information about ADUs and interleaving, // see <http://www.live555.com/rtp-mp3/>) // To receive a "source-specific multicast" (SSM) stream, uncomment this: //#define USE_SSM 1 void afterPlaying(void* clientData); // forward // A structure to hold the state of the current session. // It is used in the "afterPlaying()" function to clean up the session. struct sessionState_t { FramedSource* source; FileSink* sink; RTCPInstance* rtcpInstance; } sessionState; UsageEnvironment* env; int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create the data sink for 'stdout': sessionState.sink = FileSink::createNew(*env, "stdout"); // Note: The string "stdout" is handled as a special case. // A real file name could have been used instead. // Create 'groupsocks' for RTP and RTCP: char const* sessionAddressStr #ifdef USE_SSM = "232.255.42.42"; #else = "239.255.42.42"; // Note: If the session is unicast rather than multicast, // then replace this string with "0.0.0.0" #endif const unsigned short rtpPortNum = 6666; const unsigned short rtcpPortNum = rtpPortNum+1; #ifndef USE_SSM const unsigned char ttl = 1; // low, in case routers don't admin scope #endif struct in_addr sessionAddress; sessionAddress.s_addr = our_inet_addr(sessionAddressStr); const Port rtpPort(rtpPortNum); const Port rtcpPort(rtcpPortNum); #ifdef USE_SSM char* sourceAddressStr = "aaa.bbb.ccc.ddd"; // replace this with the real source address struct in_addr sourceFilterAddress; sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr); Groupsock rtpGroupsock(*env, sessionAddress, sourceFilterAddress, rtpPort); Groupsock rtcpGroupsock(*env, sessionAddress, sourceFilterAddress, rtcpPort); rtcpGroupsock.changeDestinationParameters(sourceFilterAddress,0,~0); // our RTCP "RR"s are sent back using unicast #else Groupsock rtpGroupsock(*env, sessionAddress, rtpPort, ttl); Groupsock rtcpGroupsock(*env, sessionAddress, rtcpPort, ttl); #endif RTPSource* rtpSource; #ifndef STREAM_USING_ADUS // Create the data source: a "MPEG Audio RTP source" rtpSource = MPEG1or2AudioRTPSource::createNew(*env, &rtpGroupsock); #else // Create the data source: a "MP3 *ADU* RTP source" unsigned char rtpPayloadFormat = 96; // a dynamic payload type rtpSource = MP3ADURTPSource::createNew(*env, &rtpGroupsock, rtpPayloadFormat); #endif // Create (and start) a 'RTCP instance' for the RTP source: const unsigned estimatedSessionBandwidth = 160; // in kbps; for RTCP b/w share const unsigned maxCNAMElen = 100; unsigned char CNAME[maxCNAMElen+1]; gethostname((char*)CNAME, maxCNAMElen); CNAME[maxCNAMElen] =
'\0'; // just in case sessionState.rtcpInstance = RTCPInstance::createNew(*env, &rtcpGroupsock, estimatedSessionBandwidth, CNAME, NULL /* we're a client */, rtpSource); // Note: This starts RTCP running automatically sessionState.source = rtpSource; #ifdef STREAM_USING_ADUS // Add a filter that deinterleaves the ADUs after depacketizing them: sessionState.source = MP3ADUdeinterleaver::createNew(*env, sessionState.source); if (sessionState.source == NULL) { *env << "Unable to create an ADU deinterleaving filter for the source\n"; exit(1); } // Add another filter that converts these ADUs to MP3s: sessionState.source = MP3FromADUSource::createNew(*env, sessionState.source); if (sessionState.source == NULL) { *env << "Unable to create an ADU->MP3 filter for the source\n"; exit(1); } #endif // Finally, start receiving the multicast stream: *env << "Beginning receiving multicast stream...\n"; sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "...done receiving\n"; // End by closing the media: Medium::close(sessionState.rtcpInstance); // Note: Sends a RTCP BYE Medium::close(sessionState.sink); Medium::close(sessionState.source); } live/testProgs/sapWatch.cpp000444 001751 000000 00000005447 12265042432 016221 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A program that receives and prints SDP/SAP announcements // (on the default SDP/SAP directory: 224.2.127.254/9875) #include "Groupsock.hh" #include "GroupsockHelper.hh" #include "BasicUsageEnvironment.hh" #include static unsigned const maxPacketSize = 65536; static unsigned char packet[maxPacketSize+1]; int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler); // Create a 'groupsock' for the input multicast group,port: char const* sessionAddressStr = "224.2.127.254"; struct in_addr sessionAddress; sessionAddress.s_addr = our_inet_addr(sessionAddressStr); const Port port(9875); const unsigned char ttl = 0; // we're only reading from this mcast group Groupsock inputGroupsock(*env, sessionAddress, port, ttl); // Start reading and printing incoming packets // (Because this is the only thing we do, we can just do this // synchronously, in a loop, so we don't need to set up an asynchronous // event handler like we do in most of the other test programs.) 
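  // (Illustrative aside - not part of the original program: the asynchronous
  // alternative would use the event loop, along the lines of the following
  // sketch.  The handler name "incomingPacketHandler" is hypothetical:
  //
  //   void incomingPacketHandler(void* clientData, int /*mask*/) {
  //     Groupsock* gs = (Groupsock*)clientData;
  //     unsigned packetSize;
  //     struct sockaddr_in fromAddress;
  //     if (gs->handleRead(packet, maxPacketSize, packetSize, fromAddress)) {
  //       // ... process "packet" here ...
  //     }
  //   }
  //   ...
  //   env->taskScheduler().turnOnBackgroundReadHandling(
  //       inputGroupsock.socketNum(), incomingPacketHandler, &inputGroupsock);
  //   env->taskScheduler().doEventLoop(); // does not return
  // )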
  unsigned packetSize;
  struct sockaddr_in fromAddress;
  while (inputGroupsock.handleRead(packet, maxPacketSize, packetSize, fromAddress)) {
    printf("\n[packet from %s (%d bytes)]\n", AddressString(fromAddress).val(), packetSize);

    // Ignore the first 8 bytes (SAP header).
    if (packetSize < 8) {
      *env << "Ignoring short packet from " << AddressString(fromAddress).val() << "!\n";
      continue;
    }

    // convert "application/sdp\0" -> "application/sdp\0x20"
    // or all other nonprintable characters to blank, except new line
    unsigned idx = 8;
    while (idx < packetSize) {
      if (packet[idx] < 0x20 && packet[idx] != '\n') packet[idx] = 0x20;
      idx++;
    }

    packet[packetSize] = '\0'; // just in case
    printf("%s", (char*)(packet+8));
  }

  return 0; // only to prevent compiler warning
}
live/testProgs/vobStreamer.cpp000400 001751 000000 00000023572 12265042432 016727 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See .)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// Copyright (c) 1996-2014, Live Networks, Inc.  All rights reserved
// A test program that reads a VOB file,
// splits it into Audio (AC3) and Video (MPEG) Elementary Streams,
// and streams both using RTP.
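// (Illustrative usage note - the file names here are hypothetical, but the
// options match the command-line parsing in main() below:
//     vobStreamer -p 8554 title1.vob title2.vob
// would stream the two VOB files in sequence, with the built-in RTSP server
// listening on port 8554 instead of the default port 554.)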
// main program
#include "liveMedia.hh"
#include "AC3AudioStreamFramer.hh"
#include "BasicUsageEnvironment.hh"
#include "GroupsockHelper.hh"

char const* programName;

// Whether to stream *only* "I" (key) frames
// (e.g., to reduce network bandwidth):
Boolean iFramesOnly = False;

unsigned const VOB_AUDIO = 1<<0;
unsigned const VOB_VIDEO = 1<<1;
unsigned mediaToStream = VOB_AUDIO|VOB_VIDEO; // by default

char const** inputFileNames;
char const** curInputFileName;
Boolean haveReadOneFile = False;

UsageEnvironment* env;
MPEG1or2Demux* mpegDemux;
AC3AudioStreamFramer* audioSource = NULL;
FramedSource* videoSource = NULL;
RTPSink* audioSink = NULL;
RTCPInstance* audioRTCP = NULL;
RTPSink* videoSink = NULL;
RTCPInstance* videoRTCP = NULL;
RTSPServer* rtspServer = NULL;
unsigned short const defaultRTSPServerPortNum = 554;
unsigned short rtspServerPortNum = defaultRTSPServerPortNum;
Groupsock* rtpGroupsockAudio;
Groupsock* rtcpGroupsockAudio;
Groupsock* rtpGroupsockVideo;
Groupsock* rtcpGroupsockVideo;

void usage() {
  *env << "usage: " << programName << " [-i] [-a|-v] "
    "[-p <port-number>] " "<VOB-file>...\n";
  exit(1);
}

void play(); // forward

int main(int argc, char const** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Parse command-line options:
  // (Unfortunately we can't use getopt() here; Windoze doesn't have it)
  programName = argv[0];
  while (argc > 2) {
    char const* const opt = argv[1];
    if (opt[0] != '-') break;
    switch (opt[1]) {
      case 'i': { // transmit video I-frames only
        iFramesOnly = True;
        break;
      }
      case 'a': { // transmit audio, but not video
        mediaToStream &=~ VOB_VIDEO;
        break;
      }
      case 'v': { // transmit video, but not audio
        mediaToStream &=~ VOB_AUDIO;
        break;
      }
      case 'p': { // specify port number for built-in RTSP server
        int portArg;
        if (sscanf(argv[2], "%d", &portArg) != 1) {
          usage();
        }
        if (portArg <= 0 || portArg >= 65536) {
          *env << "bad port number: " << portArg << " (must be in the range (0,65536))\n";
          usage();
        }
        rtspServerPortNum = (unsigned short)portArg;
        ++argv; --argc;
        break;
      }
      default: {
        usage();
        break;
      }
    }
    ++argv; --argc;
  }
  if (argc < 2) usage();
  if (mediaToStream == 0) {
    *env << "The -a and -v flags cannot both be used!\n";
    usage();
  }
  if (iFramesOnly && (mediaToStream&VOB_VIDEO) == 0) {
    *env << "Warning: Because we're not streaming video, the -i flag has no effect.\n";
  }

  inputFileNames = &argv[1];
  curInputFileName = inputFileNames;

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);

  const unsigned short rtpPortNumAudio = 4444;
  const unsigned short rtcpPortNumAudio = rtpPortNumAudio+1;
  const unsigned short rtpPortNumVideo = 8888;
  const unsigned short rtcpPortNumVideo = rtpPortNumVideo+1;
  const unsigned char ttl = 255;

  const Port rtpPortAudio(rtpPortNumAudio);
  const Port rtcpPortAudio(rtcpPortNumAudio);
  const Port rtpPortVideo(rtpPortNumVideo);
  const Port rtcpPortVideo(rtcpPortNumVideo);

  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case

  if (mediaToStream&VOB_AUDIO) {
    rtpGroupsockAudio = new Groupsock(*env, destinationAddress, rtpPortAudio, ttl);
    rtpGroupsockAudio->multicastSendOnly(); // because we're a SSM source

    // Create an 'AC3 Audio RTP' sink from the RTP 'groupsock':
    audioSink = AC3AudioRTPSink::createNew(*env, rtpGroupsockAudio, 96, 0);
        // set the RTP timestamp frequency 'for real' later
    // Create (and start) a 'RTCP instance' for this RTP sink:
    rtcpGroupsockAudio = new Groupsock(*env, destinationAddress, rtcpPortAudio, ttl);
    rtcpGroupsockAudio->multicastSendOnly(); // because we're a SSM source
    const unsigned estimatedSessionBandwidthAudio = 160; // in kbps; for RTCP b/w share
    audioRTCP = RTCPInstance::createNew(*env, rtcpGroupsockAudio,
                                        estimatedSessionBandwidthAudio, CNAME,
                                        audioSink, NULL /* we're a server */,
                                        True /* we're a SSM source */);
        // Note: This starts RTCP running automatically
  }

  if (mediaToStream&VOB_VIDEO) {
    rtpGroupsockVideo = new Groupsock(*env, destinationAddress, rtpPortVideo, ttl);
    rtpGroupsockVideo->multicastSendOnly(); // because we're a SSM source

    // Create a 'MPEG Video RTP' sink from the RTP 'groupsock':
    videoSink = MPEG1or2VideoRTPSink::createNew(*env, rtpGroupsockVideo);

    // Create (and start) a 'RTCP instance' for this RTP sink:
    rtcpGroupsockVideo = new Groupsock(*env, destinationAddress, rtcpPortVideo, ttl);
    rtcpGroupsockVideo->multicastSendOnly(); // because we're a SSM source
    const unsigned estimatedSessionBandwidthVideo = 4500; // in kbps; for RTCP b/w share
    videoRTCP = RTCPInstance::createNew(*env, rtcpGroupsockVideo,
                                        estimatedSessionBandwidthVideo, CNAME,
                                        videoSink, NULL /* we're a server */,
                                        True /* we're a SSM source */);
        // Note: This starts RTCP running automatically
  }

  if (rtspServer == NULL) {
    rtspServer = RTSPServer::createNew(*env, rtspServerPortNum);
    if (rtspServer == NULL) {
      *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
      *env << "To change the RTSP server's port number, use the \"-p <port-number>\" option.\n";
      exit(1);
    }
    ServerMediaSession* sms = ServerMediaSession::createNew(*env, "vobStream", *curInputFileName,
                                                            "Session streamed by \"vobStreamer\"", True /*SSM*/);
    if (audioSink != NULL) sms->addSubsession(PassiveServerMediaSubsession::createNew(*audioSink, audioRTCP));
    if (videoSink != NULL) sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, videoRTCP));
    rtspServer->addServerMediaSession(sms);
    *env << "Created RTSP server.\n";

    // Display our "rtsp://" URL, for clients to connect to:
    char* url = rtspServer->rtspURL(sms);
    *env << "Access this stream using the URL:\n\t" << url << "\n";
    delete[] url;
  }

  // Finally, start the streaming:
  *env << "Beginning streaming...\n";
  play();

  env->taskScheduler().doEventLoop(); // does not return
  return 0; // only to prevent compiler warning
}

void afterPlaying(void* clientData) {
  // One of the sinks has ended playing.
  // Check whether any of the sources have a pending read.  If so,
  // wait until its sink ends playing also:
  if ((audioSource != NULL && audioSource->isCurrentlyAwaitingData()) ||
      (videoSource != NULL && videoSource->isCurrentlyAwaitingData())) {
    return;
  }

  // Now that both sinks have ended, close both input sources,
  // and start playing again:
  *env << "...done reading from file\n";

  if (audioSink != NULL) audioSink->stopPlaying();
  if (videoSink != NULL) videoSink->stopPlaying(); // ensures that both are shut down
  Medium::close(audioSource);
  Medium::close(videoSource);
  Medium::close(mpegDemux);
  // Note: This also closes the input file that this source read from.

  // Move to the next file name (if any):
  ++curInputFileName;

  // Start playing once again:
  play();
}

void play() {
  if (*curInputFileName == NULL) {
    // We have reached the end of the file name list.
// Start again, unless we didn't succeed in reading any files: if (!haveReadOneFile) exit(1); haveReadOneFile = False; curInputFileName = inputFileNames; } // Open the current input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, *curInputFileName); if (fileSource == NULL) { *env << "Unable to open file \"" << *curInputFileName << "\" as a byte-stream file source\n"; // Try the next file instead: ++curInputFileName; play(); return; } haveReadOneFile = True; // We must demultiplex Audio and Video Elementary Streams // from the input source: mpegDemux = MPEG1or2Demux::createNew(*env, fileSource); if (mediaToStream&VOB_AUDIO) { FramedSource* audioES = mpegDemux->newElementaryStream(0xBD); // Because, in a VOB file, the AC3 audio has stream id 0xBD audioSource = AC3AudioStreamFramer::createNew(*env, audioES, 0x80); } if (mediaToStream&VOB_VIDEO) { FramedSource* videoES = mpegDemux->newVideoStream(); videoSource = MPEG1or2VideoStreamFramer::createNew(*env, videoES, iFramesOnly); } // Finally, start playing each sink. *env << "Beginning to read from \"" << *curInputFileName << "\"...\n"; if (videoSink != NULL) { videoSink->startPlaying(*videoSource, afterPlaying, videoSink); } if (audioSink != NULL) { audioSink->setRTPTimestampFrequency(audioSource->samplingRate()); audioSink->startPlaying(*audioSource, afterPlaying, audioSink); } } live/testProgs/testMP3.sdp000444 001751 000000 00000000320 12265042432 015733 0ustar00rsfwheel000000 000000 v=0 o=- 49452 4 IN IP4 127.0.0.1 s=Test MP3 session i=Parameters for the session streamed by "testMP3Streamer" t=0 0 a=tool:testMP3Streamer a=type:broadcast m=audio 6666 RTP/AVP 14 c=IN IP4 239.255.42.42/127 live/testProgs/testRelay.cpp000444 001751 000000 00000006406 12265042432 016417 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. 
All rights reserved
// A test program that receives a UDP multicast stream
// and retransmits it to another (multicast or unicast) address & port
// main program

#include <liveMedia.hh> // for "BasicUDPSource" and "BasicUDPSink"
#include "BasicUsageEnvironment.hh"
#include "GroupsockHelper.hh"

UsageEnvironment* env;

// To receive a "source-specific multicast" (SSM) stream, uncomment this:
//#define USE_SSM 1

int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Create a 'groupsock' for the input multicast group,port:
  char const* inputAddressStr
#ifdef USE_SSM
    = "232.255.42.42";
#else
    = "239.255.42.42";
#endif
  struct in_addr inputAddress;
  inputAddress.s_addr = our_inet_addr(inputAddressStr);

  Port const inputPort(8888);
  unsigned char const inputTTL = 0; // we're only reading from this mcast group
#ifdef USE_SSM
  char* sourceAddressStr = "aaa.bbb.ccc.ddd"; // replace this with the real source address
  struct in_addr sourceFilterAddress;
  sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr);
  Groupsock inputGroupsock(*env, inputAddress, sourceFilterAddress, inputPort);
#else
  Groupsock inputGroupsock(*env, inputAddress, inputPort, inputTTL);
#endif

  // Then create a liveMedia 'source' object, encapsulating this groupsock:
  FramedSource* source = BasicUDPSource::createNew(*env, &inputGroupsock);

  // Create a 'groupsock' for the destination address and port:
  char const* outputAddressStr = "239.255.43.43"; // this could also be unicast
    // Note: You may change "outputAddressStr" to use a different multicast
    // (or unicast address), but do *not* change it to use the same multicast
    // address as "inputAddressStr".
  struct in_addr outputAddress;
  outputAddress.s_addr = our_inet_addr(outputAddressStr);
  Port const outputPort(4444);
  unsigned char const outputTTL = 255;
  Groupsock outputGroupsock(*env, outputAddress, outputPort, outputTTL);

  // Then create a liveMedia 'sink' object, encapsulating this groupsock:
  unsigned const maxPacketSize = 65536; // allow for large UDP packets
  MediaSink* sink = BasicUDPSink::createNew(*env, &outputGroupsock, maxPacketSize);

  // Now, start playing, feeding the sink object from the source:
  sink->startPlaying(*source, NULL, NULL);

  env->taskScheduler().doEventLoop(); // does not return
  return 0; // only to prevent compiler warning
}
live/testProgs/testAMRAudioStreamer.cpp000444 001751 000000 00000010513 12265042432 020441 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See .)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// Copyright (c) 1996-2014, Live Networks, Inc.
All rights reserved // A test program that reads an AMR audio file (as defined in RFC 3267) // and streams it using RTP // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" #include "GroupsockHelper.hh" UsageEnvironment* env; char const* inputFileName = "test.amr"; AMRAudioFileSource* audioSource; RTPSink* audioSink; void play(); // forward int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create 'groupsocks' for RTP and RTCP: struct in_addr destinationAddress; destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env); // Note: This is a multicast address. If you wish instead to stream // using unicast, then you should use the "testOnDemandRTSPServer" // test program - not this test program - as a model. const unsigned short rtpPortNum = 16666; const unsigned short rtcpPortNum = rtpPortNum+1; const unsigned char ttl = 255; const Port rtpPort(rtpPortNum); const Port rtcpPort(rtcpPortNum); Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl); rtpGroupsock.multicastSendOnly(); // we're a SSM source Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl); rtcpGroupsock.multicastSendOnly(); // we're a SSM source // Create a 'AMR Audio RTP' sink from the RTP 'groupsock': audioSink = AMRAudioRTPSink::createNew(*env, &rtpGroupsock, 96); // Create (and start) a 'RTCP instance' for this RTP sink: const unsigned estimatedSessionBandwidth = 10; // in kbps; for RTCP b/w share const unsigned maxCNAMElen = 100; unsigned char CNAME[maxCNAMElen+1]; gethostname((char*)CNAME, maxCNAMElen); CNAME[maxCNAMElen] = '\0'; // just in case RTCPInstance* rtcp = RTCPInstance::createNew(*env, &rtcpGroupsock, estimatedSessionBandwidth, CNAME, audioSink, NULL /* we're a server */, True /* we're a SSM source */); // Note: This starts RTCP running automatically // Create and start a RTSP server to serve this stream. RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554); if (rtspServer == NULL) { *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n"; exit(1); } ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName, "Session streamed by \"testAMRAudioStreamer\"", True /*SSM*/); sms->addSubsession(PassiveServerMediaSubsession::createNew(*audioSink, rtcp)); rtspServer->addServerMediaSession(sms); char* url = rtspServer->rtspURL(sms); *env << "Play this stream using the URL \"" << url << "\"\n"; delete[] url; // Start the streaming: *env << "Beginning streaming...\n"; play(); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "...done reading from file\n"; audioSink->stopPlaying(); Medium::close(audioSource); // Note that this also closes the input file that this source read from. 
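  // (Note the control flow in this demo: "play()" - below - starts the sink
  // reading from a newly-opened source; when the file has been fully read,
  // this "afterPlaying()" routine closes the old source and calls "play()"
  // again, so the same file is streamed in an endless loop.)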
  play();
}

void play() {
  // Open the input file as an 'AMR audio file source':
  // (Assigning to the global "audioSource" - rather than declaring a new
  // local variable that would shadow it - so that "afterPlaying()" can later
  // close this source:)
  audioSource = AMRAudioFileSource::createNew(*env, inputFileName);
  if (audioSource == NULL) {
    *env << "Unable to open file \"" << inputFileName
         << "\" as an AMR audio file source: " << env->getResultMsg() << "\n";
    exit(1);
  }

  // Finally, start playing:
  *env << "Beginning to read from file...\n";
  audioSink->startPlaying(*audioSource, afterPlaying, audioSink);
}
live/testProgs/testMPEG1or2AudioVideo.sdp000444 001751 000000 00000000456 12265042432 020553 0ustar00rsfwheel000000 000000 v=0
o=- 49451 3 IN IP4 127.0.0.1
s=Test MPEG Audio+Video session
i=Parameters for the session streamed by "testMPEG1or2AudioVideoStreamer"
t=0 0
a=tool:testMPEG1or2AudioVideoStreamer
a=type:broadcast
m=audio 6666 RTP/AVP 14
c=IN IP4 239.255.42.42/127
m=video 8888 RTP/AVP 32
c=IN IP4 239.255.42.42/127
live/testProgs/testOnDemandRTSPServer.cpp000444 001751 000000 00000036271 12265042432 020733 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See .)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
for more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// Copyright (c) 1996-2014, Live Networks, Inc.  All rights reserved
// A test program that demonstrates how to stream - via unicast RTP
// - various kinds of file on demand, using a built-in RTSP server.
// main program

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

UsageEnvironment* env;

// To make the second and subsequent client for each stream reuse the same
// input stream as the first client (rather than playing the file from the
// start for each client), change the following "False" to "True":
Boolean reuseFirstSource = False;

// To stream *only* MPEG-1 or 2 video "I" frames
// (e.g., to reduce network bandwidth),
// change the following "False" to "True":
Boolean iFramesOnly = False;

static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms,
                           char const* streamName, char const* inputFileName); // fwd

static char newMatroskaDemuxWatchVariable;
static MatroskaFileServerDemux* demux;
static void onMatroskaDemuxCreation(MatroskaFileServerDemux* newDemux, void* /*clientData*/) {
  demux = newDemux;
  newMatroskaDemuxWatchVariable = 1;
}

int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
  // To implement client access control to the RTSP server, do the following:
  authDB = new UserAuthenticationDatabase;
  authDB->addUserRecord("username1", "password1"); // replace these with real strings
  // Repeat the above with each <username>, <password> that you wish to allow
  // access to the server.
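  // (Illustrative note: once access control is enabled, an RTSP client must
  // supply matching credentials - e.g., using the "openRTSP" demo's
  // "-u <username> <password>" options:
  //     openRTSP -u username1 password1 rtsp://<server-address>:8554/mp3AudioTest
  // The URL shown is hypothetical; the server prints the actual URLs, via
  // "announceStream()", at startup.)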
#endif // Create the RTSP server: RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB); if (rtspServer == NULL) { *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n"; exit(1); } char const* descriptionString = "Session streamed by \"testOnDemandRTSPServer\""; // Set up each of the possible streams that can be served by the // RTSP server. Each such stream is implemented using a // "ServerMediaSession" object, plus one or more // "ServerMediaSubsession" objects for each audio/video substream. // A MPEG-4 video elementary stream: { char const* streamName = "mpeg4ESVideoTest"; char const* inputFileName = "test.m4e"; ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); sms->addSubsession(MPEG4VideoFileServerMediaSubsession ::createNew(*env, inputFileName, reuseFirstSource)); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // A H.264 video elementary stream: { char const* streamName = "h264ESVideoTest"; char const* inputFileName = "test.264"; ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); sms->addSubsession(H264VideoFileServerMediaSubsession ::createNew(*env, inputFileName, reuseFirstSource)); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // A H.265 video elementary stream: { char const* streamName = "h265ESVideoTest"; char const* inputFileName = "test.265"; ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); sms->addSubsession(H265VideoFileServerMediaSubsession ::createNew(*env, inputFileName, reuseFirstSource)); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // A MPEG-1 or 2 audio+video program stream: { char const* streamName = "mpeg1or2AudioVideoTest"; char const* inputFileName = "test.mpg"; // NOTE: This *must* be a Program Stream; not an Elementary Stream ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); MPEG1or2FileServerDemux* demux = MPEG1or2FileServerDemux::createNew(*env, inputFileName, reuseFirstSource); sms->addSubsession(demux->newVideoServerMediaSubsession(iFramesOnly)); sms->addSubsession(demux->newAudioServerMediaSubsession()); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // A MPEG-1 or 2 video elementary stream: { char const* streamName = "mpeg1or2ESVideoTest"; char const* inputFileName = "testv.mpg"; // NOTE: This *must* be a Video Elementary Stream; not a Program Stream ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); sms->addSubsession(MPEG1or2VideoFileServerMediaSubsession ::createNew(*env, inputFileName, reuseFirstSource, iFramesOnly)); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // A MP3 audio stream (actually, any MPEG-1 or 2 audio file will work): // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following: //#define STREAM_USING_ADUS 1 // To also reorder ADUs before streaming, uncomment the following: //#define INTERLEAVE_ADUS 1 // (For more information about ADUs and interleaving, // see ) { char const* streamName = "mp3AudioTest"; char const* inputFileName = "test.mp3"; ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, 
descriptionString); Boolean useADUs = False; Interleaving* interleaving = NULL; #ifdef STREAM_USING_ADUS useADUs = True; #ifdef INTERLEAVE_ADUS unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own... unsigned const interleaveCycleSize = (sizeof interleaveCycle)/(sizeof (unsigned char)); interleaving = new Interleaving(interleaveCycleSize, interleaveCycle); #endif #endif sms->addSubsession(MP3AudioFileServerMediaSubsession ::createNew(*env, inputFileName, reuseFirstSource, useADUs, interleaving)); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // A WAV audio stream: { char const* streamName = "wavAudioTest"; char const* inputFileName = "test.wav"; ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); // To convert 16-bit PCM data to 8-bit u-law, prior to streaming, // change the following to True: Boolean convertToULaw = False; sms->addSubsession(WAVAudioFileServerMediaSubsession ::createNew(*env, inputFileName, reuseFirstSource, convertToULaw)); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // An AMR audio stream: { char const* streamName = "amrAudioTest"; char const* inputFileName = "test.amr"; ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); sms->addSubsession(AMRAudioFileServerMediaSubsession ::createNew(*env, inputFileName, reuseFirstSource)); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // A 'VOB' file (e.g., from an unencrypted DVD): { char const* streamName = "vobTest"; char const* inputFileName = "test.vob"; ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); // Note: VOB files are MPEG-2 Program Stream files, but using AC-3 audio MPEG1or2FileServerDemux* demux = MPEG1or2FileServerDemux::createNew(*env, inputFileName, reuseFirstSource); sms->addSubsession(demux->newVideoServerMediaSubsession(iFramesOnly)); sms->addSubsession(demux->newAC3AudioServerMediaSubsession()); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // A MPEG-2 Transport Stream: { char const* streamName = "mpeg2TransportStreamTest"; char const* inputFileName = "test.ts"; char const* indexFileName = "test.tsx"; ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); sms->addSubsession(MPEG2TransportFileServerMediaSubsession ::createNew(*env, inputFileName, indexFileName, reuseFirstSource)); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // An AAC audio stream (ADTS-format file): { char const* streamName = "aacAudioTest"; char const* inputFileName = "test.aac"; ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); sms->addSubsession(ADTSAudioFileServerMediaSubsession ::createNew(*env, inputFileName, reuseFirstSource)); rtspServer->addServerMediaSession(sms); announceStream(rtspServer, sms, streamName, inputFileName); } // A DV video stream: { // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000). 
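  // (Illustrative note: "OutPacketBuffer::maxSize" - set on the next line - is
  // a static, library-wide setting, so it must be increased *before* the
  // corresponding "RTPSink"s are created; the value 300000 comfortably exceeds
  // the worst-case DV frame size of 288000 bytes mentioned above.)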
  OutPacketBuffer::maxSize = 300000;

    char const* streamName = "dvVideoTest";
    char const* inputFileName = "test.dv";
    ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(DVVideoFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An AC3 audio elementary stream:
  {
    char const* streamName = "ac3AudioTest";
    char const* inputFileName = "test.ac3";
    ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(AC3AudioFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A Matroska ('.mkv') file, with video+audio+subtitle streams:
  {
    char const* streamName = "matroskaFileTest";
    char const* inputFileName = "test.mkv";
    ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);

    newMatroskaDemuxWatchVariable = 0;
    MatroskaFileServerDemux::createNew(*env, inputFileName, onMatroskaDemuxCreation, NULL);
    env->taskScheduler().doEventLoop(&newMatroskaDemuxWatchVariable);

    Boolean sessionHasTracks = False;
    ServerMediaSubsession* smss;
    while ((smss = demux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
      sessionHasTracks = True;
    }
    if (sessionHasTracks) {
      rtspServer->addServerMediaSession(sms);
    }
    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A WebM ('.webm') file, with video(VP8)+audio(Vorbis) streams:
  // (Note: '.webm' files are special types of Matroska files, so we use the same code as the Matroska ('.mkv') file code above.)
  {
    char const* streamName = "webmFileTest";
    char const* inputFileName = "test.webm";
    ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);

    newMatroskaDemuxWatchVariable = 0;
    MatroskaFileServerDemux::createNew(*env, inputFileName, onMatroskaDemuxCreation, NULL);
    env->taskScheduler().doEventLoop(&newMatroskaDemuxWatchVariable);

    Boolean sessionHasTracks = False;
    ServerMediaSubsession* smss;
    while ((smss = demux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
      sessionHasTracks = True;
    }
    if (sessionHasTracks) {
      rtspServer->addServerMediaSession(sms);
    }
    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-2 Transport Stream, coming from a live UDP (raw-UDP or RTP/UDP) source:
  {
    char const* streamName = "mpeg2TransportStreamFromUDPSourceTest";
    char const* inputAddressStr = "239.255.42.42";
        // This causes the server to take its input from the stream sent by the "testMPEG2TransportStreamer" demo application.
        // (Note: If the input UDP source is unicast rather than multicast, then change this to NULL.)
    portNumBits const inputPortNum = 1234;
        // This causes the server to take its input from the stream sent by the "testMPEG2TransportStreamer" demo application.
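        // (Illustrative note: a matching input stream can be generated by
        // running the "testMPEG2TransportStreamer" demo application on the
        // same network; by default it sends a Transport Stream to this same
        // multicast address, 239.255.42.42, and port, 1234.)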
Boolean const inputStreamIsRawUDP = False; ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString); sms->addSubsession(MPEG2TransportUDPServerMediaSubsession ::createNew(*env, inputAddressStr, inputPortNum, inputStreamIsRawUDP)); rtspServer->addServerMediaSession(sms); char* url = rtspServer->rtspURL(sms); *env << "\n\"" << streamName << "\" stream, from a UDP Transport Stream input source \n\t("; if (inputAddressStr != NULL) { *env << "IP multicast address " << inputAddressStr << ","; } else { *env << "unicast;"; } *env << " port " << inputPortNum << ")\n"; *env << "Play this stream using the URL \"" << url << "\"\n"; delete[] url; } // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling. // Try first with the default HTTP port (80), and then with the alternative HTTP // port numbers (8000 and 8080). if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) { *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n"; } else { *env << "\n(RTSP-over-HTTP tunneling is not available.)\n"; } env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms, char const* streamName, char const* inputFileName) { char* url = rtspServer->rtspURL(sms); UsageEnvironment& env = rtspServer->envir(); env << "\n\"" << streamName << "\" stream, from the file \"" << inputFileName << "\"\n"; env << "Play this stream using the URL \"" << url << "\"\n"; delete[] url; } live/testProgs/testH265VideoStreamer.cpp000444 001751 000000 00000011734 12265042432 020461 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that reads a H.265 Elementary Stream video file // and streams it using RTP // main program // // NOTE: For this application to work, the H.265 Elementary Stream video file *must* contain // VPS, SPS and PPS NAL units, ideally at or near the start of the file. // These VPS, SPS and PPS NAL units are used to specify 'configuration' information that is set in // the output stream's SDP description (by the RTSP server that is built in to this application). 
// Note also that - unlike some other "*Streamer" demo applications - the resulting stream can be
// received only using a RTSP client (such as "openRTSP")

#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>
#include <GroupsockHelper.hh>

UsageEnvironment* env;

char const* inputFileName = "test.265";
H265VideoStreamFramer* videoSource;
RTPSink* videoSink;

void play(); // forward

int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address.  If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNum = 18888;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
  rtpGroupsock.multicastSendOnly(); // we're a SSM source
  Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
  rtcpGroupsock.multicastSendOnly(); // we're a SSM source

  // Create a 'H265 Video RTP' sink from the RTP 'groupsock':
  OutPacketBuffer::maxSize = 100000;
  videoSink = H265VideoRTPSink::createNew(*env, &rtpGroupsock, 96);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  RTCPInstance* rtcp = RTCPInstance::createNew(*env, &rtcpGroupsock,
                                               estimatedSessionBandwidth, CNAME,
                                               videoSink, NULL /* we're a server */,
                                               True /* we're a SSM source */);
  // Note: This starts RTCP running automatically

  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName,
                                                          "Session streamed by \"testH265VideoStreamer\"", True /*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
  rtspServer->addServerMediaSession(sms);

  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Start the streaming:
  *env << "Beginning streaming...\n";
  play();

  env->taskScheduler().doEventLoop(); // does not return
  return 0; // only to prevent compiler warning
}

void afterPlaying(void* /*clientData*/) {
  *env << "...done reading from file\n";
  videoSink->stopPlaying();
  Medium::close(videoSource);
  // Note that this also closes the input file that this source read from.
// Start playing once again: play(); } void play() { // Open the input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, inputFileName); if (fileSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } FramedSource* videoES = fileSource; // Create a framer for the Video Elementary Stream: videoSource = H265VideoStreamFramer::createNew(*env, videoES); // Finally, start playing: *env << "Beginning to read from file...\n"; videoSink->startPlaying(*videoSource, afterPlaying, videoSink); } live/testProgs/testGSMStreamer.cpp000400 001751 000000 00000012632 12265042432 017462 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that streams GSM audio via RTP/RTCP // main program // NOTE: This program assumes the existence of a (currently nonexistent) // function called "createNewGSMAudioSource()". #include "liveMedia.hh" #include "GroupsockHelper.hh" #include "BasicUsageEnvironment.hh" ////////// Main program ////////// // To stream using "source-specific multicast" (SSM), uncomment the following: //#define USE_SSM 1 #ifdef USE_SSM Boolean const isSSM = True; #else Boolean const isSSM = False; #endif // To set up an internal RTSP server, uncomment the following: //#define IMPLEMENT_RTSP_SERVER 1 // (Note that this RTSP server works for multicast only) #ifdef IMPLEMENT_RTSP_SERVER RTSPServer* rtspServer; #endif UsageEnvironment* env; void afterPlaying(void* clientData); // forward // A structure to hold the state of the current session. // It is used in the "afterPlaying()" function to clean up the session. struct sessionState_t { FramedSource* source; RTPSink* sink; RTCPInstance* rtcpInstance; Groupsock* rtpGroupsock; Groupsock* rtcpGroupsock; } sessionState; void play(); // forward int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create 'groupsocks' for RTP and RTCP: char* destinationAddressStr #ifdef USE_SSM = "232.255.42.42"; #else = "239.255.42.42"; // Note: This is a multicast address. If you wish to stream using // unicast instead, then replace this string with the unicast address // of the (single) destination. (You may also need to make a similar // change to the receiver program.) 
#endif

  const unsigned short rtpPortNum = 6666;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 1; // low, in case routers don't admin scope

  struct in_addr destinationAddress;
  destinationAddress.s_addr = our_inet_addr(destinationAddressStr);
  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  sessionState.rtpGroupsock = new Groupsock(*env, destinationAddress, rtpPort, ttl);
  sessionState.rtcpGroupsock = new Groupsock(*env, destinationAddress, rtcpPort, ttl);
#ifdef USE_SSM
  sessionState.rtpGroupsock->multicastSendOnly();
  sessionState.rtcpGroupsock->multicastSendOnly();
#endif

  // Create a 'GSM RTP' sink from the RTP 'groupsock':
  sessionState.sink = GSMAudioRTPSink::createNew(*env, sessionState.rtpGroupsock);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = 160; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  sessionState.rtcpInstance = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock,
                                                      estimatedSessionBandwidth, CNAME,
                                                      sessionState.sink, NULL /* we're a server */,
                                                      isSSM);
  // Note: This starts RTCP running automatically

#ifdef IMPLEMENT_RTSP_SERVER
  rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", "GSM input",
                                                          "Session streamed by \"testGSMStreamer\"", isSSM);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, sessionState.rtcpInstance));
  rtspServer->addServerMediaSession(sms);

  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;
#endif

  play();

  env->taskScheduler().doEventLoop(); // does not return
  return 0; // only to prevent compiler warning
}

void play() {
  // Open the input source:
  extern FramedSource* createNewGSMAudioSource(UsageEnvironment&);
  sessionState.source = createNewGSMAudioSource(*env);
  if (sessionState.source == NULL) {
    *env << "Failed to create GSM source\n";
    exit(1);
  }

  // Finally, start the streaming:
  *env << "Beginning streaming...\n";
  sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
}

void afterPlaying(void* /*clientData*/) {
  *env << "...done streaming\n";

  sessionState.sink->stopPlaying();

  // End this loop by closing the media:
#ifdef IMPLEMENT_RTSP_SERVER
  Medium::close(rtspServer);
#endif
  Medium::close(sessionState.rtcpInstance);
  Medium::close(sessionState.sink);
  delete sessionState.rtpGroupsock;
  Medium::close(sessionState.source);
  delete sessionState.rtcpGroupsock;

  // And start another loop:
  play();
}
live/testProgs/testMPEG1or2Video.sdp000444 001751 000000 00000000353 12265042432 017565 0ustar00rsfwheel000000 000000 v=0
o=- 49451 3 IN IP4 127.0.0.1
s=Test MPEG Video session
i=Parameters for the session streamed by "testMPEG1or2VideoStreamer"
t=0 0
a=tool:testMPEG1or2VideoStreamer
a=type:broadcast
m=video 8888 RTP/AVP 32
c=IN IP4 239.255.42.42/127
live/testProgs/openRTSP.cpp000444 001751 000000 00000006144 12265042432 016114 0ustar00rsfwheel000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See .)
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A RTSP client application that opens a RTSP URL argument, // and extracts and records the data from each incoming RTP stream. // // NOTE: If you want to develop your own RTSP client application (or embed RTSP client functionality into your own application), // then we don't recommend using this code as a model, because it is too complex (with many options). // Instead, we recommend using the "testRTSPClient" application code as a model. #include "playCommon.hh" RTSPClient* ourRTSPClient = NULL; Medium* createClient(UsageEnvironment& env, char const* url, int verbosityLevel, char const* applicationName) { extern portNumBits tunnelOverHTTPPortNum; return ourRTSPClient = RTSPClient::createNew(env, url, verbosityLevel, applicationName, tunnelOverHTTPPortNum); } void assignClient(Medium* client) { ourRTSPClient = (RTSPClient*)client; } void getOptions(RTSPClient::responseHandler* afterFunc) { ourRTSPClient->sendOptionsCommand(afterFunc, ourAuthenticator); } void getSDPDescription(RTSPClient::responseHandler* afterFunc) { ourRTSPClient->sendDescribeCommand(afterFunc, ourAuthenticator); } void setupSubsession(MediaSubsession* subsession, Boolean streamUsingTCP, Boolean forceMulticastOnUnspecified, RTSPClient::responseHandler* afterFunc) { ourRTSPClient->sendSetupCommand(*subsession, afterFunc, False, streamUsingTCP, forceMulticastOnUnspecified, ourAuthenticator); } void startPlayingSession(MediaSession* session, double start, double end, float scale, RTSPClient::responseHandler* afterFunc) { ourRTSPClient->sendPlayCommand(*session, afterFunc, start, end, scale, ourAuthenticator); } void startPlayingSession(MediaSession* session, char const* absStartTime, char const* absEndTime, float scale, RTSPClient::responseHandler* afterFunc) { ourRTSPClient->sendPlayCommand(*session, afterFunc, absStartTime, absEndTime, scale, ourAuthenticator); } void tearDownSession(MediaSession* session, RTSPClient::responseHandler* afterFunc) { ourRTSPClient->sendTeardownCommand(*session, afterFunc, ourAuthenticator); } void setUserAgentString(char const* userAgentString) { ourRTSPClient->setUserAgentString(userAgentString); } Boolean allowProxyServers = False; Boolean controlConnectionUsesTCP = True; Boolean supportCodecSelection = False; char const* clientProtocolName = "RTSP"; live/testProgs/testMPEG1or2Splitter.cpp000444 001751 000000 00000007212 12265042432 020322 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that splits a MPEG-1 or 2 Program Stream file into // video and audio output files. // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" #include char const* inputFileName = "in.mpg"; char const* outputFileName_video = "out_video.mpg"; char const* outputFileName_audio = "out_audio.mpg"; void afterPlaying(void* clientData); // forward // A structure to hold the state of the current session. // It is used in the "afterPlaying()" function to clean up the session. struct sessionState_t { MPEG1or2Demux* baseDemultiplexor; MediaSource* videoSource; MediaSource* audioSource; FileSink* videoSink; FileSink* audioSink; } sessionState; UsageEnvironment* env; int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Open the input file as a 'byte-stream file source': ByteStreamFileSource* inputSource = ByteStreamFileSource::createNew(*env, inputFileName); if (inputSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } // Create a MPEG demultiplexor that reads from that source. sessionState.baseDemultiplexor = MPEG1or2Demux::createNew(*env, inputSource); // Create, from this, our own sources (video and audio): sessionState.videoSource = sessionState.baseDemultiplexor->newVideoStream(); sessionState.audioSource = sessionState.baseDemultiplexor->newAudioStream(); // Create the data sinks (output files): sessionState.videoSink = FileSink::createNew(*env, outputFileName_video); sessionState.audioSink = FileSink::createNew(*env, outputFileName_audio); // Finally, start playing each sink. *env << "Beginning to read...\n"; sessionState.videoSink->startPlaying(*sessionState.videoSource, afterPlaying, sessionState.videoSink); sessionState.audioSink->startPlaying(*sessionState.audioSource, afterPlaying, sessionState.audioSink); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* clientData) { Medium* finishedSink = (Medium*)clientData; if (finishedSink == sessionState.videoSink) { *env << "No more video\n"; Medium::close(sessionState.videoSink); Medium::close(sessionState.videoSource); sessionState.videoSink = NULL; } else if (finishedSink == sessionState.audioSink) { *env << "No more audio\n"; Medium::close(sessionState.audioSink); Medium::close(sessionState.audioSource); sessionState.audioSink = NULL; } if (sessionState.videoSink == NULL && sessionState.audioSink == NULL) { *env << "...finished reading\n"; Medium::close(sessionState.baseDemultiplexor); exit(0); } } live/testProgs/testMPEG1or2AudioVideoStreamer.cpp000400 001751 000000 00000016656 12265042432 022253 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that reads a MPEG-1 or 2 Program Stream file, // splits it into Audio and Video Elementary Streams, // and streams both using RTP // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" #include "GroupsockHelper.hh" UsageEnvironment* env; char const* inputFileName = "test.mpg"; MPEG1or2Demux* mpegDemux; FramedSource* audioSource; FramedSource* videoSource; RTPSink* audioSink; RTPSink* videoSink; void play(); // forward // To stream using "source-specific multicast" (SSM), uncomment the following: //#define USE_SSM 1 #ifdef USE_SSM Boolean const isSSM = True; #else Boolean const isSSM = False; #endif // To set up an internal RTSP server, uncomment the following: //#define IMPLEMENT_RTSP_SERVER 1 // (Note that this RTSP server works for multicast only) // To stream *only* MPEG "I" frames (e.g., to reduce network bandwidth), // change the following "False" to "True": Boolean iFramesOnly = False; int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create 'groupsocks' for RTP and RTCP: char const* destinationAddressStr #ifdef USE_SSM = "232.255.42.42"; #else = "239.255.42.42"; // Note: This is a multicast address. If you wish to stream using // unicast instead, then replace this string with the unicast address // of the (single) destination. (You may also need to make a similar // change to the receiver program.) 
#endif const unsigned short rtpPortNumAudio = 6666; const unsigned short rtcpPortNumAudio = rtpPortNumAudio+1; const unsigned short rtpPortNumVideo = 8888; const unsigned short rtcpPortNumVideo = rtpPortNumVideo+1; const unsigned char ttl = 7; // low, in case routers don't admin scope struct in_addr destinationAddress; destinationAddress.s_addr = our_inet_addr(destinationAddressStr); const Port rtpPortAudio(rtpPortNumAudio); const Port rtcpPortAudio(rtcpPortNumAudio); const Port rtpPortVideo(rtpPortNumVideo); const Port rtcpPortVideo(rtcpPortNumVideo); Groupsock rtpGroupsockAudio(*env, destinationAddress, rtpPortAudio, ttl); Groupsock rtcpGroupsockAudio(*env, destinationAddress, rtcpPortAudio, ttl); Groupsock rtpGroupsockVideo(*env, destinationAddress, rtpPortVideo, ttl); Groupsock rtcpGroupsockVideo(*env, destinationAddress, rtcpPortVideo, ttl); #ifdef USE_SSM rtpGroupsockAudio.multicastSendOnly(); rtcpGroupsockAudio.multicastSendOnly(); rtpGroupsockVideo.multicastSendOnly(); rtcpGroupsockVideo.multicastSendOnly(); #endif // Create a 'MPEG Audio RTP' sink from the RTP 'groupsock': audioSink = MPEG1or2AudioRTPSink::createNew(*env, &rtpGroupsockAudio); // Create (and start) a 'RTCP instance' for this RTP sink: const unsigned estimatedSessionBandwidthAudio = 160; // in kbps; for RTCP b/w share const unsigned maxCNAMElen = 100; unsigned char CNAME[maxCNAMElen+1]; gethostname((char*)CNAME, maxCNAMElen); CNAME[maxCNAMElen] = '\0'; // just in case #ifdef IMPLEMENT_RTSP_SERVER RTCPInstance* audioRTCP = #endif RTCPInstance::createNew(*env, &rtcpGroupsockAudio, estimatedSessionBandwidthAudio, CNAME, audioSink, NULL /* we're a server */, isSSM); // Note: This starts RTCP running automatically // Create a 'MPEG Video RTP' sink from the RTP 'groupsock': videoSink = MPEG1or2VideoRTPSink::createNew(*env, &rtpGroupsockVideo); // Create (and start) a 'RTCP instance' for this RTP sink: const unsigned estimatedSessionBandwidthVideo = 4500; // in kbps; for RTCP b/w share #ifdef IMPLEMENT_RTSP_SERVER RTCPInstance* videoRTCP = #endif RTCPInstance::createNew(*env, &rtcpGroupsockVideo, estimatedSessionBandwidthVideo, CNAME, videoSink, NULL /* we're a server */, isSSM); // Note: This starts RTCP running automatically #ifdef IMPLEMENT_RTSP_SERVER RTSPServer* rtspServer = RTSPServer::createNew(*env); // Note that this (attempts to) start a server on the default RTSP server // port: 554. To use a different port number, add it as an extra // (optional) parameter to the "RTSPServer::createNew()" call above. if (rtspServer == NULL) { *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n"; exit(1); } ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName, "Session streamed by \"testMPEG1or2AudioVideoStreamer\"", isSSM); sms->addSubsession(PassiveServerMediaSubsession::createNew(*audioSink, audioRTCP)); sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, videoRTCP)); rtspServer->addServerMediaSession(sms); char* url = rtspServer->rtspURL(sms); *env << "Play this stream using the URL \"" << url << "\"\n"; delete[] url; #endif // Finally, start the streaming: *env << "Beginning streaming...\n"; play(); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* clientData) { // One of the sinks has ended playing. // Check whether any of the sources have a pending read. 
If so, // wait until its sink ends playing also: if (audioSource->isCurrentlyAwaitingData() || videoSource->isCurrentlyAwaitingData()) return; // Now that both sinks have ended, close both input sources, // and start playing again: *env << "...done reading from file\n"; audioSink->stopPlaying(); videoSink->stopPlaying(); // ensures that both are shut down Medium::close(audioSource); Medium::close(videoSource); Medium::close(mpegDemux); // Note: This also closes the input file that this source read from. // Start playing once again: play(); } void play() { // Open the input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, inputFileName); if (fileSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } // We must demultiplex Audio and Video Elementary Streams // from the input source: mpegDemux = MPEG1or2Demux::createNew(*env, fileSource); FramedSource* audioES = mpegDemux->newAudioStream(); FramedSource* videoES = mpegDemux->newVideoStream(); // Create a framer for each Elementary Stream: audioSource = MPEG1or2AudioStreamFramer::createNew(*env, audioES); videoSource = MPEG1or2VideoStreamFramer::createNew(*env, videoES, iFramesOnly); // Finally, start playing each sink. *env << "Beginning to read from file...\n"; videoSink->startPlaying(*videoSource, afterPlaying, videoSink); audioSink->startPlaying(*audioSource, afterPlaying, audioSink); } live/testProgs/testMPEG1or2VideoStreamer.cpp000444 001751 000000 00000013621 12265042432 021266 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. 
All rights reserved // A test program that reads a MPEG-1 or 2 Video Elementary Stream file, // and streams it using RTP // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" #include "GroupsockHelper.hh" // Uncomment the following if the input file is a MPEG Program Stream // rather than a MPEG Video Elementary Stream //#define SOURCE_IS_PROGRAM_STREAM 1 // To stream using "source-specific multicast" (SSM), uncomment the following: //#define USE_SSM 1 #ifdef USE_SSM Boolean const isSSM = True; #else Boolean const isSSM = False; #endif // To set up an internal RTSP server, uncomment the following: //#define IMPLEMENT_RTSP_SERVER 1 // (Note that this RTSP server works for multicast only) // To stream *only* MPEG "I" frames (e.g., to reduce network bandwidth), // change the following "False" to "True": Boolean iFramesOnly = False; UsageEnvironment* env; char const* inputFileName = "test.mpg"; #ifdef SOURCE_IS_PROGRAM_STREAM MPEG1or2Demux* mpegDemux; #endif MediaSource* videoSource; RTPSink* videoSink; void play(); // forward int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create 'groupsocks' for RTP and RTCP: char const* destinationAddressStr #ifdef USE_SSM = "232.255.42.42"; #else = "239.255.42.42"; // Note: This is a multicast address. If you wish to stream using // unicast instead, then replace this string with the unicast address // of the (single) destination. (You may also need to make a similar // change to the receiver program.) #endif const unsigned short rtpPortNum = 8888; const unsigned short rtcpPortNum = rtpPortNum+1; const unsigned char ttl = 7; // low, in case routers don't admin scope struct in_addr destinationAddress; destinationAddress.s_addr = our_inet_addr(destinationAddressStr); const Port rtpPort(rtpPortNum); const Port rtcpPort(rtcpPortNum); Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl); Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl); #ifdef USE_SSM rtpGroupsock.multicastSendOnly(); rtcpGroupsock.multicastSendOnly(); #endif // Create a 'MPEG Video RTP' sink from the RTP 'groupsock': videoSink = MPEG1or2VideoRTPSink::createNew(*env, &rtpGroupsock); // Create (and start) a 'RTCP instance' for this RTP sink: const unsigned estimatedSessionBandwidth = 4500; // in kbps; for RTCP b/w share const unsigned maxCNAMElen = 100; unsigned char CNAME[maxCNAMElen+1]; gethostname((char*)CNAME, maxCNAMElen); CNAME[maxCNAMElen] = '\0'; // just in case #ifdef IMPLEMENT_RTSP_SERVER RTCPInstance* rtcp = #endif RTCPInstance::createNew(*env, &rtcpGroupsock, estimatedSessionBandwidth, CNAME, videoSink, NULL /* we're a server */, isSSM); // Note: This starts RTCP running automatically #ifdef IMPLEMENT_RTSP_SERVER RTSPServer* rtspServer = RTSPServer::createNew(*env); // Note that this (attempts to) start a server on the default RTSP server // port: 554. To use a different port number, add it as an extra // (optional) parameter to the "RTSPServer::createNew()" call above. 
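  // For example (illustrative; this mirrors the call made in the
  // "testMPEG4VideoStreamer" demo elsewhere in this distribution):
  //   RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);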
if (rtspServer == NULL) { *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n"; exit(1); } ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName, "Session streamed by \"testMPEG1or2VideoStreamer\"", isSSM); sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp)); rtspServer->addServerMediaSession(sms); char* url = rtspServer->rtspURL(sms); *env << "Play this stream using the URL \"" << url << "\"\n"; delete[] url; #endif // Finally, start the streaming: *env << "Beginning streaming...\n"; play(); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "...done reading from file\n"; videoSink->stopPlaying(); Medium::close(videoSource); #ifdef SOURCE_IS_PROGRAM_STREAM Medium::close(mpegDemux); #endif // Note that this also closes the input file that this source read from. play(); } void play() { // Open the input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, inputFileName); if (fileSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } FramedSource* videoES; #ifdef SOURCE_IS_PROGRAM_STREAM // We must demultiplex a Video Elementary Stream from the input source: mpegDemux = MPEG1or2Demux::createNew(*env, fileSource); videoES = mpegDemux->newVideoStream(); #else // The input source is assumed to already be a Video Elementary Stream: videoES = fileSource; #endif // Create a framer for the Video Elementary Stream: videoSource = MPEG1or2VideoStreamFramer::createNew(*env, videoES, iFramesOnly); // Finally, start playing: *env << "Beginning to read from file...\n"; videoSink->startPlaying(*videoSource, afterPlaying, videoSink); } live/testProgs/testMPEG4VideoStreamer.cpp000444 001751 000000 00000010665 12265042432 020653 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that reads a MPEG-4 Video Elementary Stream file, // and streams it using RTP // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" #include "GroupsockHelper.hh" UsageEnvironment* env; char const* inputFileName = "test.m4e"; MPEG4VideoStreamFramer* videoSource; RTPSink* videoSink; void play(); // forward int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create 'groupsocks' for RTP and RTCP: struct in_addr destinationAddress; destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env); // Note: This is a multicast address. 
If you wish instead to stream // using unicast, then you should use the "testOnDemandRTSPServer" // test program - not this test program - as a model. const unsigned short rtpPortNum = 18888; const unsigned short rtcpPortNum = rtpPortNum+1; const unsigned char ttl = 255; const Port rtpPort(rtpPortNum); const Port rtcpPort(rtcpPortNum); Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl); rtpGroupsock.multicastSendOnly(); // we're a SSM source Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl); rtcpGroupsock.multicastSendOnly(); // we're a SSM source // Create a 'MPEG-4 Video RTP' sink from the RTP 'groupsock': videoSink = MPEG4ESVideoRTPSink::createNew(*env, &rtpGroupsock, 96); // Create (and start) a 'RTCP instance' for this RTP sink: const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share const unsigned maxCNAMElen = 100; unsigned char CNAME[maxCNAMElen+1]; gethostname((char*)CNAME, maxCNAMElen); CNAME[maxCNAMElen] = '\0'; // just in case RTCPInstance* rtcp = RTCPInstance::createNew(*env, &rtcpGroupsock, estimatedSessionBandwidth, CNAME, videoSink, NULL /* we're a server */, True /* we're a SSM source */); // Note: This starts RTCP running automatically RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554); if (rtspServer == NULL) { *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n"; exit(1); } ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName, "Session streamed by \"testMPEG4VideoStreamer\"", True /*SSM*/); sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp)); rtspServer->addServerMediaSession(sms); char* url = rtspServer->rtspURL(sms); *env << "Play this stream using the URL \"" << url << "\"\n"; delete[] url; // Start the streaming: *env << "Beginning streaming...\n"; play(); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "...done reading from file\n"; videoSink->stopPlaying(); Medium::close(videoSource); // Note that this also closes the input file that this source read from. // Start playing once again: play(); } void play() { // Open the input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, inputFileName); if (fileSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } FramedSource* videoES = fileSource; // Create a framer for the Video Elementary Stream: videoSource = MPEG4VideoStreamFramer::createNew(*env, videoES); // Finally, start playing: *env << "Beginning to read from file...\n"; videoSink->startPlaying(*videoSource, afterPlaying, videoSink); } live/testProgs/testMPEG2TransportStreamer.cpp000444 001751 000000 00000012753 12265042432 021577 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that reads a MPEG-2 Transport Stream file, // and streams it using RTP // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" #include "GroupsockHelper.hh" // To stream using "source-specific multicast" (SSM), uncomment the following: //#define USE_SSM 1 #ifdef USE_SSM Boolean const isSSM = True; #else Boolean const isSSM = False; #endif // To set up an internal RTSP server, uncomment the following: //#define IMPLEMENT_RTSP_SERVER 1 // (Note that this RTSP server works for multicast only) #define TRANSPORT_PACKET_SIZE 188 #define TRANSPORT_PACKETS_PER_NETWORK_PACKET 7 // The product of these two numbers must be enough to fit within a network packet UsageEnvironment* env; char const* inputFileName = "test.ts"; FramedSource* videoSource; RTPSink* videoSink; void play(); // forward int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create 'groupsocks' for RTP and RTCP: char const* destinationAddressStr #ifdef USE_SSM = "232.255.42.42"; #else = "239.255.42.42"; // Note: This is a multicast address. If you wish to stream using // unicast instead, then replace this string with the unicast address // of the (single) destination. (You may also need to make a similar // change to the receiver program.) #endif const unsigned short rtpPortNum = 1234; const unsigned short rtcpPortNum = rtpPortNum+1; const unsigned char ttl = 7; // low, in case routers don't admin scope struct in_addr destinationAddress; destinationAddress.s_addr = our_inet_addr(destinationAddressStr); const Port rtpPort(rtpPortNum); const Port rtcpPort(rtcpPortNum); Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl); Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl); #ifdef USE_SSM rtpGroupsock.multicastSendOnly(); rtcpGroupsock.multicastSendOnly(); #endif // Create an appropriate 'RTP sink' from the RTP 'groupsock': videoSink = SimpleRTPSink::createNew(*env, &rtpGroupsock, 33, 90000, "video", "MP2T", 1, True, False /*no 'M' bit*/); // Create (and start) a 'RTCP instance' for this RTP sink: const unsigned estimatedSessionBandwidth = 5000; // in kbps; for RTCP b/w share const unsigned maxCNAMElen = 100; unsigned char CNAME[maxCNAMElen+1]; gethostname((char*)CNAME, maxCNAMElen); CNAME[maxCNAMElen] = '\0'; // just in case #ifdef IMPLEMENT_RTSP_SERVER RTCPInstance* rtcp = #endif RTCPInstance::createNew(*env, &rtcpGroupsock, estimatedSessionBandwidth, CNAME, videoSink, NULL /* we're a server */, isSSM); // Note: This starts RTCP running automatically #ifdef IMPLEMENT_RTSP_SERVER RTSPServer* rtspServer = RTSPServer::createNew(*env); // Note that this (attempts to) start a server on the default RTSP server // port: 554. To use a different port number, add it as an extra // (optional) parameter to the "RTSPServer::createNew()" call above. 
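  // (Added note:) On Unix-like systems, binding to port 554 usually requires
  // elevated privileges, because it lies below 1024; passing an explicit port
  // number >= 1024 (e.g., "RTSPServer::createNew(*env, 8554)") avoids this.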
if (rtspServer == NULL) { *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n"; exit(1); } ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName, "Session streamed by \"testMPEG2TransportStreamer\"", isSSM); sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp)); rtspServer->addServerMediaSession(sms); char* url = rtspServer->rtspURL(sms); *env << "Play this stream using the URL \"" << url << "\"\n"; delete[] url; #endif // Finally, start the streaming: *env << "Beginning streaming...\n"; play(); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "...done reading from file\n"; videoSink->stopPlaying(); Medium::close(videoSource); // Note that this also closes the input file that this source read from. play(); } void play() { unsigned const inputDataChunkSize = TRANSPORT_PACKETS_PER_NETWORK_PACKET*TRANSPORT_PACKET_SIZE; // Open the input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, inputFileName, inputDataChunkSize); if (fileSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } // Create a 'framer' for the input source (to give us proper inter-packet gaps): videoSource = MPEG2TransportStreamFramer::createNew(*env, fileSource); // Finally, start playing: *env << "Beginning to read from file...\n"; videoSink->startPlaying(*videoSource, afterPlaying, videoSink); } live/testProgs/testRTSPClient.cpp000444 001751 000000 00000053242 12265042432 017272 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A demo application, showing how to create and run a RTSP client (that can potentially receive multiple streams concurrently). // // NOTE: This code - although it builds a running application - is intended only to illustrate how to develop your own RTSP // client application. 
For a full-featured RTSP client application - with much more functionality, and many options - see // "openRTSP": http://www.live555.com/openRTSP/ #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" // Forward function definitions: // RTSP 'response handlers': void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString); void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString); void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString); // Other event handler functions: void subsessionAfterPlaying(void* clientData); // called when a stream's subsession (e.g., audio or video substream) ends void subsessionByeHandler(void* clientData); // called when a RTCP "BYE" is received for a subsession void streamTimerHandler(void* clientData); // called at the end of a stream's expected duration (if the stream has not already signaled its end using a RTCP "BYE") // The main streaming routine (for each "rtsp://" URL): void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL); // Used to iterate through each stream's 'subsessions', setting up each one: void setupNextSubsession(RTSPClient* rtspClient); // Used to shut down and close a stream (including its "RTSPClient" object): void shutdownStream(RTSPClient* rtspClient, int exitCode = 1); // A function that outputs a string that identifies each stream (for debugging output). Modify this if you wish: UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient) { return env << "[URL:\"" << rtspClient.url() << "\"]: "; } // A function that outputs a string that identifies each subsession (for debugging output). Modify this if you wish: UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession) { return env << subsession.mediumName() << "/" << subsession.codecName(); } void usage(UsageEnvironment& env, char const* progName) { env << "Usage: " << progName << " ... \n"; env << "\t(where each is a \"rtsp://\" URL)\n"; } char eventLoopWatchVariable = 0; int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler); // We need at least one "rtsp://" URL argument: if (argc < 2) { usage(*env, argv[0]); return 1; } // There are argc-1 URLs: argv[1] through argv[argc-1]. Open and start streaming each one: for (int i = 1; i <= argc-1; ++i) { openURL(*env, argv[0], argv[i]); } // All subsequent activity takes place within the event loop: env->taskScheduler().doEventLoop(&eventLoopWatchVariable); // This function call does not return, unless, at some point in time, "eventLoopWatchVariable" gets set to something non-zero. 
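  // For example (an illustrative sketch, not part of the original program,
  // assuming <signal.h> is included), a signal handler installed before the
  // call above could be used to request a clean exit from the event loop:
  //   void sigintHandler(int) { eventLoopWatchVariable = 1; }
  //   signal(SIGINT, sigintHandler);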
return 0; // If you choose to continue the application past this point (i.e., if you comment out the "return 0;" statement above), // and if you don't intend to do anything more with the "TaskScheduler" and "UsageEnvironment" objects, // then you can also reclaim the (small) memory used by these objects by uncommenting the following code: /* env->reclaim(); env = NULL; delete scheduler; scheduler = NULL; */ } // Define a class to hold per-stream state that we maintain throughout each stream's lifetime: class StreamClientState { public: StreamClientState(); virtual ~StreamClientState(); public: MediaSubsessionIterator* iter; MediaSession* session; MediaSubsession* subsession; TaskToken streamTimerTask; double duration; }; // If you're streaming just a single stream (i.e., just from a single URL, once), then you can define and use just a single // "StreamClientState" structure, as a global variable in your application. However, because - in this demo application - we're // showing how to play multiple streams, concurrently, we can't do that. Instead, we have to have a separate "StreamClientState" // structure for each "RTSPClient". To do this, we subclass "RTSPClient", and add a "StreamClientState" field to the subclass: class ourRTSPClient: public RTSPClient { public: static ourRTSPClient* createNew(UsageEnvironment& env, char const* rtspURL, int verbosityLevel = 0, char const* applicationName = NULL, portNumBits tunnelOverHTTPPortNum = 0); protected: ourRTSPClient(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum); // called only by createNew(); virtual ~ourRTSPClient(); public: StreamClientState scs; }; // Define a data sink (a subclass of "MediaSink") to receive the data for each subsession (i.e., each audio or video 'substream'). // In practice, this might be a class (or a chain of classes) that decodes and then renders the incoming audio or video. // Or it might be a "FileSink", for outputting the received data into a file (as is done by the "openRTSP" application). // In this example code, however, we define a simple 'dummy' sink that receives incoming data, but does nothing with it. class DummySink: public MediaSink { public: static DummySink* createNew(UsageEnvironment& env, MediaSubsession& subsession, // identifies the kind of data that's being received char const* streamId = NULL); // identifies the stream itself (optional) private: DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId); // called only by "createNew()" virtual ~DummySink(); static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds); private: // redefined virtual functions: virtual Boolean continuePlaying(); private: u_int8_t* fReceiveBuffer; MediaSubsession& fSubsession; char* fStreamId; }; #define RTSP_CLIENT_VERBOSITY_LEVEL 1 // by default, print verbose output from each "RTSPClient" static unsigned rtspClientCount = 0; // Counts how many streams (i.e., "RTSPClient"s) are currently in use. void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL) { // Begin by creating a "RTSPClient" object. Note that there is a separate "RTSPClient" object for each stream that we wish // to receive (even if more than one stream uses the same "rtsp://" URL).
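  // For example (illustrative): calling "openURL()" twice from "main()" with the
  // same URL creates two independent "RTSPClient"s, each receiving its own copy
  // of the stream:
  //   openURL(*env, argv[0], argv[1]);
  //   openURL(*env, argv[0], argv[1]);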
RTSPClient* rtspClient = ourRTSPClient::createNew(env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName); if (rtspClient == NULL) { env << "Failed to create a RTSP client for URL \"" << rtspURL << "\": " << env.getResultMsg() << "\n"; return; } ++rtspClientCount; // Next, send a RTSP "DESCRIBE" command, to get a SDP description for the stream. // Note that this command - like all RTSP commands - is sent asynchronously; we do not block, waiting for a response. // Instead, the following function call returns immediately, and we handle the RTSP response later, from within the event loop: rtspClient->sendDescribeCommand(continueAfterDESCRIBE); } // Implementation of the RTSP 'response handlers': void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString) { do { UsageEnvironment& env = rtspClient->envir(); // alias StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias if (resultCode != 0) { env << *rtspClient << "Failed to get a SDP description: " << resultString << "\n"; delete[] resultString; break; } char* const sdpDescription = resultString; env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n"; // Create a media session object from this SDP description: scs.session = MediaSession::createNew(env, sdpDescription); delete[] sdpDescription; // because we don't need it anymore if (scs.session == NULL) { env << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env.getResultMsg() << "\n"; break; } else if (!scs.session->hasSubsessions()) { env << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)\n"; break; } // Then, create and set up our data source objects for the session. We do this by iterating over the session's 'subsessions', // calling "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, on each one. // (Each 'subsession' will have its own data source.) scs.iter = new MediaSubsessionIterator(*scs.session); setupNextSubsession(rtspClient); return; } while (0); // An unrecoverable error occurred with this stream. shutdownStream(rtspClient); } // By default, we request that the server stream its data using RTP/UDP. // If, instead, you want to request that the server stream via RTP-over-TCP, change the following to True: #define REQUEST_STREAMING_OVER_TCP False void setupNextSubsession(RTSPClient* rtspClient) { UsageEnvironment& env = rtspClient->envir(); // alias StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias scs.subsession = scs.iter->next(); if (scs.subsession != NULL) { if (!scs.subsession->initiate()) { env << *rtspClient << "Failed to initiate the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n"; setupNextSubsession(rtspClient); // give up on this subsession; go to the next one } else { env << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1 << ")\n"; // Continue setting up this subsession, by sending a RTSP "SETUP" command: rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, REQUEST_STREAMING_OVER_TCP); } return; } // We've finished setting up all of the subsessions. 
Now, send a RTSP "PLAY" command to start the streaming: if (scs.session->absStartTime() != NULL) { // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command: rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, scs.session->absStartTime(), scs.session->absEndTime()); } else { scs.duration = scs.session->playEndTime() - scs.session->playStartTime(); rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY); } } void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) { do { UsageEnvironment& env = rtspClient->envir(); // alias StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias if (resultCode != 0) { env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString << "\n"; break; } env << *rtspClient << "Set up the \"" << *scs.subsession << "\" subsession (client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1 << ")\n"; // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it. // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later, // after we've sent a RTSP "PLAY" command.) scs.subsession->sink = DummySink::createNew(env, *scs.subsession, rtspClient->url()); // perhaps use your own custom "MediaSink" subclass instead if (scs.subsession->sink == NULL) { env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n"; break; } env << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession\n"; scs.subsession->miscPtr = rtspClient; // a hack to let subsession handle functions get the "RTSPClient" from the subsession scs.subsession->sink->startPlaying(*(scs.subsession->readSource()), subsessionAfterPlaying, scs.subsession); // Also set a handler to be called if a RTCP "BYE" arrives for this subsession: if (scs.subsession->rtcpInstance() != NULL) { scs.subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, scs.subsession); } } while (0); delete[] resultString; // Set up the next subsession, if any: setupNextSubsession(rtspClient); } void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString) { Boolean success = False; do { UsageEnvironment& env = rtspClient->envir(); // alias StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias if (resultCode != 0) { env << *rtspClient << "Failed to start playing session: " << resultString << "\n"; break; } // Set a timer to be handled at the end of the stream's expected duration (if the stream does not already signal its end // using a RTCP "BYE"). This is optional. If, instead, you want to keep the stream active - e.g., so you can later // 'seek' back within it and do another RTSP "PLAY" - then you can omit this code. // (Alternatively, if you don't want to receive the entire stream, you could set this timer for some shorter value.) if (scs.duration > 0) { unsigned const delaySlop = 2; // number of seconds extra to delay, after the stream's expected duration. (This is optional.) 
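      // (Added note:) "scs.duration" is expressed in seconds, but
      // "scheduleDelayedTask()" below takes its delay in microseconds - hence
      // the "*1000000" conversion that follows.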
scs.duration += delaySlop; unsigned uSecsToDelay = (unsigned)(scs.duration*1000000); scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient); } env << *rtspClient << "Started playing session"; if (scs.duration > 0) { env << " (for up to " << scs.duration << " seconds)"; } env << "...\n"; success = True; } while (0); delete[] resultString; if (!success) { // An unrecoverable error occurred with this stream. shutdownStream(rtspClient); } } // Implementation of the other event handlers: void subsessionAfterPlaying(void* clientData) { MediaSubsession* subsession = (MediaSubsession*)clientData; RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr); // Begin by closing this subsession's stream: Medium::close(subsession->sink); subsession->sink = NULL; // Next, check whether *all* subsessions' streams have now been closed: MediaSession& session = subsession->parentSession(); MediaSubsessionIterator iter(session); while ((subsession = iter.next()) != NULL) { if (subsession->sink != NULL) return; // this subsession is still active } // All subsessions' streams have now been closed, so shutdown the client: shutdownStream(rtspClient); } void subsessionByeHandler(void* clientData) { MediaSubsession* subsession = (MediaSubsession*)clientData; RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr; UsageEnvironment& env = rtspClient->envir(); // alias env << *rtspClient << "Received RTCP \"BYE\" on \"" << *subsession << "\" subsession\n"; // Now act as if the subsession had closed: subsessionAfterPlaying(subsession); } void streamTimerHandler(void* clientData) { ourRTSPClient* rtspClient = (ourRTSPClient*)clientData; StreamClientState& scs = rtspClient->scs; // alias scs.streamTimerTask = NULL; // Shut down the stream: shutdownStream(rtspClient); } void shutdownStream(RTSPClient* rtspClient, int exitCode) { UsageEnvironment& env = rtspClient->envir(); // alias StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias // First, check whether any subsessions have still to be closed: if (scs.session != NULL) { Boolean someSubsessionsWereActive = False; MediaSubsessionIterator iter(*scs.session); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { if (subsession->sink != NULL) { Medium::close(subsession->sink); subsession->sink = NULL; if (subsession->rtcpInstance() != NULL) { subsession->rtcpInstance()->setByeHandler(NULL, NULL); // in case the server sends a RTCP "BYE" while handling "TEARDOWN" } someSubsessionsWereActive = True; } } if (someSubsessionsWereActive) { // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the stream. // Don't bother handling the response to the "TEARDOWN". rtspClient->sendTeardownCommand(*scs.session, NULL); } } env << *rtspClient << "Closing the stream.\n"; Medium::close(rtspClient); // Note that this will also cause this stream's "StreamClientState" structure to get reclaimed. if (--rtspClientCount == 0) { // The final stream has ended, so exit the application now. // (Of course, if you're embedding this code into your own application, you might want to comment this out, // and replace it with "eventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, and continue running "main()".) 
exit(exitCode); } } // Implementation of "ourRTSPClient": ourRTSPClient* ourRTSPClient::createNew(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum) { return new ourRTSPClient(env, rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum); } ourRTSPClient::ourRTSPClient(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum) : RTSPClient(env,rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum, -1) { } ourRTSPClient::~ourRTSPClient() { } // Implementation of "StreamClientState": StreamClientState::StreamClientState() : iter(NULL), session(NULL), subsession(NULL), streamTimerTask(NULL), duration(0.0) { } StreamClientState::~StreamClientState() { delete iter; if (session != NULL) { // We also need to delete "session", and unschedule "streamTimerTask" (if set) UsageEnvironment& env = session->envir(); // alias env.taskScheduler().unscheduleDelayedTask(streamTimerTask); Medium::close(session); } } // Implementation of "DummySink": // Even though we're not going to be doing anything with the incoming data, we still need to receive it. // Define the size of the buffer that we'll use: #define DUMMY_SINK_RECEIVE_BUFFER_SIZE 100000 DummySink* DummySink::createNew(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId) { return new DummySink(env, subsession, streamId); } DummySink::DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId) : MediaSink(env), fSubsession(subsession) { fStreamId = strDup(streamId); fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE]; } DummySink::~DummySink() { delete[] fReceiveBuffer; delete[] fStreamId; } void DummySink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds) { DummySink* sink = (DummySink*)clientData; sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); } // If you don't want to see debugging output for each received frame, then comment out the following line: #define DEBUG_PRINT_EACH_RECEIVED_FRAME 1 void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned /*durationInMicroseconds*/) { // We've just received a frame of data. (Optionally) print out information about it: #ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; "; envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes"; if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)"; char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec); envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." 
<< uSecsStr; if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) { envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized } #ifdef DEBUG_PRINT_NPT envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime); #endif envir() << "\n"; #endif // Then continue, to request the next frame of data: continuePlaying(); } Boolean DummySink::continuePlaying() { if (fSource == NULL) return False; // sanity check (should not happen) // Request the next frame of data from our input source. "afterGettingFrame()" will get called later, when it arrives: fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE, afterGettingFrame, this, onSourceClosure, this); return True; } live/testProgs/playCommon.cpp000444 001751 000000 00000142744 12265042432 016567 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A common framework, used for the "openRTSP" and "playSIP" applications // Implementation // // NOTE: If you want to develop your own RTSP client application (or embed RTSP client functionality into your own application), // then we don't recommend using this code as a model, because it is too complex (with many options). // Instead, we recommend using the "testRTSPClient" application code as a model. 
#include "playCommon.hh" #include "BasicUsageEnvironment.hh" #include "GroupsockHelper.hh" #if defined(__WIN32__) || defined(_WIN32) #define snprintf _snprintf #else #include #define USE_SIGNALS 1 #endif // Forward function definitions: void continueAfterClientCreation0(RTSPClient* client, Boolean requestStreamingOverTCP); void continueAfterClientCreation1(); void continueAfterOPTIONS(RTSPClient* client, int resultCode, char* resultString); void continueAfterDESCRIBE(RTSPClient* client, int resultCode, char* resultString); void continueAfterSETUP(RTSPClient* client, int resultCode, char* resultString); void continueAfterPLAY(RTSPClient* client, int resultCode, char* resultString); void continueAfterTEARDOWN(RTSPClient* client, int resultCode, char* resultString); void createOutputFiles(char const* periodicFilenameSuffix); void createPeriodicOutputFiles(); void setupStreams(); void closeMediaSinks(); void subsessionAfterPlaying(void* clientData); void subsessionByeHandler(void* clientData); void sessionAfterPlaying(void* clientData = NULL); void sessionTimerHandler(void* clientData); void periodicFileOutputTimerHandler(void* clientData); void shutdown(int exitCode = 1); void signalHandlerShutdown(int sig); void checkForPacketArrival(void* clientData); void checkInterPacketGaps(void* clientData); void beginQOSMeasurement(); char const* progName; UsageEnvironment* env; Medium* ourClient = NULL; Authenticator* ourAuthenticator = NULL; char const* streamURL = NULL; MediaSession* session = NULL; TaskToken sessionTimerTask = NULL; TaskToken arrivalCheckTimerTask = NULL; TaskToken interPacketGapCheckTimerTask = NULL; TaskToken qosMeasurementTimerTask = NULL; TaskToken periodicFileOutputTask = NULL; Boolean createReceivers = True; Boolean outputQuickTimeFile = False; Boolean generateMP4Format = False; QuickTimeFileSink* qtOut = NULL; Boolean outputAVIFile = False; AVIFileSink* aviOut = NULL; Boolean audioOnly = False; Boolean videoOnly = False; char const* singleMedium = NULL; int verbosityLevel = 1; // by default, print verbose output double duration = 0; double durationSlop = -1.0; // extra seconds to play at the end double initialSeekTime = 0.0f; char* initialAbsoluteSeekTime = NULL; float scale = 1.0f; double endTime; unsigned interPacketGapMaxTime = 0; unsigned totNumPacketsReceived = ~0; // used if checking inter-packet gaps Boolean playContinuously = False; int simpleRTPoffsetArg = -1; Boolean sendOptionsRequest = True; Boolean sendOptionsRequestOnly = False; Boolean oneFilePerFrame = False; Boolean notifyOnPacketArrival = False; Boolean streamUsingTCP = False; Boolean forceMulticastOnUnspecified = False; unsigned short desiredPortNum = 0; portNumBits tunnelOverHTTPPortNum = 0; char* username = NULL; char* password = NULL; char* proxyServerName = NULL; unsigned short proxyServerPortNum = 0; unsigned char desiredAudioRTPPayloadFormat = 0; char* mimeSubtype = NULL; unsigned short movieWidth = 240; // default Boolean movieWidthOptionSet = False; unsigned short movieHeight = 180; // default Boolean movieHeightOptionSet = False; unsigned movieFPS = 15; // default Boolean movieFPSOptionSet = False; char const* fileNamePrefix = ""; unsigned fileSinkBufferSize = 100000; unsigned socketInputBufferSize = 0; Boolean packetLossCompensate = False; Boolean syncStreams = False; Boolean generateHintTracks = False; Boolean waitForResponseToTEARDOWN = True; unsigned qosMeasurementIntervalMS = 0; // 0 means: Don't output QOS data char* userAgent = NULL; unsigned fileOutputInterval = 0; // seconds unsigned 
fileOutputSecondsSoFar = 0; // seconds Boolean createHandlerServerForREGISTERCommand = False; portNumBits handlerServerForREGISTERCommandPortNum = 0; HandlerServerForREGISTERCommand* handlerServerForREGISTERCommand; char* usernameForREGISTER = NULL; char* passwordForREGISTER = NULL; UserAuthenticationDatabase* authDBForREGISTER = NULL; struct timeval startTime; void usage() { *env << "Usage: " << progName << " [-p ] [-r|-q|-4|-i] [-a|-v] [-V] [-d ] [-D [-c] [-S ] [-n] [-O]" << (controlConnectionUsesTCP ? " [-t|-T ]" : "") << " [-u " << (allowProxyServers ? " [ []]" : "") << "]" << (supportCodecSelection ? " [-A |-M ]" : "") << " [-s ]|[-U ] [-z ] [-g user-agent]" << " [-k ]" << " [-P ]" << " [-w -h ] [-f ] [-y] [-H] [-Q []] [-F ] [-b ] [-B ] [-I ] [-m] [|-R []] (or " << progName << " -o [-V] )\n"; shutdown(); } int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); progName = argv[0]; gettimeofday(&startTime, NULL); #ifdef USE_SIGNALS // Allow ourselves to be shut down gracefully by a SIGHUP or a SIGUSR1: signal(SIGHUP, signalHandlerShutdown); signal(SIGUSR1, signalHandlerShutdown); #endif // unfortunately we can't use getopt() here, as Windoze doesn't have it while (argc > 1) { char* const opt = argv[1]; if (opt[0] != '-') { if (argc == 2) break; // only the URL is left usage(); } switch (opt[1]) { case 'p': { // specify start port number int portArg; if (sscanf(argv[2], "%d", &portArg) != 1) { usage(); } if (portArg <= 0 || portArg >= 65536 || portArg&1) { *env << "bad port number: " << portArg << " (must be even, and in the range (0,65536))\n"; usage(); } desiredPortNum = (unsigned short)portArg; ++argv; --argc; break; } case 'r': { // do not receive data (instead, just 'play' the stream(s)) createReceivers = False; break; } case 'q': { // output a QuickTime file (to stdout) outputQuickTimeFile = True; break; } case '4': { // output a 'mp4'-format file (to stdout) outputQuickTimeFile = True; generateMP4Format = True; break; } case 'i': { // output an AVI file (to stdout) outputAVIFile = True; break; } case 'I': { // specify input interface... 
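      // (Added note:) "ReceivingInterfaceAddr", assigned below, is a global
      // variable declared by the "groupsock" library (in "GroupsockHelper.hh");
      // it selects the network interface on which subsequently created sockets
      // will receive packets.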
NetAddressList addresses(argv[2]); if (addresses.numAddresses() == 0) { *env << "Failed to find network address for \"" << argv[2] << "\""; break; } ReceivingInterfaceAddr = *(unsigned*)(addresses.firstAddress()->data()); ++argv; --argc; break; } case 'a': { // receive/record an audio stream only audioOnly = True; singleMedium = "audio"; break; } case 'v': { // receive/record a video stream only videoOnly = True; singleMedium = "video"; break; } case 'V': { // disable verbose output verbosityLevel = 0; break; } case 'd': { // specify duration, or how much to delay after end time float arg; if (sscanf(argv[2], "%g", &arg) != 1) { usage(); } if (argv[2][0] == '-') { // not "arg<0", in case argv[2] was "-0" // a 'negative' argument was specified; use this for "durationSlop": duration = 0; // use whatever's in the SDP durationSlop = -arg; } else { duration = arg; durationSlop = 0; } ++argv; --argc; break; } case 'D': { // specify maximum number of seconds to wait for packets: if (sscanf(argv[2], "%u", &interPacketGapMaxTime) != 1) { usage(); } ++argv; --argc; break; } case 'c': { // play continuously playContinuously = True; break; } case 'S': { // specify an offset to use with "SimpleRTPSource"s if (sscanf(argv[2], "%d", &simpleRTPoffsetArg) != 1) { usage(); } if (simpleRTPoffsetArg < 0) { *env << "offset argument to \"-S\" must be >= 0\n"; usage(); } ++argv; --argc; break; } case 'm': { // output multiple files - one for each frame oneFilePerFrame = True; break; } case 'n': { // notify the user when the first data packet arrives notifyOnPacketArrival = True; break; } case 'O': { // Don't send an "OPTIONS" request before "DESCRIBE" sendOptionsRequest = False; break; } case 'o': { // Send only the "OPTIONS" request to the server sendOptionsRequestOnly = True; break; } case 'P': { // specify an interval (in seconds) between writing successive output files int fileOutputIntervalInt; if (sscanf(argv[2], "%d", &fileOutputIntervalInt) != 1 || fileOutputIntervalInt <= 0) { usage(); } fileOutputInterval = (unsigned)fileOutputIntervalInt; ++argv; --argc; break; } case 't': { // stream RTP and RTCP over the TCP 'control' connection if (controlConnectionUsesTCP) { streamUsingTCP = True; } else { usage(); } break; } case 'T': { // stream RTP and RTCP over a HTTP connection if (controlConnectionUsesTCP) { if (argc > 3 && argv[2][0] != '-') { // The next argument is the HTTP server port number: if (sscanf(argv[2], "%hu", &tunnelOverHTTPPortNum) == 1 && tunnelOverHTTPPortNum > 0) { ++argv; --argc; break; } } } // If we get here, the option was specified incorrectly: usage(); break; } case 'u': { // specify a username and password if (argc < 4) usage(); // there's no argv[3] (for the "password") username = argv[2]; password = argv[3]; argv+=2; argc-=2; if (allowProxyServers && argc > 3 && argv[2][0] != '-') { // The next argument is the name of a proxy server: proxyServerName = argv[2]; ++argv; --argc; if (argc > 3 && argv[2][0] != '-') { // The next argument is the proxy server port number: if (sscanf(argv[2], "%hu", &proxyServerPortNum) != 1) { usage(); } ++argv; --argc; } } ourAuthenticator = new Authenticator(username, password); break; } case 'k': { // specify a username and password to be used to authentication an incoming "REGISTER" command (for use with -R) if (argc < 4) usage(); // there's no argv[3] (for the "password") usernameForREGISTER = argv[2]; passwordForREGISTER = argv[3]; argv+=2; argc-=2; if (authDBForREGISTER == NULL) authDBForREGISTER = new UserAuthenticationDatabase; 
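      // (Added note:) "addUserRecord()" may be called more than once, so a single
      // "UserAuthenticationDatabase" can hold several username/password pairs;
      // this program registers just the one pair given on the command line.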
authDBForREGISTER->addUserRecord(usernameForREGISTER, passwordForREGISTER); break; } case 'A': { // specify a desired audio RTP payload format unsigned formatArg; if (sscanf(argv[2], "%u", &formatArg) != 1 || formatArg >= 96) { usage(); } desiredAudioRTPPayloadFormat = (unsigned char)formatArg; ++argv; --argc; break; } case 'M': { // specify a MIME subtype for a dynamic RTP payload type mimeSubtype = argv[2]; if (desiredAudioRTPPayloadFormat==0) desiredAudioRTPPayloadFormat =96; ++argv; --argc; break; } case 'w': { // specify a width (pixels) for an output QuickTime or AVI movie if (sscanf(argv[2], "%hu", &movieWidth) != 1) { usage(); } movieWidthOptionSet = True; ++argv; --argc; break; } case 'h': { // specify a height (pixels) for an output QuickTime or AVI movie if (sscanf(argv[2], "%hu", &movieHeight) != 1) { usage(); } movieHeightOptionSet = True; ++argv; --argc; break; } case 'f': { // specify a frame rate (per second) for an output QT or AVI movie if (sscanf(argv[2], "%u", &movieFPS) != 1) { usage(); } movieFPSOptionSet = True; ++argv; --argc; break; } case 'F': { // specify a prefix for the audio and video output files fileNamePrefix = argv[2]; ++argv; --argc; break; } case 'g': { // specify a user agent name to use in outgoing requests userAgent = argv[2]; ++argv; --argc; break; } case 'b': { // specify the size of buffers for "FileSink"s if (sscanf(argv[2], "%u", &fileSinkBufferSize) != 1) { usage(); } ++argv; --argc; break; } case 'B': { // specify the size of input socket buffers if (sscanf(argv[2], "%u", &socketInputBufferSize) != 1) { usage(); } ++argv; --argc; break; } // Note: The following option is deprecated, and may someday be removed: case 'l': { // try to compensate for packet loss by repeating frames packetLossCompensate = True; break; } case 'y': { // synchronize audio and video streams syncStreams = True; break; } case 'H': { // generate hint tracks (as well as the regular data tracks) generateHintTracks = True; break; } case 'Q': { // output QOS measurements qosMeasurementIntervalMS = 1000; // default: 1 second if (argc > 3 && argv[2][0] != '-') { // The next argument is the measurement interval, // in multiples of 100 ms if (sscanf(argv[2], "%u", &qosMeasurementIntervalMS) != 1) { usage(); } qosMeasurementIntervalMS *= 100; ++argv; --argc; } break; } case 's': { // specify initial seek time (trick play) double arg; if (sscanf(argv[2], "%lg", &arg) != 1 || arg < 0) { usage(); } initialSeekTime = arg; ++argv; --argc; break; } case 'U': { // specify initial absolute seek time (trick play), using a string of the form "YYYYMMDDTHHMMSSZ" or "YYYYMMDDTHHMMSS.Z" initialAbsoluteSeekTime = argv[2]; ++argv; --argc; break; } case 'z': { // scale (trick play) float arg; if (sscanf(argv[2], "%g", &arg) != 1 || arg == 0.0f) { usage(); } scale = arg; ++argv; --argc; break; } case 'R': { // set up a handler server for incoming "REGISTER" commands createHandlerServerForREGISTERCommand = True; if (argc > 2 && argv[2][0] != '-') { // The next argument is the REGISTER handler server port number: if (sscanf(argv[2], "%hu", &handlerServerForREGISTERCommandPortNum) == 1 && handlerServerForREGISTERCommandPortNum > 0) { ++argv; --argc; break; } } break; } case 'C': { forceMulticastOnUnspecified = True; break; } default: { *env << "Invalid option: " << opt << "\n"; usage(); break; } } ++argv; --argc; } // There must be exactly one "rtsp://" URL at the end (unless '-R' was used, in which case there's no URL) if (!( (argc == 2 && !createHandlerServerForREGISTERCommand) || (argc == 1 && 
createHandlerServerForREGISTERCommand) )) usage(); if (outputQuickTimeFile && outputAVIFile) { *env << "The -i and -q (or -4) options cannot both be used!\n"; usage(); } Boolean outputCompositeFile = outputQuickTimeFile || outputAVIFile; if (!createReceivers && (outputCompositeFile || oneFilePerFrame || fileOutputInterval > 0)) { *env << "The -r option cannot be used with -q, -4, -i, -m, or -P!\n"; usage(); } if (oneFilePerFrame && fileOutputInterval > 0) { *env << "The -m and -P options cannot both be used!\n"; usage(); } if (outputCompositeFile && !movieWidthOptionSet) { *env << "Warning: The -q, -4 or -i option was used, but not -w. Assuming a video width of " << movieWidth << " pixels\n"; } if (outputCompositeFile && !movieHeightOptionSet) { *env << "Warning: The -q, -4 or -i option was used, but not -h. Assuming a video height of " << movieHeight << " pixels\n"; } if (outputCompositeFile && !movieFPSOptionSet) { *env << "Warning: The -q, -4 or -i option was used, but not -f. Assuming a video frame rate of " << movieFPS << " frames-per-second\n"; } if (audioOnly && videoOnly) { *env << "The -a and -v options cannot both be used!\n"; usage(); } if (sendOptionsRequestOnly && !sendOptionsRequest) { *env << "The -o and -O options cannot both be used!\n"; usage(); } if (initialAbsoluteSeekTime != NULL && initialSeekTime != 0.0f) { *env << "The -s and -U options cannot both be used!\n"; usage(); } if (authDBForREGISTER != NULL && !createHandlerServerForREGISTERCommand) { *env << "If \"-k \" is used, then -R (or \"-R \") must also be used!\n"; usage(); } if (tunnelOverHTTPPortNum > 0) { if (streamUsingTCP) { *env << "The -t and -T options cannot both be used!\n"; usage(); } else { streamUsingTCP = True; } } if (!createReceivers && notifyOnPacketArrival) { *env << "Warning: Because we're not receiving stream data, the -n flag has no effect\n"; } if (durationSlop < 0) { // This parameter wasn't set, so use a default value. // If we're measuring QOS stats, then don't add any slop, to avoid // having 'empty' measurement intervals at the end. durationSlop = qosMeasurementIntervalMS > 0 ? 
0.0 : 5.0; } streamURL = argv[1]; // Create (or arrange to create) our client object: if (createHandlerServerForREGISTERCommand) { handlerServerForREGISTERCommand = HandlerServerForREGISTERCommand::createNew(*env, continueAfterClientCreation0, handlerServerForREGISTERCommandPortNum, authDBForREGISTER, verbosityLevel, progName); if (handlerServerForREGISTERCommand == NULL) { *env << "Failed to create a server for handling incoming \"REGISTER\" commands: " << env->getResultMsg() << "\n"; } else { *env << "Awaiting an incoming \"REGISTER\" command on port " << handlerServerForREGISTERCommand->serverPortNum() << "\n"; } } else { ourClient = createClient(*env, streamURL, verbosityLevel, progName); if (ourClient == NULL) { *env << "Failed to create " << clientProtocolName << " client: " << env->getResultMsg() << "\n"; shutdown(); } continueAfterClientCreation1(); } // All subsequent activity takes place within the event loop: env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void continueAfterClientCreation0(RTSPClient* newRTSPClient, Boolean requestStreamingOverTCP) { if (newRTSPClient == NULL) return; streamUsingTCP = requestStreamingOverTCP; assignClient(ourClient = newRTSPClient); streamURL = newRTSPClient->url(); // Having handled one "REGISTER" command (giving us a "rtsp://" URL to stream from), we don't handle any more: Medium::close(handlerServerForREGISTERCommand); handlerServerForREGISTERCommand = NULL; continueAfterClientCreation1(); } void continueAfterClientCreation1() { setUserAgentString(userAgent); if (sendOptionsRequest) { // Begin by sending an "OPTIONS" command: getOptions(continueAfterOPTIONS); } else { continueAfterOPTIONS(NULL, 0, NULL); } } void continueAfterOPTIONS(RTSPClient*, int resultCode, char* resultString) { if (sendOptionsRequestOnly) { if (resultCode != 0) { *env << clientProtocolName << " \"OPTIONS\" request failed: " << resultString << "\n"; } else { *env << clientProtocolName << " \"OPTIONS\" request returned: " << resultString << "\n"; } shutdown(); } delete[] resultString; // Next, get a SDP description for the stream: getSDPDescription(continueAfterDESCRIBE); } void continueAfterDESCRIBE(RTSPClient*, int resultCode, char* resultString) { if (resultCode != 0) { *env << "Failed to get a SDP description for the URL \"" << streamURL << "\": " << resultString << "\n"; delete[] resultString; shutdown(); } char* sdpDescription = resultString; *env << "Opened URL \"" << streamURL << "\", returning a SDP description:\n" << sdpDescription << "\n"; // Create a media session object from this SDP description: session = MediaSession::createNew(*env, sdpDescription); delete[] sdpDescription; if (session == NULL) { *env << "Failed to create a MediaSession object from the SDP description: " << env->getResultMsg() << "\n"; shutdown(); } else if (!session->hasSubsessions()) { *env << "This session has no media subsessions (i.e., no \"m=\" lines)\n"; shutdown(); } // Then, setup the "RTPSource"s for the session: MediaSubsessionIterator iter(*session); MediaSubsession *subsession; Boolean madeProgress = False; char const* singleMediumToTest = singleMedium; while ((subsession = iter.next()) != NULL) { // If we've asked to receive only a single medium, then check this now: if (singleMediumToTest != NULL) { if (strcmp(subsession->mediumName(), singleMediumToTest) != 0) { *env << "Ignoring \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession, because we've asked to receive a single " << singleMedium 
<< " session only\n"; continue; } else { // Receive this subsession only singleMediumToTest = "xxxxx"; // this hack ensures that we get only 1 subsession of this type } } if (desiredPortNum != 0) { subsession->setClientPortNum(desiredPortNum); desiredPortNum += 2; } if (createReceivers) { if (!subsession->initiate(simpleRTPoffsetArg)) { *env << "Unable to create receiver for \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession: " << env->getResultMsg() << "\n"; } else { *env << "Created receiver for \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession (client ports " << subsession->clientPortNum() << "-" << subsession->clientPortNum()+1 << ")\n"; madeProgress = True; if (subsession->rtpSource() != NULL) { // Because we're saving the incoming data, rather than playing // it in real time, allow an especially large time threshold // (1 second) for reordering misordered incoming packets: unsigned const thresh = 1000000; // 1 second subsession->rtpSource()->setPacketReorderingThresholdTime(thresh); // Set the RTP source's OS socket buffer size as appropriate - either if we were explicitly asked (using -B), // or if the desired FileSink buffer size happens to be larger than the current OS socket buffer size. // (The latter case is a heuristic, on the assumption that if the user asked for a large FileSink buffer size, // then the input data rate may be large enough to justify increasing the OS socket buffer size also.) int socketNum = subsession->rtpSource()->RTPgs()->socketNum(); unsigned curBufferSize = getReceiveBufferSize(*env, socketNum); if (socketInputBufferSize > 0 || fileSinkBufferSize > curBufferSize) { unsigned newBufferSize = socketInputBufferSize > 0 ? socketInputBufferSize : fileSinkBufferSize; newBufferSize = setReceiveBufferTo(*env, socketNum, newBufferSize); if (socketInputBufferSize > 0) { // The user explicitly asked for the new socket buffer size; announce it: *env << "Changed socket receive buffer size for the \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession from " << curBufferSize << " to " << newBufferSize << " bytes\n"; } } } } } else { if (subsession->clientPortNum() == 0) { *env << "No client port was specified for the \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession. 
(Try adding the \"-p \" option.)\n"; } else { madeProgress = True; } } } if (!madeProgress) shutdown(); // Perform additional 'setup' on each subsession, before playing them: setupStreams(); } MediaSubsession *subsession; Boolean madeProgress = False; void continueAfterSETUP(RTSPClient*, int resultCode, char* resultString) { if (resultCode == 0) { *env << "Setup \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession (client ports " << subsession->clientPortNum() << "-" << subsession->clientPortNum()+1 << ")\n"; madeProgress = True; } else { *env << "Failed to setup \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession: " << resultString << "\n"; } delete[] resultString; // Set up the next subsession, if any: setupStreams(); } void createOutputFiles(char const* periodicFilenameSuffix) { char outFileName[1000]; if (outputQuickTimeFile || outputAVIFile) { if (periodicFilenameSuffix[0] == '\0') { // Normally (unless the '-P ' option was given) we output to 'stdout': sprintf(outFileName, "stdout"); } else { // Otherwise output to a type-specific file name, containing "periodicFilenameSuffix": char const* prefix = fileNamePrefix[0] == '\0' ? "output" : fileNamePrefix; snprintf(outFileName, sizeof outFileName, "%s%s.%s", prefix, periodicFilenameSuffix, outputAVIFile ? "avi" : generateMP4Format ? "mp4" : "mov"); } if (outputQuickTimeFile) { qtOut = QuickTimeFileSink::createNew(*env, *session, outFileName, fileSinkBufferSize, movieWidth, movieHeight, movieFPS, packetLossCompensate, syncStreams, generateHintTracks, generateMP4Format); if (qtOut == NULL) { *env << "Failed to create a \"QuickTimeFileSink\" for outputting to \"" << outFileName << "\": " << env->getResultMsg() << "\n"; shutdown(); } else { *env << "Outputting to the file: \"" << outFileName << "\"\n"; } qtOut->startPlaying(sessionAfterPlaying, NULL); } else { // outputAVIFile aviOut = AVIFileSink::createNew(*env, *session, outFileName, fileSinkBufferSize, movieWidth, movieHeight, movieFPS, packetLossCompensate); if (aviOut == NULL) { *env << "Failed to create an \"AVIFileSink\" for outputting to \"" << outFileName << "\": " << env->getResultMsg() << "\n"; shutdown(); } else { *env << "Outputting to the file: \"" << outFileName << "\"\n"; } aviOut->startPlaying(sessionAfterPlaying, NULL); } } else { // Create and start "FileSink"s for each subsession: madeProgress = False; MediaSubsessionIterator iter(*session); while ((subsession = iter.next()) != NULL) { if (subsession->readSource() == NULL) continue; // was not initiated // Create an output file for each desired stream: if (singleMedium == NULL || periodicFilenameSuffix[0] != '\0') { // Output file name is // "--" static unsigned streamCounter = 0; snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d%s", fileNamePrefix, subsession->mediumName(), subsession->codecName(), ++streamCounter, periodicFilenameSuffix); } else { // When outputting a single medium only, we output to 'stdout // (unless the '-P ' option was given): sprintf(outFileName, "stdout"); } FileSink* fileSink; if (strcmp(subsession->mediumName(), "audio") == 0 && (strcmp(subsession->codecName(), "AMR") == 0 || strcmp(subsession->codecName(), "AMR-WB") == 0)) { // For AMR audio streams, we use a special sink that inserts AMR frame hdrs: fileSink = AMRAudioFileSink::createNew(*env, outFileName, fileSinkBufferSize, oneFilePerFrame); } else if (strcmp(subsession->mediumName(), "video") == 0 && (strcmp(subsession->codecName(), "H264") == 0)) { // For H.264 video 
stream, we use a special sink that adds 'start codes', and (at the start) the SPS and PPS NAL units: fileSink = H264VideoFileSink::createNew(*env, outFileName, subsession->fmtp_spropparametersets(), fileSinkBufferSize, oneFilePerFrame); } else { // Normal case: fileSink = FileSink::createNew(*env, outFileName, fileSinkBufferSize, oneFilePerFrame); } subsession->sink = fileSink; if (subsession->sink == NULL) { *env << "Failed to create FileSink for \"" << outFileName << "\": " << env->getResultMsg() << "\n"; } else { if (singleMedium == NULL) { *env << "Created output file: \"" << outFileName << "\"\n"; } else { *env << "Outputting data from the \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession to \"" << outFileName << "\"\n"; } if (strcmp(subsession->mediumName(), "video") == 0 && strcmp(subsession->codecName(), "MP4V-ES") == 0 && subsession->fmtp_config() != NULL) { // For MPEG-4 video RTP streams, the 'config' information // from the SDP description contains useful VOL etc. headers. // Insert this data at the front of the output file: unsigned configLen; unsigned char* configData = parseGeneralConfigStr(subsession->fmtp_config(), configLen); struct timeval timeNow; gettimeofday(&timeNow, NULL); fileSink->addData(configData, configLen, timeNow); delete[] configData; } subsession->sink->startPlaying(*(subsession->readSource()), subsessionAfterPlaying, subsession); // Also set a handler to be called if a RTCP "BYE" arrives // for this subsession: if (subsession->rtcpInstance() != NULL) { subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, subsession); } madeProgress = True; } } if (!madeProgress) shutdown(); } } void createPeriodicOutputFiles() { // Create a filename suffix that notes the time interval that's being recorded: char periodicFileNameSuffix[100]; snprintf(periodicFileNameSuffix, sizeof periodicFileNameSuffix, "-%05d-%05d", fileOutputSecondsSoFar, fileOutputSecondsSoFar + fileOutputInterval); createOutputFiles(periodicFileNameSuffix); // Schedule an event for writing the next output file: periodicFileOutputTask = env->taskScheduler().scheduleDelayedTask(fileOutputInterval*1000000, (TaskFunc*)periodicFileOutputTimerHandler, (void*)NULL); } void setupStreams() { static MediaSubsessionIterator* setupIter = NULL; if (setupIter == NULL) setupIter = new MediaSubsessionIterator(*session); while ((subsession = setupIter->next()) != NULL) { // We have another subsession left to set up: if (subsession->clientPortNum() == 0) continue; // port # was not set setupSubsession(subsession, streamUsingTCP, forceMulticastOnUnspecified, continueAfterSETUP); return; } // We're done setting up subsessions. delete setupIter; if (!madeProgress) shutdown(); // Create output files: if (createReceivers) { if (fileOutputInterval > 0) { createPeriodicOutputFiles(); } else { createOutputFiles(""); } } // Finally, start playing each subsession, to start the data flow: if (duration == 0) { if (scale > 0) duration = session->playEndTime() - initialSeekTime; // use SDP end time else if (scale < 0) duration = initialSeekTime; } if (duration < 0) duration = 0.0; endTime = initialSeekTime; if (scale > 0) { if (duration <= 0) endTime = -1.0f; else endTime = initialSeekTime + duration; } else { endTime = initialSeekTime - duration; if (endTime < 0) endTime = 0.0f; } char const* absStartTime = initialAbsoluteSeekTime != NULL ? 
initialAbsoluteSeekTime : session->absStartTime();
  if (absStartTime != NULL) {
    // Either we or the server have specified that seeking should be done by 'absolute' time:
    startPlayingSession(session, absStartTime, session->absEndTime(), scale, continueAfterPLAY);
  } else {
    // Normal case: Seek by relative time (NPT):
    startPlayingSession(session, initialSeekTime, endTime, scale, continueAfterPLAY);
  }
}

void continueAfterPLAY(RTSPClient*, int resultCode, char* resultString) {
  if (resultCode != 0) {
    *env << "Failed to start playing session: " << resultString << "\n";
    delete[] resultString;
    shutdown();
    return; // don't fall through to the second "delete[] resultString" below
  } else {
    *env << "Started playing session\n";
  }
  delete[] resultString;

  if (qosMeasurementIntervalMS > 0) {
    // Begin periodic QOS measurements:
    beginQOSMeasurement();
  }

  // Figure out how long to delay (if at all) before shutting down, or
  // repeating the playback:
  Boolean timerIsBeingUsed = False;
  double secondsToDelay = duration;
  if (duration > 0) {
    // First, adjust "duration" based on any change to the play range (that was specified in the "PLAY" response):
    double rangeAdjustment = (session->playEndTime() - session->playStartTime()) - (endTime - initialSeekTime);
    if (duration + rangeAdjustment > 0.0) duration += rangeAdjustment;

    timerIsBeingUsed = True;
    double absScale = scale > 0 ? scale : -scale; // ASSERT: scale != 0
    secondsToDelay = duration/absScale + durationSlop;

    int64_t uSecsToDelay = (int64_t)(secondsToDelay*1000000.0);
    sessionTimerTask = env->taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)sessionTimerHandler, (void*)NULL);
  }

  char const* actionString = createReceivers ? "Receiving streamed data" : "Data is being streamed";
  if (timerIsBeingUsed) {
    *env << actionString << " (for up to " << secondsToDelay << " seconds)...\n";
  } else {
#ifdef USE_SIGNALS
    pid_t ourPid = getpid();
    *env << actionString << " (signal with \"kill -HUP " << (int)ourPid << "\" or \"kill -USR1 " << (int)ourPid << "\" to terminate)...\n";
#else
    *env << actionString << "...\n";
#endif
  }

  // Watch for incoming packets (if desired):
  checkForPacketArrival(NULL);
  checkInterPacketGaps(NULL);
}

void closeMediaSinks() {
  Medium::close(qtOut); qtOut = NULL;
  Medium::close(aviOut); aviOut = NULL;

  if (session == NULL) return;
  MediaSubsessionIterator iter(*session);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    Medium::close(subsession->sink);
    subsession->sink = NULL;
  }
}

void subsessionAfterPlaying(void* clientData) {
  // Begin by closing this media subsession's stream:
  MediaSubsession* subsession = (MediaSubsession*)clientData;
  Medium::close(subsession->sink);
  subsession->sink = NULL;

  // Next, check whether *all* subsessions' streams have now been closed:
  MediaSession& session = subsession->parentSession();
  MediaSubsessionIterator iter(session);
  while ((subsession = iter.next()) != NULL) {
    if (subsession->sink != NULL) return; // this subsession is still active
  }

  // All subsessions' streams have now been closed
  sessionAfterPlaying();
}

void subsessionByeHandler(void* clientData) {
  struct timeval timeNow;
  gettimeofday(&timeNow, NULL);
  unsigned secsDiff = timeNow.tv_sec - startTime.tv_sec;

  MediaSubsession* subsession = (MediaSubsession*)clientData;
  *env << "Received RTCP \"BYE\" on \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession (after " << secsDiff << " seconds)\n";

  // Act now as if the subsession had closed:
  subsessionAfterPlaying(subsession);
}

void sessionAfterPlaying(void* /*clientData*/) {
  if (!playContinuously) {
    shutdown(0);
  } else {
    // We've been asked
to play the stream(s) over again. // First, reset state from the current session: if (env != NULL) { env->taskScheduler().unscheduleDelayedTask(periodicFileOutputTask); env->taskScheduler().unscheduleDelayedTask(sessionTimerTask); env->taskScheduler().unscheduleDelayedTask(arrivalCheckTimerTask); env->taskScheduler().unscheduleDelayedTask(interPacketGapCheckTimerTask); env->taskScheduler().unscheduleDelayedTask(qosMeasurementTimerTask); } totNumPacketsReceived = ~0; startPlayingSession(session, initialSeekTime, endTime, scale, continueAfterPLAY); } } void sessionTimerHandler(void* /*clientData*/) { sessionTimerTask = NULL; sessionAfterPlaying(); } void periodicFileOutputTimerHandler(void* /*clientData*/) { fileOutputSecondsSoFar += fileOutputInterval; // First, close the existing output files: closeMediaSinks(); // Then, create new output files: createPeriodicOutputFiles(); } class qosMeasurementRecord { public: qosMeasurementRecord(struct timeval const& startTime, RTPSource* src) : fSource(src), fNext(NULL), kbits_per_second_min(1e20), kbits_per_second_max(0), kBytesTotal(0.0), packet_loss_fraction_min(1.0), packet_loss_fraction_max(0.0), totNumPacketsReceived(0), totNumPacketsExpected(0) { measurementEndTime = measurementStartTime = startTime; RTPReceptionStatsDB::Iterator statsIter(src->receptionStatsDB()); // Assume that there's only one SSRC source (usually the case): RTPReceptionStats* stats = statsIter.next(True); if (stats != NULL) { kBytesTotal = stats->totNumKBytesReceived(); totNumPacketsReceived = stats->totNumPacketsReceived(); totNumPacketsExpected = stats->totNumPacketsExpected(); } } virtual ~qosMeasurementRecord() { delete fNext; } void periodicQOSMeasurement(struct timeval const& timeNow); public: RTPSource* fSource; qosMeasurementRecord* fNext; public: struct timeval measurementStartTime, measurementEndTime; double kbits_per_second_min, kbits_per_second_max; double kBytesTotal; double packet_loss_fraction_min, packet_loss_fraction_max; unsigned totNumPacketsReceived, totNumPacketsExpected; }; static qosMeasurementRecord* qosRecordHead = NULL; static void periodicQOSMeasurement(void* clientData); // forward static unsigned nextQOSMeasurementUSecs; static void scheduleNextQOSMeasurement() { nextQOSMeasurementUSecs += qosMeasurementIntervalMS*1000; struct timeval timeNow; gettimeofday(&timeNow, NULL); unsigned timeNowUSecs = timeNow.tv_sec*1000000 + timeNow.tv_usec; int usecsToDelay = nextQOSMeasurementUSecs - timeNowUSecs; qosMeasurementTimerTask = env->taskScheduler().scheduleDelayedTask( usecsToDelay, (TaskFunc*)periodicQOSMeasurement, (void*)NULL); } static void periodicQOSMeasurement(void* /*clientData*/) { struct timeval timeNow; gettimeofday(&timeNow, NULL); for (qosMeasurementRecord* qosRecord = qosRecordHead; qosRecord != NULL; qosRecord = qosRecord->fNext) { qosRecord->periodicQOSMeasurement(timeNow); } // Do this again later: scheduleNextQOSMeasurement(); } void qosMeasurementRecord ::periodicQOSMeasurement(struct timeval const& timeNow) { unsigned secsDiff = timeNow.tv_sec - measurementEndTime.tv_sec; int usecsDiff = timeNow.tv_usec - measurementEndTime.tv_usec; double timeDiff = secsDiff + usecsDiff/1000000.0; measurementEndTime = timeNow; RTPReceptionStatsDB::Iterator statsIter(fSource->receptionStatsDB()); // Assume that there's only one SSRC source (usually the case): RTPReceptionStats* stats = statsIter.next(True); if (stats != NULL) { double kBytesTotalNow = stats->totNumKBytesReceived(); double kBytesDeltaNow = kBytesTotalNow - kBytesTotal; kBytesTotal = 
kBytesTotalNow; double kbpsNow = timeDiff == 0.0 ? 0.0 : 8*kBytesDeltaNow/timeDiff; if (kbpsNow < 0.0) kbpsNow = 0.0; // in case of roundoff error if (kbpsNow < kbits_per_second_min) kbits_per_second_min = kbpsNow; if (kbpsNow > kbits_per_second_max) kbits_per_second_max = kbpsNow; unsigned totReceivedNow = stats->totNumPacketsReceived(); unsigned totExpectedNow = stats->totNumPacketsExpected(); unsigned deltaReceivedNow = totReceivedNow - totNumPacketsReceived; unsigned deltaExpectedNow = totExpectedNow - totNumPacketsExpected; totNumPacketsReceived = totReceivedNow; totNumPacketsExpected = totExpectedNow; double lossFractionNow = deltaExpectedNow == 0 ? 0.0 : 1.0 - deltaReceivedNow/(double)deltaExpectedNow; //if (lossFractionNow < 0.0) lossFractionNow = 0.0; //reordering can cause if (lossFractionNow < packet_loss_fraction_min) { packet_loss_fraction_min = lossFractionNow; } if (lossFractionNow > packet_loss_fraction_max) { packet_loss_fraction_max = lossFractionNow; } } } void beginQOSMeasurement() { // Set up a measurement record for each active subsession: struct timeval startTime; gettimeofday(&startTime, NULL); nextQOSMeasurementUSecs = startTime.tv_sec*1000000 + startTime.tv_usec; qosMeasurementRecord* qosRecordTail = NULL; MediaSubsessionIterator iter(*session); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { RTPSource* src = subsession->rtpSource(); if (src == NULL) continue; qosMeasurementRecord* qosRecord = new qosMeasurementRecord(startTime, src); if (qosRecordHead == NULL) qosRecordHead = qosRecord; if (qosRecordTail != NULL) qosRecordTail->fNext = qosRecord; qosRecordTail = qosRecord; } // Then schedule the first of the periodic measurements: scheduleNextQOSMeasurement(); } void printQOSData(int exitCode) { *env << "begin_QOS_statistics\n"; // Print out stats for each active subsession: qosMeasurementRecord* curQOSRecord = qosRecordHead; if (session != NULL) { MediaSubsessionIterator iter(*session); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { RTPSource* src = subsession->rtpSource(); if (src == NULL) continue; *env << "subsession\t" << subsession->mediumName() << "/" << subsession->codecName() << "\n"; unsigned numPacketsReceived = 0, numPacketsExpected = 0; if (curQOSRecord != NULL) { numPacketsReceived = curQOSRecord->totNumPacketsReceived; numPacketsExpected = curQOSRecord->totNumPacketsExpected; } *env << "num_packets_received\t" << numPacketsReceived << "\n"; *env << "num_packets_lost\t" << int(numPacketsExpected - numPacketsReceived) << "\n"; if (curQOSRecord != NULL) { unsigned secsDiff = curQOSRecord->measurementEndTime.tv_sec - curQOSRecord->measurementStartTime.tv_sec; int usecsDiff = curQOSRecord->measurementEndTime.tv_usec - curQOSRecord->measurementStartTime.tv_usec; double measurementTime = secsDiff + usecsDiff/1000000.0; *env << "elapsed_measurement_time\t" << measurementTime << "\n"; *env << "kBytes_received_total\t" << curQOSRecord->kBytesTotal << "\n"; *env << "measurement_sampling_interval_ms\t" << qosMeasurementIntervalMS << "\n"; if (curQOSRecord->kbits_per_second_max == 0) { // special case: we didn't receive any data: *env << "kbits_per_second_min\tunavailable\n" "kbits_per_second_ave\tunavailable\n" "kbits_per_second_max\tunavailable\n"; } else { *env << "kbits_per_second_min\t" << curQOSRecord->kbits_per_second_min << "\n"; *env << "kbits_per_second_ave\t" << (measurementTime == 0.0 ? 
0.0 : 8*curQOSRecord->kBytesTotal/measurementTime) << "\n"; *env << "kbits_per_second_max\t" << curQOSRecord->kbits_per_second_max << "\n"; } *env << "packet_loss_percentage_min\t" << 100*curQOSRecord->packet_loss_fraction_min << "\n"; double packetLossFraction = numPacketsExpected == 0 ? 1.0 : 1.0 - numPacketsReceived/(double)numPacketsExpected; if (packetLossFraction < 0.0) packetLossFraction = 0.0; *env << "packet_loss_percentage_ave\t" << 100*packetLossFraction << "\n"; *env << "packet_loss_percentage_max\t" << (packetLossFraction == 1.0 ? 100.0 : 100*curQOSRecord->packet_loss_fraction_max) << "\n"; RTPReceptionStatsDB::Iterator statsIter(src->receptionStatsDB()); // Assume that there's only one SSRC source (usually the case): RTPReceptionStats* stats = statsIter.next(True); if (stats != NULL) { *env << "inter_packet_gap_ms_min\t" << stats->minInterPacketGapUS()/1000.0 << "\n"; struct timeval totalGaps = stats->totalInterPacketGaps(); double totalGapsMS = totalGaps.tv_sec*1000.0 + totalGaps.tv_usec/1000.0; unsigned totNumPacketsReceived = stats->totNumPacketsReceived(); *env << "inter_packet_gap_ms_ave\t" << (totNumPacketsReceived == 0 ? 0.0 : totalGapsMS/totNumPacketsReceived) << "\n"; *env << "inter_packet_gap_ms_max\t" << stats->maxInterPacketGapUS()/1000.0 << "\n"; } curQOSRecord = curQOSRecord->fNext; } } } *env << "end_QOS_statistics\n"; delete qosRecordHead; } Boolean areAlreadyShuttingDown = False; int shutdownExitCode; void shutdown(int exitCode) { if (areAlreadyShuttingDown) return; // in case we're called after receiving a RTCP "BYE" while in the middle of a "TEARDOWN". areAlreadyShuttingDown = True; shutdownExitCode = exitCode; if (env != NULL) { env->taskScheduler().unscheduleDelayedTask(periodicFileOutputTask); env->taskScheduler().unscheduleDelayedTask(sessionTimerTask); env->taskScheduler().unscheduleDelayedTask(arrivalCheckTimerTask); env->taskScheduler().unscheduleDelayedTask(interPacketGapCheckTimerTask); env->taskScheduler().unscheduleDelayedTask(qosMeasurementTimerTask); } if (qosMeasurementIntervalMS > 0) { printQOSData(exitCode); } // Teardown, then shutdown, any outstanding RTP/RTCP subsessions Boolean shutdownImmediately = True; // by default if (session != NULL) { RTSPClient::responseHandler* responseHandlerForTEARDOWN = NULL; // unless: if (waitForResponseToTEARDOWN) { shutdownImmediately = False; responseHandlerForTEARDOWN = continueAfterTEARDOWN; } tearDownSession(session, responseHandlerForTEARDOWN); } if (shutdownImmediately) continueAfterTEARDOWN(NULL, 0, NULL); } void continueAfterTEARDOWN(RTSPClient*, int /*resultCode*/, char* resultString) { delete[] resultString; // Now that we've stopped any more incoming data from arriving, close our output files: closeMediaSinks(); Medium::close(session); // Finally, shut down our client: delete ourAuthenticator; delete authDBForREGISTER; Medium::close(ourClient); // Adios... 
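// (The close order above matters: we first stop new data from arriving, then close the
// output sinks, then the "MediaSession" itself, and only then the client object that
// was used to control it.)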
exit(shutdownExitCode); } void signalHandlerShutdown(int /*sig*/) { *env << "Got shutdown signal\n"; waitForResponseToTEARDOWN = False; // to ensure that we end, even if the server does not respond to our TEARDOWN shutdown(0); } void checkForPacketArrival(void* /*clientData*/) { if (!notifyOnPacketArrival) return; // we're not checking // Check each subsession, to see whether it has received data packets: unsigned numSubsessionsChecked = 0; unsigned numSubsessionsWithReceivedData = 0; unsigned numSubsessionsThatHaveBeenSynced = 0; MediaSubsessionIterator iter(*session); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { RTPSource* src = subsession->rtpSource(); if (src == NULL) continue; ++numSubsessionsChecked; if (src->receptionStatsDB().numActiveSourcesSinceLastReset() > 0) { // At least one data packet has arrived ++numSubsessionsWithReceivedData; } if (src->hasBeenSynchronizedUsingRTCP()) { ++numSubsessionsThatHaveBeenSynced; } } unsigned numSubsessionsToCheck = numSubsessionsChecked; // Special case for "QuickTimeFileSink"s and "AVIFileSink"s: // They might not use all of the input sources: if (qtOut != NULL) { numSubsessionsToCheck = qtOut->numActiveSubsessions(); } else if (aviOut != NULL) { numSubsessionsToCheck = aviOut->numActiveSubsessions(); } Boolean notifyTheUser; if (!syncStreams) { notifyTheUser = numSubsessionsWithReceivedData > 0; // easy case } else { notifyTheUser = numSubsessionsWithReceivedData >= numSubsessionsToCheck && numSubsessionsThatHaveBeenSynced == numSubsessionsChecked; // Note: A subsession with no active sources is considered to be synced } if (notifyTheUser) { struct timeval timeNow; gettimeofday(&timeNow, NULL); char timestampStr[100]; sprintf(timestampStr, "%ld%03ld", timeNow.tv_sec, (long)(timeNow.tv_usec/1000)); *env << (syncStreams ? "Synchronized d" : "D") << "ata packets have begun arriving [" << timestampStr << "]\007\n"; return; } // No luck, so reschedule this check again, after a delay: int uSecsToDelay = 100000; // 100 ms arrivalCheckTimerTask = env->taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)checkForPacketArrival, NULL); } void checkInterPacketGaps(void* /*clientData*/) { if (interPacketGapMaxTime == 0) return; // we're not checking // Check each subsession, counting up how many packets have been received: unsigned newTotNumPacketsReceived = 0; MediaSubsessionIterator iter(*session); MediaSubsession* subsession; while ((subsession = iter.next()) != NULL) { RTPSource* src = subsession->rtpSource(); if (src == NULL) continue; newTotNumPacketsReceived += src->receptionStatsDB().totNumPacketsReceived(); } if (newTotNumPacketsReceived == totNumPacketsReceived) { // No additional packets have been received since the last time we // checked, so end this stream: *env << "Closing session, because we stopped receiving packets.\n"; interPacketGapCheckTimerTask = NULL; sessionAfterPlaying(); } else { totNumPacketsReceived = newTotNumPacketsReceived; // Check again, after the specified delay: interPacketGapCheckTimerTask = env->taskScheduler().scheduleDelayedTask(interPacketGapMaxTime*1000000, (TaskFunc*)checkInterPacketGaps, NULL); } } live/testProgs/playCommon.hh000444 001751 000000 00000004331 12265042432 016371 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A common framework, used for the "openRTSP" and "playSIP" applications // Interfaces #include "liveMedia.hh" extern Medium* createClient(UsageEnvironment& env, char const* URL, int verbosityLevel, char const* applicationName); extern void assignClient(Medium* client); extern RTSPClient* ourRTSPClient; extern SIPClient* ourSIPClient; extern void getOptions(RTSPClient::responseHandler* afterFunc); extern void getSDPDescription(RTSPClient::responseHandler* afterFunc); extern void setupSubsession(MediaSubsession* subsession, Boolean streamUsingTCP, Boolean forceMulticastOnUnspecified, RTSPClient::responseHandler* afterFunc); extern void startPlayingSession(MediaSession* session, double start, double end, float scale, RTSPClient::responseHandler* afterFunc); extern void startPlayingSession(MediaSession* session, char const* absStartTime, char const* absEndTime, float scale, RTSPClient::responseHandler* afterFunc); // For playing by 'absolute' time (using strings of the form "YYYYMMDDTHHMMSSZ" or "YYYYMMDDTHHMMSS.Z" extern void tearDownSession(MediaSession* session, RTSPClient::responseHandler* afterFunc); extern void setUserAgentString(char const* userAgentString); extern Authenticator* ourAuthenticator; extern Boolean allowProxyServers; extern Boolean controlConnectionUsesTCP; extern Boolean supportCodecSelection; extern char const* clientProtocolName; extern unsigned statusCode; live/testProgs/testMP3-using-ADUs.sdp000444 001751 000000 00000000355 12265042432 017660 0ustar00rsfwheel000000 000000 v=0 o=- 49452 4 IN IP4 127.0.0.1 s=Test MP3 session i=Parameters for the session streamed by "testMP3Streamer" t=0 0 a=tool:testMP3Streamer a=type:broadcast m=audio 6666 RTP/AVP 96 c=IN IP4 239.255.42.42/127 a=rtpmap:96 mpa-robust/90000 live/testProgs/testWAVAudioStreamer.cpp000400 001751 000000 00000024145 12265042432 020455 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. 
All rights reserved // A test program that streams a WAV audio file via RTP/RTCP // main program #include "liveMedia.hh" #include "GroupsockHelper.hh" #include "BasicUsageEnvironment.hh" // To convert 16-bit samples to 8-bit u-law ("u" is the Greek letter "mu") // encoding, before streaming, uncomment the following line: //#define CONVERT_TO_ULAW 1 UsageEnvironment* env; void play(); // forward int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); play(); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warnings } char const* inputFileName = "test.wav"; void afterPlaying(void* clientData); // forward // A structure to hold the state of the current session. // It is used in the "afterPlaying()" function to clean up the session. struct sessionState_t { FramedSource* source; RTPSink* sink; RTCPInstance* rtcpInstance; Groupsock* rtpGroupsock; Groupsock* rtcpGroupsock; RTSPServer* rtspServer; } sessionState; void play() { // Open the file as a 'WAV' file: WAVAudioFileSource* wavSource = WAVAudioFileSource::createNew(*env, inputFileName); if (wavSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a WAV audio file source: " << env->getResultMsg() << "\n"; exit(1); } // Get attributes of the audio source: unsigned char audioFormat = wavSource->getAudioFormat(); unsigned char const bitsPerSample = wavSource->bitsPerSample(); // We handle only 4,8,16,20,24 bits-per-sample audio: if (bitsPerSample%4 != 0 || bitsPerSample < 4 || bitsPerSample > 24 || bitsPerSample == 12) { *env << "The input file contains " << bitsPerSample << " bit-per-sample audio, which we don't handle\n"; exit(1); } unsigned const samplingFrequency = wavSource->samplingFrequency(); unsigned char const numChannels = wavSource->numChannels(); unsigned bitsPerSecond = samplingFrequency*bitsPerSample*numChannels; *env << "Audio source parameters:\n\t" << samplingFrequency << " Hz, "; *env << bitsPerSample << " bits-per-sample, "; *env << numChannels << " channels => "; *env << bitsPerSecond << " bits-per-second\n"; char const* mimeType; unsigned char payloadFormatCode = 96; // by default, unless a static RTP payload type can be used // Add in any filter necessary to transform the data prior to streaming. // (This is where any audio compression would get added.) sessionState.source = wavSource; // by default if (audioFormat == WA_PCM) { if (bitsPerSample == 16) { // Note that samples in the WAV audio file are in little-endian order. 
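// (Two alternative filter chains follow. With CONVERT_TO_ULAW defined, the 16-bit samples
// are companded down to 8-bit u-law - halving "bitsPerSecond" - and streamed as "PCMU";
// otherwise they are merely byte-swapped into network (big-endian) order and streamed as "L16".)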
#ifdef CONVERT_TO_ULAW
      // Add a filter that converts from raw 16-bit PCM audio (in little-endian order) to 8-bit u-law audio:
      sessionState.source = uLawFromPCMAudioSource::createNew(*env, wavSource, 1/*little-endian*/);
      if (sessionState.source == NULL) {
        *env << "Unable to create a u-law filter from the PCM audio source: " << env->getResultMsg() << "\n";
        exit(1);
      }
      bitsPerSecond /= 2;
      *env << "Converting to 8-bit u-law audio for streaming => " << bitsPerSecond << " bits-per-second\n";
      mimeType = "PCMU";
      if (samplingFrequency == 8000 && numChannels == 1) {
        payloadFormatCode = 0; // a static RTP payload type
      }
#else
      // Add a filter that converts from little-endian to network (big-endian) order:
      sessionState.source = EndianSwap16::createNew(*env, wavSource);
      if (sessionState.source == NULL) {
        *env << "Unable to create a little-endian->big-endian order filter from the PCM audio source: " << env->getResultMsg() << "\n";
        exit(1);
      }
      *env << "Converting to network byte order for streaming\n";
      mimeType = "L16";
      if (samplingFrequency == 44100 && numChannels == 2) {
        payloadFormatCode = 10; // a static RTP payload type
      } else if (samplingFrequency == 44100 && numChannels == 1) {
        payloadFormatCode = 11; // a static RTP payload type
      }
#endif
    } else if (bitsPerSample == 20 || bitsPerSample == 24) {
      // Add a filter that converts from little-endian to network (big-endian) order:
      sessionState.source = EndianSwap24::createNew(*env, wavSource);
      if (sessionState.source == NULL) {
        *env << "Unable to create a little-endian->big-endian order filter from the PCM audio source: " << env->getResultMsg() << "\n";
        exit(1);
      }
      *env << "Converting to network byte order for streaming\n";
      mimeType = bitsPerSample == 20 ? "L20" : "L24";
    } else { // bitsPerSample == 8 (we assume that bitsPerSample == 4 is only for WA_IMA_ADPCM)
      // Don't do any transformation; send the 8-bit PCM data 'as is':
      mimeType = "L8";
    }
  } else if (audioFormat == WA_PCMU) {
    mimeType = "PCMU";
    if (samplingFrequency == 8000 && numChannels == 1) {
      payloadFormatCode = 0; // a static RTP payload type
    }
  } else if (audioFormat == WA_PCMA) {
    mimeType = "PCMA";
    if (samplingFrequency == 8000 && numChannels == 1) {
      payloadFormatCode = 8; // a static RTP payload type
    }
  } else if (audioFormat == WA_IMA_ADPCM) {
    mimeType = "DVI4";
    // Use a static payload type, if one is defined:
    if (numChannels == 1) {
      if (samplingFrequency == 8000) {
        payloadFormatCode = 5; // a static RTP payload type
      } else if (samplingFrequency == 16000) {
        payloadFormatCode = 6; // a static RTP payload type
      } else if (samplingFrequency == 11025) {
        payloadFormatCode = 16; // a static RTP payload type
      } else if (samplingFrequency == 22050) {
        payloadFormatCode = 17; // a static RTP payload type
      }
    }
  } else { // unknown format
    *env << "Unknown audio format code \"" << audioFormat << "\" in WAV file header\n";
    exit(1);
  }

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address. If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer" demo application,
  // or the "LIVE555 Media Server" - not this application - as a model.
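  // ("Source-specific multicast" (SSM) addresses come from the 232.0.0.0/8 block;
  //  "chooseRandomIPv4SSMAddress()" picks one from that block pseudo-randomly, making
  //  a collision between independent senders unlikely.)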
  const unsigned short rtpPortNum = 2222;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;
  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  sessionState.rtpGroupsock = new Groupsock(*env, destinationAddress, rtpPort, ttl);
  sessionState.rtpGroupsock->multicastSendOnly(); // we're a SSM source
  sessionState.rtcpGroupsock = new Groupsock(*env, destinationAddress, rtcpPort, ttl);
  sessionState.rtcpGroupsock->multicastSendOnly(); // we're a SSM source

  // Create an appropriate audio RTP sink (using "SimpleRTPSink") from the RTP 'groupsock':
  sessionState.sink = SimpleRTPSink::createNew(*env, sessionState.rtpGroupsock, payloadFormatCode, samplingFrequency, "audio", mimeType, numChannels);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = (bitsPerSecond + 500)/1000; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  sessionState.rtcpInstance = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock, estimatedSessionBandwidth, CNAME, sessionState.sink, NULL /* we're a server */, True /* we're a SSM source*/);
  // Note: This starts RTCP running automatically

  // Create and start a RTSP server to serve this stream:
  sessionState.rtspServer = RTSPServer::createNew(*env, 8554);
  if (sessionState.rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName, "Session streamed by \"testWAVAudioStreamer\"", True/*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, sessionState.rtcpInstance));
  sessionState.rtspServer->addServerMediaSession(sms);

  char* url = sessionState.rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Finally, start the streaming:
  *env << "Beginning streaming...\n";
  sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
}

void afterPlaying(void* /*clientData*/) {
  *env << "...done streaming\n";

  // End by closing the media:
  Medium::close(sessionState.rtspServer);
  Medium::close(sessionState.rtcpInstance);
  Medium::close(sessionState.sink);
  delete sessionState.rtpGroupsock;
  Medium::close(sessionState.source);
  delete sessionState.rtcpGroupsock;

  // We're done:
  exit(0);
}
live/testProgs/testH264VideoStreamer.cpp000444 001751 000000 00000011720 12265042432 020453 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc.
All rights reserved
// A test program that reads a H.264 Elementary Stream video file
// and streams it using RTP
// main program
//
// NOTE: For this application to work, the H.264 Elementary Stream video file *must* contain SPS and PPS NAL units,
// ideally at or near the start of the file. These SPS and PPS NAL units are used to specify 'configuration' information
// that is set in the output stream's SDP description (by the RTSP server that is built in to this application).
// Note also that - unlike some other "*Streamer" demo applications - the resulting stream can be received only using a
// RTSP client (such as "openRTSP")

#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>
#include <GroupsockHelper.hh>

UsageEnvironment* env;

char const* inputFileName = "test.264";
H264VideoStreamFramer* videoSource;
RTPSink* videoSink;

void play(); // forward

int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address. If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNum = 18888;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
  rtpGroupsock.multicastSendOnly(); // we're a SSM source
  Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
  rtcpGroupsock.multicastSendOnly(); // we're a SSM source

  // Create a 'H264 Video RTP' sink from the RTP 'groupsock':
  OutPacketBuffer::maxSize = 100000;
  videoSink = H264VideoRTPSink::createNew(*env, &rtpGroupsock, 96);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  RTCPInstance* rtcp = RTCPInstance::createNew(*env, &rtcpGroupsock, estimatedSessionBandwidth, CNAME, videoSink, NULL /* we're a server */, True /* we're a SSM source */);
  // Note: This starts RTCP running automatically

  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName, "Session streamed by \"testH264VideoStreamer\"", True /*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
  rtspServer->addServerMediaSession(sms);

  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Start the streaming:
  *env << "Beginning streaming...\n";
  play();

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}

void afterPlaying(void* /*clientData*/) {
  *env << "...done reading from file\n";
  videoSink->stopPlaying();
  Medium::close(videoSource);
  // Note that this also closes the input file that this source read from.
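  // (Closing "videoSource" tears down the whole chain - the framer and, beneath it, the
  //  "ByteStreamFileSource" - so "play()" below can simply reopen the input file; the net
  //  effect is that the file is streamed in an endless loop.)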
// Start playing once again: play(); } void play() { // Open the input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, inputFileName); if (fileSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } FramedSource* videoES = fileSource; // Create a framer for the Video Elementary Stream: videoSource = H264VideoStreamFramer::createNew(*env, videoES); // Finally, start playing: *env << "Beginning to read from file...\n"; videoSink->startPlaying(*videoSource, afterPlaying, videoSink); } live/testProgs/testMPEG2Transport.sdp000444 001751 000000 00000000372 12265042432 020072 0ustar00rsfwheel000000 000000 v=0 o=- 49451 3 IN IP4 127.0.0.1 s=Test MPEG-2 Transport Stream session i=Parameters for the session streamed by "testMPEG2TransportStreamer" t=0 0 a=tool:testMPEG2TransportStreamer a=type:broadcast m=video 1234 RTP/AVP 33 c=IN IP4 239.255.42.42/127 live/testProgs/testMPEG1or2ProgramToTransportStream.cpp000444 001751 000000 00000005264 12265042432 023524 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A program that converts a MPEG-1 or 2 Program Stream file into // a Transport Stream file. // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" char const* inputFileName = "in.mpg"; char const* outputFileName = "out.ts"; void afterPlaying(void* clientData); // forward UsageEnvironment* env; int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Open the input file as a 'byte-stream file source': FramedSource* inputSource = ByteStreamFileSource::createNew(*env, inputFileName); if (inputSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } // Create a MPEG demultiplexor that reads from that source. 
MPEG1or2Demux* baseDemultiplexor = MPEG1or2Demux::createNew(*env, inputSource); // Create, from this, a source that returns raw PES packets: MPEG1or2DemuxedElementaryStream* pesSource = baseDemultiplexor->newRawPESStream(); // And, from this, a filter that converts to MPEG-2 Transport Stream frames: FramedSource* tsFrames = MPEG2TransportStreamFromPESSource::createNew(*env, pesSource); // Open the output file as a 'file sink': MediaSink* outputSink = FileSink::createNew(*env, outputFileName); if (outputSink == NULL) { *env << "Unable to open file \"" << outputFileName << "\" as a file sink\n"; exit(1); } // Finally, start playing: *env << "Beginning to read...\n"; outputSink->startPlaying(*tsFrames, afterPlaying, NULL); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "Done reading.\n"; *env << "Wrote output file: \"" << outputFileName << "\"\n"; exit(0); } live/testProgs/MPEG2TransportStreamIndexer.cpp000400 001751 000000 00000006327 12265042432 021657 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A program that reads an existing MPEG-2 Transport Stream file, // and generates a separate index file that can be used - by our RTSP server // implementation - to support 'trick play' operations when streaming the // Transport Stream file. 
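//
// (A sketch of how the generated ".tsx" file is typically consumed - modeled on the
// separate "testOnDemandRTSPServer" demo; the stream name, file names, and port
// number below are illustrative only:)
//
//   #include <liveMedia.hh>
//   #include <BasicUsageEnvironment.hh>
//
//   int main() {
//     TaskScheduler* scheduler = BasicTaskScheduler::createNew();
//     UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
//
//     RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
//     if (rtspServer == NULL) return 1;
//
//     ServerMediaSession* sms = ServerMediaSession::createNew(*env, "tsStream");
//     // Passing the index file's name here is what enables server-side 'trick play':
//     sms->addSubsession(MPEG2TransportFileServerMediaSubsession
//                        ::createNew(*env, "clip.ts", "clip.tsx", False/*reuseFirstSource*/));
//     rtspServer->addServerMediaSession(sms);
//
//     env->taskScheduler().doEventLoop(); // does not return
//   }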
// main program

#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>

void afterPlaying(void* clientData); // forward

UsageEnvironment* env;
char const* programName;

void usage() {
  *env << "usage: " << programName << " <transport-stream-file-name>\n";
  *env << "\twhere <transport-stream-file-name> ends with \".ts\"\n";
  exit(1);
}

int main(int argc, char const** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Parse the command line:
  programName = argv[0];
  if (argc != 2) usage();

  char const* inputFileName = argv[1];
  // Check whether the input file name ends with ".ts":
  int len = strlen(inputFileName);
  if (len < 4 || strcmp(&inputFileName[len-3], ".ts") != 0) {
    *env << "ERROR: input file name \"" << inputFileName << "\" does not end with \".ts\"\n";
    usage();
  }

  // Open the input file (as a 'byte stream file source'):
  FramedSource* input = ByteStreamFileSource::createNew(*env, inputFileName, TRANSPORT_PACKET_SIZE);
  if (input == NULL) {
    *env << "Failed to open input file \"" << inputFileName << "\" (does it exist?)\n";
    exit(1);
  }

  // Create a filter that indexes the input Transport Stream data:
  FramedSource* indexer = MPEG2IFrameIndexFromTransportStream::createNew(*env, input);

  // The output file name is the same as the input file name, except with suffix ".tsx":
  char* outputFileName = new char[len+2]; // allow for trailing x\0
  sprintf(outputFileName, "%sx", inputFileName);

  // Open the output file (for writing), as a 'file sink':
  MediaSink* output = FileSink::createNew(*env, outputFileName);
  if (output == NULL) {
    *env << "Failed to open output file \"" << outputFileName << "\"\n";
    exit(1);
  }

  // Start playing, to generate the output index file:
  *env << "Writing index file \"" << outputFileName << "\"...";
  output->startPlaying(*indexer, afterPlaying, NULL);

  env->taskScheduler().doEventLoop(); // does not return
  return 0; // only to prevent compiler warning
}

void afterPlaying(void* /*clientData*/) {
  *env << "...done\n";
  exit(0);
}
live/testProgs/testMPEG2TransportStreamTrickPlay.cpp000400 001751 000000 00000012311 12265042432 023053 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved
// A program that tests 'trick mode' operations on a MPEG-2 Transport Stream file,
// by generating a new Transport Stream file that represents the result of the
// 'trick mode' operation (seeking and/or fast forward/reverse play).
// For this to work, there must also be an index file present, in the same directory
// as the Transport Stream file, and with the same name prefix. (The Transport
// Stream file has name suffix ".ts"; the index file has name suffix ".tsx".)
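//
// For example (file names hypothetical): after "MPEG2TransportStreamIndexer clip.ts"
// has produced "clip.tsx", one might run
//     testMPEG2TransportStreamTrickPlay clip.ts 0 2 clip-2x.ts
// for 2x fast-forward from the start, or
//     testMPEG2TransportStreamTrickPlay clip.ts 30 -1 clip-rev.ts
// for reverse play starting 30 seconds in.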
// main program

#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>

void afterPlaying(void* clientData); // forward

UsageEnvironment* env;
char const* programName;

void usage() {
  *env << "usage: " << programName << " <input-transport-stream-file-name> <start-time> <scale> <output-transport-stream-file-name>\n";
  *env << "\twhere\t<input-transport-stream-file-name> ends with \".ts\"\n";
  *env << "\t\t<start-time> is the starting play time in seconds (0 for the start)\n";
  *env << "\t\t<scale> is a non-zero integer, representing the playing speed (use 1 for normal play; use a negative number for reverse play)\n";
  exit(1);
}

int main(int argc, char const** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Parse the command line:
  programName = argv[0];
  if (argc != 5) usage();

  char const* inputFileName = argv[1];
  // Check whether the input file name ends with ".ts":
  int len = strlen(inputFileName);
  if (len < 4 || strcmp(&inputFileName[len-3], ".ts") != 0) {
    *env << "ERROR: input file name \"" << inputFileName << "\" does not end with \".ts\"\n";
    usage();
  }

  // Parse the <start-time> and <scale> parameters:
  float startTime;
  if (sscanf(argv[2], "%f", &startTime) != 1 || startTime < 0.0f) usage();
  int scale;
  if (sscanf(argv[3], "%d", &scale) != 1 || scale == 0) usage();

  // Open the input file (as a 'byte stream file source'):
  FramedSource* input = ByteStreamFileSource::createNew(*env, inputFileName, TRANSPORT_PACKET_SIZE);
  if (input == NULL) {
    *env << "Failed to open input file \"" << inputFileName << "\" (does it exist?)\n";
    exit(1);
  }

  // Check whether the corresponding index file exists.
  // The index file name is the same as the input file name, except with suffix ".tsx":
  char* indexFileName = new char[len+2]; // allow for trailing x\0
  sprintf(indexFileName, "%sx", inputFileName);
  MPEG2TransportStreamIndexFile* indexFile = MPEG2TransportStreamIndexFile::createNew(*env, indexFileName);
  if (indexFile == NULL) {
    *env << "Failed to open index file \"" << indexFileName << "\" (does it exist?)\n";
    exit(1);
  }

  // Create a filter that generates trick mode data from the input and index files:
  MPEG2TransportStreamTrickModeFilter* trickModeFilter = MPEG2TransportStreamTrickModeFilter::createNew(*env, input, indexFile, scale);

  if (startTime > 0.0f) {
    // Seek the input Transport Stream and Index files to the specified start time:
    unsigned long tsRecordNumber, indexRecordNumber;
    indexFile->lookupTSPacketNumFromNPT(startTime, tsRecordNumber, indexRecordNumber);
    if (!trickModeFilter->seekTo(tsRecordNumber, indexRecordNumber)) { // TARFU!
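      // (i.e., the seek failed - which can happen if, e.g., the index file is stale or
      //  truncated relative to the Transport Stream file)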
*env << "Failed to seek trick mode filter to ts #" << (unsigned)tsRecordNumber << ", ix #" << (unsigned)indexRecordNumber << "(for time " << startTime << ")\n"; exit(1); } } // Generate a new Transport Stream from the Trick Mode filter: MPEG2TransportStreamFromESSource* newTransportStream = MPEG2TransportStreamFromESSource::createNew(*env); newTransportStream->addNewVideoSource(trickModeFilter, indexFile->mpegVersion()); // Open the output file (for writing), as a 'file sink': char const* outputFileName = argv[4]; MediaSink* output = FileSink::createNew(*env, outputFileName); if (output == NULL) { *env << "Failed to open output file \"" << outputFileName << "\"\n"; exit(1); } // Start playing, to generate the output file: *env << "Writing output file \"" << outputFileName << "\" (start time " << startTime << ", scale " << scale << ")..."; output->startPlaying(*newTransportStream, afterPlaying, NULL); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "...done\n"; exit(0); } live/testProgs/testDVVideoStreamer.cpp000444 001751 000000 00000011100 12265042432 020331 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that reads a DV Video Elementary Stream file, // and streams it using RTP // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" #include "GroupsockHelper.hh" UsageEnvironment* env; char const* inputFileName = "test.dv"; DVVideoStreamFramer* videoSource; RTPSink* videoSink; void play(); // forward int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create 'groupsocks' for RTP and RTCP: struct in_addr destinationAddress; destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env); // Note: This is a multicast address. If you wish instead to stream // using unicast, then you should use the "testOnDemandRTSPServer" // test program - not this test program - as a model. const unsigned short rtpPortNum = 18888; const unsigned short rtcpPortNum = rtpPortNum+1; const unsigned char ttl = 255; const Port rtpPort(rtpPortNum); const Port rtcpPort(rtcpPortNum); Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl); rtpGroupsock.multicastSendOnly(); // we're a SSM source Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl); rtcpGroupsock.multicastSendOnly(); // we're a SSM source // Create a 'DV Video RTP' sink from the RTP 'groupsock': // (But first, make sure that its buffers will be large enough to handle the huge size of DV frames (as big as 288000).) 
OutPacketBuffer::maxSize = 300000; videoSink = DVVideoRTPSink::createNew(*env, &rtpGroupsock, 96); // Create (and start) a 'RTCP instance' for this RTP sink: const unsigned estimatedSessionBandwidth = 50000; // in kbps; for RTCP b/w share const unsigned maxCNAMElen = 100; unsigned char CNAME[maxCNAMElen+1]; gethostname((char*)CNAME, maxCNAMElen); CNAME[maxCNAMElen] = '\0'; // just in case RTCPInstance* rtcp = RTCPInstance::createNew(*env, &rtcpGroupsock, estimatedSessionBandwidth, CNAME, videoSink, NULL /* we're a server */, True /* we're a SSM source */); // Note: This starts RTCP running automatically RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554); if (rtspServer == NULL) { *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n"; exit(1); } ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName, "Session streamed by \"testDVVideoStreamer\"", True /*SSM*/); sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp)); rtspServer->addServerMediaSession(sms); char* url = rtspServer->rtspURL(sms); *env << "Play this stream using the URL \"" << url << "\"\n"; delete[] url; // Start the streaming: *env << "Beginning streaming...\n"; play(); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "...done reading from file\n"; videoSink->stopPlaying(); Medium::close(videoSource); // Note that this also closes the input file that this source read from. // Start playing once again: play(); } void play() { // Open the input file as a 'byte-stream file source': ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, inputFileName); if (fileSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } FramedSource* videoES = fileSource; // Create a framer for the Video Elementary Stream: videoSource = DVVideoStreamFramer::createNew(*env, videoES); // Finally, start playing: *env << "Beginning to read from file...\n"; videoSink->startPlaying(*videoSource, afterPlaying, videoSink); } live/testProgs/testMPEG1or2VideoReceiver.cpp000444 001751 000000 00000010445 12265042432 021251 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that receives a RTP/RTCP multicast MPEG video stream, // and outputs the resulting MPEG file stream to 'stdout' // main program #include "liveMedia.hh" #include "GroupsockHelper.hh" #include "BasicUsageEnvironment.hh" // To receive a "source-specific multicast" (SSM) stream, uncomment this: //#define USE_SSM 1 void afterPlaying(void* clientData); // forward // A structure to hold the state of the current session. 
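// (A single file-scope struct suffices here, because this demo receives just one session,
//  and the "afterPlaying()" callback is handed only a raw 'void*' client-data pointer.)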
// It is used in the "afterPlaying()" function to clean up the session. struct sessionState_t { RTPSource* source; MediaSink* sink; RTCPInstance* rtcpInstance; } sessionState; UsageEnvironment* env; int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create the data sink for 'stdout': sessionState.sink = FileSink::createNew(*env, "stdout"); // Note: The string "stdout" is handled as a special case. // A real file name could have been used instead. // Create 'groupsocks' for RTP and RTCP: char const* sessionAddressStr #ifdef USE_SSM = "232.255.42.42"; #else = "239.255.42.42"; // Note: If the session is unicast rather than multicast, // then replace this string with "0.0.0.0" #endif const unsigned short rtpPortNum = 8888; const unsigned short rtcpPortNum = rtpPortNum+1; #ifndef USE_SSM const unsigned char ttl = 1; // low, in case routers don't admin scope #endif struct in_addr sessionAddress; sessionAddress.s_addr = our_inet_addr(sessionAddressStr); const Port rtpPort(rtpPortNum); const Port rtcpPort(rtcpPortNum); #ifdef USE_SSM char* sourceAddressStr = "aaa.bbb.ccc.ddd"; // replace this with the real source address struct in_addr sourceFilterAddress; sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr); Groupsock rtpGroupsock(*env, sessionAddress, sourceFilterAddress, rtpPort); Groupsock rtcpGroupsock(*env, sessionAddress, sourceFilterAddress, rtcpPort); rtcpGroupsock.changeDestinationParameters(sourceFilterAddress,0,~0); // our RTCP "RR"s are sent back using unicast #else Groupsock rtpGroupsock(*env, sessionAddress, rtpPort, ttl); Groupsock rtcpGroupsock(*env, sessionAddress, rtcpPort, ttl); #endif // Create the data source: a "MPEG Video RTP source" sessionState.source = MPEG1or2VideoRTPSource::createNew(*env, &rtpGroupsock); // Create (and start) a 'RTCP instance' for the RTP source: const unsigned estimatedSessionBandwidth = 160; // in kbps; for RTCP b/w share const unsigned maxCNAMElen = 100; unsigned char CNAME[maxCNAMElen+1]; gethostname((char*)CNAME, maxCNAMElen); CNAME[maxCNAMElen] = '\0'; // just in case sessionState.rtcpInstance = RTCPInstance::createNew(*env, &rtcpGroupsock, estimatedSessionBandwidth, CNAME, NULL /* we're a client */, sessionState.source); // Note: This starts RTCP running automatically // Finally, start receiving the multicast stream: *env << "Beginning receiving multicast stream...\n"; sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "...done receiving\n"; // End by closing the media: Medium::close(sessionState.rtcpInstance); // Note: Sends a RTCP BYE Medium::close(sessionState.sink); Medium::close(sessionState.source); } live/testProgs/testH264VideoToTransportStream.cpp000444 001751 000000 00000005272 12265042432 022351 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A program that converts a H.264 (Elementary Stream) video file into a Transport Stream file. // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" char const* inputFileName = "in.264"; char const* outputFileName = "out.ts"; void afterPlaying(void* clientData); // forward UsageEnvironment* env; int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Open the input file as a 'byte-stream file source': FramedSource* inputSource = ByteStreamFileSource::createNew(*env, inputFileName); if (inputSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } // Create a 'framer' filter for this file source, to generate presentation times for each NAL unit: H264VideoStreamFramer* framer = H264VideoStreamFramer::createNew(*env, inputSource, True/*includeStartCodeInOutput*/); // Then create a filter that packs the H.264 video data into a Transport Stream: MPEG2TransportStreamFromESSource* tsFrames = MPEG2TransportStreamFromESSource::createNew(*env); tsFrames->addNewVideoSource(framer, 5/*mpegVersion: H.264*/); // Open the output file as a 'file sink': MediaSink* outputSink = FileSink::createNew(*env, outputFileName); if (outputSink == NULL) { *env << "Unable to open file \"" << outputFileName << "\" as a file sink\n"; exit(1); } // Finally, start playing: *env << "Beginning to read...\n"; outputSink->startPlaying(*tsFrames, afterPlaying, NULL); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "Done reading.\n"; *env << "Wrote output file: \"" << outputFileName << "\"\n"; exit(0); } live/testProgs/testReplicator.cpp000444 001751 000000 00000011463 12265042432 017446 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A demo application that receives a UDP multicast stream, replicates it (using the "StreamReplicator" class), // and retransmits one replica stream to another (multicast or unicast) address & port, // and writes the other replica stream to a file. 
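// The data flow that this program sets up is, roughly:
//
//   BasicUDPSource --> StreamReplicator --> replica #1 --> BasicUDPSink (re-transmission)
//                                      \--> replica #2 --> FileSink     (file copy)
//
// Each call to "createStreamReplica()" adds one more output branch; the replicator then
// delivers every input frame to each replica.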
// // main program #include <liveMedia.hh> #include "BasicUsageEnvironment.hh" #include "GroupsockHelper.hh" UsageEnvironment* env; // To receive a "source-specific multicast" (SSM) stream, uncomment this: //#define USE_SSM 1 void startReplicaUDPSink(StreamReplicator* replicator, char const* outputAddressStr, portNumBits outputPortNum); // forward void startReplicaFileSink(StreamReplicator* replicator, char const* outputFileName); // forward int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create a 'groupsock' for the input multicast group, port: char const* inputAddressStr #ifdef USE_SSM = "232.255.42.42"; #else = "239.255.42.42"; #endif struct in_addr inputAddress; inputAddress.s_addr = our_inet_addr(inputAddressStr); Port const inputPort(8888); unsigned char const inputTTL = 0; // we're only reading from this mcast group #ifdef USE_SSM char* sourceAddressStr = "aaa.bbb.ccc.ddd"; // replace this with the real source address struct in_addr sourceFilterAddress; sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr); Groupsock inputGroupsock(*env, inputAddress, sourceFilterAddress, inputPort); #else Groupsock inputGroupsock(*env, inputAddress, inputPort, inputTTL); #endif // Then create a liveMedia 'source' object, encapsulating this groupsock: FramedSource* source = BasicUDPSource::createNew(*env, &inputGroupsock); // And feed this into a 'stream replicator': StreamReplicator* replicator = StreamReplicator::createNew(*env, source); // Then create a network (UDP) 'sink' object to receive a replica of the input stream, and start it. // If you wish, you can duplicate this line - with different network addresses and ports - to create multiple output UDP streams: startReplicaUDPSink(replicator, "239.255.43.43", 4444); // Then create a file 'sink' object to receive a replica of the input stream, and start it. // If you wish, you can duplicate this line - with a different file name - to create multiple output files: startReplicaFileSink(replicator, "test.out"); // Finally, enter the 'event loop' (which is where most of the 'real work' in a LIVE555-based application gets done): env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void startReplicaUDPSink(StreamReplicator* replicator, char const* outputAddressStr, portNumBits outputPortNum) { // Begin by creating an input stream from our replicator: FramedSource* source = replicator->createStreamReplica(); // Create a 'groupsock' for the destination address and port: struct in_addr outputAddress; outputAddress.s_addr = our_inet_addr(outputAddressStr); Port const outputPort(outputPortNum); unsigned char const outputTTL = 255; Groupsock* outputGroupsock = new Groupsock(*env, outputAddress, outputPort, outputTTL); // Then create a liveMedia 'sink' object, encapsulating this groupsock: unsigned const maxPacketSize = 65536; // allow for large UDP packets MediaSink* sink = BasicUDPSink::createNew(*env, outputGroupsock, maxPacketSize); // Now, start playing, feeding the sink object from the source: sink->startPlaying(*source, NULL, NULL); } void startReplicaFileSink(StreamReplicator* replicator, char const* outputFileName) { // Begin by creating an input stream from our replicator: FramedSource* source = replicator->createStreamReplica(); // Then create a 'file sink' object to receive the replica stream: MediaSink* sink = FileSink::createNew(*env, outputFileName); // Now, start playing, feeding the sink object from the source: sink->startPlaying(*source, NULL, NULL); } live/testProgs/testMPEG2TransportReceiver.cpp000444 001751 000000 00000010623 12265042432 021553 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A test program that receives an RTP/RTCP multicast MPEG-2 Transport Stream, // and outputs the resulting Transport Stream data to 'stdout' // main program #include "liveMedia.hh" #include "GroupsockHelper.hh" #include "BasicUsageEnvironment.hh" // To receive a "source-specific multicast" (SSM) stream, uncomment this: //#define USE_SSM 1 void afterPlaying(void* clientData); // forward // A structure to hold the state of the current session. // It is used in the "afterPlaying()" function to clean up the session.
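// (Because "afterPlaying()" is a plain function-pointer callback, the session objects are kept
// in this file-scope struct rather than being passed through the callback's "clientData"
// parameter - note that "startPlaying()" below passes NULL for that parameter.)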
struct sessionState_t { RTPSource* source; MediaSink* sink; RTCPInstance* rtcpInstance; } sessionState; UsageEnvironment* env; int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Create the data sink for 'stdout': sessionState.sink = FileSink::createNew(*env, "stdout"); // Note: The string "stdout" is handled as a special case. // A real file name could have been used instead. // Create 'groupsocks' for RTP and RTCP: char const* sessionAddressStr #ifdef USE_SSM = "232.255.42.42"; #else = "239.255.42.42"; // Note: If the session is unicast rather than multicast, // then replace this string with "0.0.0.0" #endif const unsigned short rtpPortNum = 1234; const unsigned short rtcpPortNum = rtpPortNum+1; #ifndef USE_SSM const unsigned char ttl = 1; // low, in case routers don't admin scope #endif struct in_addr sessionAddress; sessionAddress.s_addr = our_inet_addr(sessionAddressStr); const Port rtpPort(rtpPortNum); const Port rtcpPort(rtcpPortNum); #ifdef USE_SSM char* sourceAddressStr = "aaa.bbb.ccc.ddd"; // replace this with the real source address struct in_addr sourceFilterAddress; sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr); Groupsock rtpGroupsock(*env, sessionAddress, sourceFilterAddress, rtpPort); Groupsock rtcpGroupsock(*env, sessionAddress, sourceFilterAddress, rtcpPort); rtcpGroupsock.changeDestinationParameters(sourceFilterAddress,0,~0); // our RTCP "RR"s are sent back using unicast #else Groupsock rtpGroupsock(*env, sessionAddress, rtpPort, ttl); Groupsock rtcpGroupsock(*env, sessionAddress, rtcpPort, ttl); #endif // Create the data source: a "MPEG-2 TransportStream RTP source" (which uses a 'simple' RTP payload format): sessionState.source = SimpleRTPSource::createNew(*env, &rtpGroupsock, 33, 90000, "video/MP2T", 0, False /*no 'M' bit*/); // Create (and start) a 'RTCP instance' for the RTP source: const unsigned estimatedSessionBandwidth = 160; // in kbps; for RTCP b/w share const unsigned maxCNAMElen = 100; unsigned char CNAME[maxCNAMElen+1]; gethostname((char*)CNAME, maxCNAMElen); CNAME[maxCNAMElen] = '\0'; // just in case sessionState.rtcpInstance = RTCPInstance::createNew(*env, &rtcpGroupsock, estimatedSessionBandwidth, CNAME, NULL /* we're a client */, sessionState.source); // Note: This starts RTCP running automatically // Finally, start receiving the multicast stream: *env << "Beginning receiving multicast stream...\n"; sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "...done receiving\n"; // End by closing the media: Medium::close(sessionState.rtcpInstance); // Note: Sends a RTCP BYE Medium::close(sessionState.sink); Medium::close(sessionState.source); } live/testProgs/testH265VideoToTransportStream.cpp000444 001751 000000 00000005272 12265042432 022352 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A program that converts a H.265 (Elementary Stream) video file into a Transport Stream file. // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" char const* inputFileName = "in.265"; char const* outputFileName = "out.ts"; void afterPlaying(void* clientData); // forward UsageEnvironment* env; int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); // Open the input file as a 'byte-stream file source': FramedSource* inputSource = ByteStreamFileSource::createNew(*env, inputFileName); if (inputSource == NULL) { *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n"; exit(1); } // Create a 'framer' filter for this file source, to generate presentation times for each NAL unit: H265VideoStreamFramer* framer = H265VideoStreamFramer::createNew(*env, inputSource, True/*includeStartCodeInOutput*/); // Then create a filter that packs the H.265 video data into a Transport Stream: MPEG2TransportStreamFromESSource* tsFrames = MPEG2TransportStreamFromESSource::createNew(*env); tsFrames->addNewVideoSource(framer, 6/*mpegVersion: H.265*/); // Open the output file as a 'file sink': MediaSink* outputSink = FileSink::createNew(*env, outputFileName); if (outputSink == NULL) { *env << "Unable to open file \"" << outputFileName << "\" as a file sink\n"; exit(1); } // Finally, start playing: *env << "Beginning to read...\n"; outputSink->startPlaying(*tsFrames, afterPlaying, NULL); env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } void afterPlaying(void* /*clientData*/) { *env << "Done reading.\n"; *env << "Wrote output file: \"" << outputFileName << "\"\n"; exit(0); } live/testProgs/Makefile.head000440 001751 000000 00000000727 12265042432 016273 0ustar00rsfwheel000000 000000 INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include # Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later. libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX) libBasicUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX) libUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX) libgroupsock_LIB_SUFFIX = $(LIB_SUFFIX) ##### Change the following for your environment: live/mediaServer/live555MediaServer.cpp000444 001751 000000 00000010341 12265042432 020235 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // LIVE555 Media Server // main program #include <BasicUsageEnvironment.hh> #include "DynamicRTSPServer.hh" #include "version.hh" int main(int argc, char** argv) { // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler); UserAuthenticationDatabase* authDB = NULL; #ifdef ACCESS_CONTROL // To implement client access control to the RTSP server, do the following: authDB = new UserAuthenticationDatabase; authDB->addUserRecord("username1", "password1"); // replace these with real strings // Repeat the above with each <username>, <password> that you wish to allow // access to the server. #endif // Create the RTSP server. Try first with the default port number (554), // and then with the alternative port number (8554): RTSPServer* rtspServer; portNumBits rtspServerPortNum = 554; rtspServer = DynamicRTSPServer::createNew(*env, rtspServerPortNum, authDB); if (rtspServer == NULL) { rtspServerPortNum = 8554; rtspServer = DynamicRTSPServer::createNew(*env, rtspServerPortNum, authDB); } if (rtspServer == NULL) { *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n"; exit(1); } *env << "LIVE555 Media Server\n"; *env << "\tversion " << MEDIA_SERVER_VERSION_STRING << " (LIVE555 Streaming Media library version " << LIVEMEDIA_LIBRARY_VERSION_STRING << ").\n"; char* urlPrefix = rtspServer->rtspURLPrefix(); *env << "Play streams from this server using the URL\n\t" << urlPrefix << "<filename>\nwhere <filename> is a file present in the current directory.\n"; *env << "Each file's type is inferred from its name suffix:\n"; *env << "\t\".264\" => a H.264 Video Elementary Stream file\n"; *env << "\t\".265\" => a H.265 Video Elementary Stream file\n"; *env << "\t\".aac\" => an AAC Audio (ADTS format) file\n"; *env << "\t\".ac3\" => an AC-3 Audio file\n"; *env << "\t\".amr\" => an AMR Audio file\n"; *env << "\t\".dv\" => a DV Video file\n"; *env << "\t\".m4e\" => a MPEG-4 Video Elementary Stream file\n"; *env << "\t\".mkv\" => a Matroska audio+video+(optional)subtitles file\n"; *env << "\t\".mp3\" => a MPEG-1 or 2 Audio file\n"; *env << "\t\".mpg\" => a MPEG-1 or 2 Program Stream (audio+video) file\n"; *env << "\t\".ts\" => a MPEG Transport Stream file\n"; *env << "\t\t(a \".tsx\" index file - if present - provides server 'trick play' support)\n"; *env << "\t\".vob\" => a VOB (MPEG-2 video with AC-3 audio) file\n"; *env << "\t\".wav\" => a WAV Audio file\n"; *env << "\t\".webm\" => a WebM audio(Vorbis)+video(VP8) file\n"; *env << "See http://www.live555.com/mediaServer/ for additional documentation.\n"; // Also, attempt to create an HTTP server for RTSP-over-HTTP tunneling. // Try first with the default HTTP port (80), and then with the alternative HTTP // port numbers (8000 and 8080).
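// ("setUpTunnelingOverHTTP()" returns True on success, so the '||' chain below tries each
// port in turn - 80, then 8000, then 8080 - and stops at the first one that can be bound;
// only if all three fail is tunneling reported as unavailable.)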
if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) { *env << "(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling, or for HTTP live streaming (for indexed Transport Stream files only).)\n"; } else { *env << "(RTSP-over-HTTP tunneling is not available.)\n"; } env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } live/mediaServer/Makefile.head000440 001751 000000 00000000727 12265042432 016547 0ustar00rsfwheel000000 000000 INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include # Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later. libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX) libBasicUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX) libUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX) libgroupsock_LIB_SUFFIX = $(LIB_SUFFIX) ##### Change the following for your environment: live/mediaServer/Makefile.tail000444 001751 000000 00000002703 12265042432 016577 0ustar00rsfwheel000000 000000 ##### End of variables to change MEDIA_SERVER = live555MediaServer$(EXE) PREFIX = /usr/local ALL = $(MEDIA_SERVER) all: $(ALL) .$(C).$(OBJ): $(C_COMPILER) -c $(C_FLAGS) $< .$(CPP).$(OBJ): $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $< MEDIA_SERVER_OBJS = live555MediaServer.$(OBJ) DynamicRTSPServer.$(OBJ) live555MediaServer.$(CPP): DynamicRTSPServer.hh version.hh DynamicRTSPServer.$(CPP): DynamicRTSPServer.hh USAGE_ENVIRONMENT_DIR = ../UsageEnvironment USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(libUsageEnvironment_LIB_SUFFIX) BASIC_USAGE_ENVIRONMENT_DIR = ../BasicUsageEnvironment BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(libBasicUsageEnvironment_LIB_SUFFIX) LIVEMEDIA_DIR = ../liveMedia LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(libliveMedia_LIB_SUFFIX) GROUPSOCK_DIR = ../groupsock GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(libgroupsock_LIB_SUFFIX) LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \ $(BASIC_USAGE_ENVIRONMENT_LIB) $(USAGE_ENVIRONMENT_LIB) LIBS = $(LOCAL_LIBS) $(LIBS_FOR_CONSOLE_APPLICATION) live555MediaServer$(EXE): $(MEDIA_SERVER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MEDIA_SERVER_OBJS) $(LIBS) clean: -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~ install: $(MEDIA_SERVER) install -d $(DESTDIR)$(PREFIX)/bin install -m 755 $(MEDIA_SERVER) $(DESTDIR)$(PREFIX)/bin ##### Any additional, platform-specific rules come here: live/mediaServer/DynamicRTSPServer.cpp000444 001751 000000 00000021452 12265042432 020201 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. 
All rights reserved // A subclass of "RTSPServer" that creates "ServerMediaSession"s on demand, // based on whether or not the specified stream name exists as a file // Implementation #include "DynamicRTSPServer.hh" #include <liveMedia.hh> #include <string.h> DynamicRTSPServer* DynamicRTSPServer::createNew(UsageEnvironment& env, Port ourPort, UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds) { int ourSocket = setUpOurSocket(env, ourPort); if (ourSocket == -1) return NULL; return new DynamicRTSPServer(env, ourSocket, ourPort, authDatabase, reclamationTestSeconds); } DynamicRTSPServer::DynamicRTSPServer(UsageEnvironment& env, int ourSocket, Port ourPort, UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds) : RTSPServerSupportingHTTPStreaming(env, ourSocket, ourPort, authDatabase, reclamationTestSeconds) { } DynamicRTSPServer::~DynamicRTSPServer() { } static ServerMediaSession* createNewSMS(UsageEnvironment& env, char const* fileName, FILE* fid); // forward ServerMediaSession* DynamicRTSPServer::lookupServerMediaSession(char const* streamName) { // First, check whether the specified "streamName" exists as a local file: FILE* fid = fopen(streamName, "rb"); Boolean fileExists = fid != NULL; // Next, check whether we already have a "ServerMediaSession" for this file: ServerMediaSession* sms = RTSPServer::lookupServerMediaSession(streamName); Boolean smsExists = sms != NULL; // Handle the four possibilities for "fileExists" and "smsExists": if (!fileExists) { if (smsExists) { // "sms" was created for a file that no longer exists. Remove it: removeServerMediaSession(sms); } return NULL; } else { if (!smsExists) { // Create a new "ServerMediaSession" object for streaming from the named file. sms = createNewSMS(envir(), streamName, fid); addServerMediaSession(sms); } fclose(fid); return sms; } } // Special code for handling Matroska files: static char newMatroskaDemuxWatchVariable; static MatroskaFileServerDemux* demux; static void onMatroskaDemuxCreation(MatroskaFileServerDemux* newDemux, void* /*clientData*/) { demux = newDemux; newMatroskaDemuxWatchVariable = 1; } // END Special code for handling Matroska files: #define NEW_SMS(description) do {\ char const* descStr = description\ ", streamed by the LIVE555 Media Server";\ sms = ServerMediaSession::createNew(env, fileName, fileName, descStr);\ } while(0) static ServerMediaSession* createNewSMS(UsageEnvironment& env, char const* fileName, FILE* /*fid*/) { // Use the file name extension to determine the type of "ServerMediaSession": char const* extension = strrchr(fileName, '.'); if (extension == NULL) return NULL; ServerMediaSession* sms = NULL; Boolean const reuseSource = False; if (strcmp(extension, ".aac") == 0) { // Assumed to be an AAC Audio (ADTS format) file: NEW_SMS("AAC Audio"); sms->addSubsession(ADTSAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".amr") == 0) { // Assumed to be an AMR Audio file: NEW_SMS("AMR Audio"); sms->addSubsession(AMRAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".ac3") == 0) { // Assumed to be an AC-3 Audio file: NEW_SMS("AC-3 Audio"); sms->addSubsession(AC3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".m4e") == 0) { // Assumed to be a MPEG-4 Video Elementary Stream file: NEW_SMS("MPEG-4 Video"); sms->addSubsession(MPEG4VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".264") ==
0) { // Assumed to be a H.264 Video Elementary Stream file: NEW_SMS("H.264 Video"); OutPacketBuffer::maxSize = 100000; // allow for some possibly large H.264 frames sms->addSubsession(H264VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".265") == 0) { // Assumed to be a H.265 Video Elementary Stream file: NEW_SMS("H.265 Video"); OutPacketBuffer::maxSize = 100000; // allow for some possibly large H.265 frames sms->addSubsession(H265VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".mp3") == 0) { // Assumed to be a MPEG-1 or 2 Audio file: NEW_SMS("MPEG-1 or 2 Audio"); // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following: //#define STREAM_USING_ADUS 1 // To also reorder ADUs before streaming, uncomment the following: //#define INTERLEAVE_ADUS 1 // (For more information about ADUs and interleaving, // see ) Boolean useADUs = False; Interleaving* interleaving = NULL; #ifdef STREAM_USING_ADUS useADUs = True; #ifdef INTERLEAVE_ADUS unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own... unsigned const interleaveCycleSize = (sizeof interleaveCycle)/(sizeof (unsigned char)); interleaving = new Interleaving(interleaveCycleSize, interleaveCycle); #endif #endif sms->addSubsession(MP3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, useADUs, interleaving)); } else if (strcmp(extension, ".mpg") == 0) { // Assumed to be a MPEG-1 or 2 Program Stream (audio+video) file: NEW_SMS("MPEG-1 or 2 Program Stream"); MPEG1or2FileServerDemux* demux = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource); sms->addSubsession(demux->newVideoServerMediaSubsession()); sms->addSubsession(demux->newAudioServerMediaSubsession()); } else if (strcmp(extension, ".vob") == 0) { // Assumed to be a VOB (MPEG-2 Program Stream, with AC-3 audio) file: NEW_SMS("VOB (MPEG-2 video with AC-3 audio)"); MPEG1or2FileServerDemux* demux = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource); sms->addSubsession(demux->newVideoServerMediaSubsession()); sms->addSubsession(demux->newAC3AudioServerMediaSubsession()); } else if (strcmp(extension, ".ts") == 0) { // Assumed to be a MPEG Transport Stream file: // Use an index file name that's the same as the TS file name, except with ".tsx": unsigned indexFileNameLen = strlen(fileName) + 2; // allow for trailing "x\0" char* indexFileName = new char[indexFileNameLen]; sprintf(indexFileName, "%sx", fileName); NEW_SMS("MPEG Transport Stream"); sms->addSubsession(MPEG2TransportFileServerMediaSubsession::createNew(env, fileName, indexFileName, reuseSource)); delete[] indexFileName; } else if (strcmp(extension, ".wav") == 0) { // Assumed to be a WAV Audio file: NEW_SMS("WAV Audio Stream"); // To convert 16-bit PCM data to 8-bit u-law, prior to streaming, // change the following to True: Boolean convertToULaw = False; sms->addSubsession(WAVAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, convertToULaw)); } else if (strcmp(extension, ".dv") == 0) { // Assumed to be a DV Video file // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000). 
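// (As with ".264" and ".265" above, "OutPacketBuffer::maxSize" is a single library-wide value,
// not a per-session one; raising it to 300000 here affects every "RTPSink" created afterwards,
// at the cost of a somewhat larger packet buffer per sink.)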
OutPacketBuffer::maxSize = 300000; NEW_SMS("DV Video"); sms->addSubsession(DVVideoFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".mkv") == 0 || strcmp(extension, ".webm") == 0) { // Assumed to be a Matroska file (note that WebM ('.webm') files are also Matroska files) NEW_SMS("Matroska video+audio+(optional)subtitles"); // Create a Matroska file server demultiplexor for the specified file. (We enter the event loop to wait for this to complete.) newMatroskaDemuxWatchVariable = 0; MatroskaFileServerDemux::createNew(env, fileName, onMatroskaDemuxCreation, NULL); env.taskScheduler().doEventLoop(&newMatroskaDemuxWatchVariable); ServerMediaSubsession* smss; while ((smss = demux->newServerMediaSubsession()) != NULL) { sms->addSubsession(smss); } } return sms; } live/mediaServer/DynamicRTSPServer.hh000444 001751 000000 00000003403 12265042432 020012 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // A subclass of "RTSPServer" that creates "ServerMediaSession"s on demand, // based on whether or not the specified stream name exists as a file // Header file #ifndef _DYNAMIC_RTSP_SERVER_HH #define _DYNAMIC_RTSP_SERVER_HH #ifndef _RTSP_SERVER_SUPPORTING_HTTP_STREAMING_HH #include "RTSPServerSupportingHTTPStreaming.hh" #endif class DynamicRTSPServer: public RTSPServerSupportingHTTPStreaming { public: static DynamicRTSPServer* createNew(UsageEnvironment& env, Port ourPort, UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds = 65); protected: DynamicRTSPServer(UsageEnvironment& env, int ourSocket, Port ourPort, UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds); // called only by createNew(); virtual ~DynamicRTSPServer(); protected: // redefined virtual functions virtual ServerMediaSession* lookupServerMediaSession(char const* streamName); }; #endif live/mediaServer/version.hh000444 001751 000000 00000000413 12265042432 016211 0ustar00rsfwheel000000 000000 // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // Version information for the LIVE555 Media Server application // Header file #ifndef _MEDIA_SERVER_VERSION_HH #define _MEDIA_SERVER_VERSION_HH #define MEDIA_SERVER_VERSION_STRING "0.80" #endif live/mediaServer/COPYING000755 001751 000000 00000000000 12265042432 016501 2../COPYINGustar00rsfwheel000000 000000 live/proxyServer/COPYING000755 001751 000000 00000000000 12265042432 016603 2../COPYINGustar00rsfwheel000000 000000 live/proxyServer/Makefile.head000440 001751 000000 00000000727 12265042432 016651 0ustar00rsfwheel000000 000000 INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include # Default library filename suffixes for each library that we link with. 
The "config.*" file might redefine these later. libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX) libBasicUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX) libUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX) libgroupsock_LIB_SUFFIX = $(LIB_SUFFIX) ##### Change the following for your environment: live/proxyServer/Makefile.tail000444 001751 000000 00000002477 12265042432 016711 0ustar00rsfwheel000000 000000 ##### End of variables to change PROXY_SERVER = live555ProxyServer$(EXE) PREFIX = /usr/local ALL = $(PROXY_SERVER) all: $(ALL) .$(C).$(OBJ): $(C_COMPILER) -c $(C_FLAGS) $< .$(CPP).$(OBJ): $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $< PROXY_SERVER_OBJS = live555ProxyServer.$(OBJ) USAGE_ENVIRONMENT_DIR = ../UsageEnvironment USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(libUsageEnvironment_LIB_SUFFIX) BASIC_USAGE_ENVIRONMENT_DIR = ../BasicUsageEnvironment BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(libBasicUsageEnvironment_LIB_SUFFIX) LIVEMEDIA_DIR = ../liveMedia LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(libliveMedia_LIB_SUFFIX) GROUPSOCK_DIR = ../groupsock GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(libgroupsock_LIB_SUFFIX) LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \ $(BASIC_USAGE_ENVIRONMENT_LIB) $(USAGE_ENVIRONMENT_LIB) LIBS = $(LOCAL_LIBS) $(LIBS_FOR_CONSOLE_APPLICATION) live555ProxyServer$(EXE): $(PROXY_SERVER_OBJS) $(LOCAL_LIBS) $(LINK)$@ $(CONSOLE_LINK_OPTS) $(PROXY_SERVER_OBJS) $(LIBS) clean: -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~ install: $(PROXY_SERVER) install -d $(DESTDIR)$(PREFIX)/bin install -m 755 $(PROXY_SERVER) $(DESTDIR)$(PREFIX)/bin ##### Any additional, platform-specific rules come here: live/proxyServer/live555ProxyServer.cpp000444 001751 000000 00000017625 12265042432 020455 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014, Live Networks, Inc. All rights reserved // LIVE555 Proxy Server // main program #include "liveMedia.hh" #include "BasicUsageEnvironment.hh" char const* progName; UsageEnvironment* env; UserAuthenticationDatabase* authDB = NULL; UserAuthenticationDatabase* authDBForREGISTER = NULL; // Default values of command-line parameters: int verbosityLevel = 0; Boolean streamRTPOverTCP = False; portNumBits tunnelOverHTTPPortNum = 0; char* username = NULL; char* password = NULL; Boolean proxyREGISTERRequests = False; char* usernameForREGISTER = NULL; char* passwordForREGISTER = NULL; static RTSPServer* createRTSPServer(Port port) { if (proxyREGISTERRequests) { return RTSPServerWithREGISTERProxying::createNew(*env, port, authDB, authDBForREGISTER, 65, streamRTPOverTCP, verbosityLevel); } else { return RTSPServer::createNew(*env, port, authDB); } } void usage() { *env << "Usage: " << progName << " [-v|-V]" << " [-t|-T ]" << " [-u ]" << " [-R] [-U ]" << " ... 
\n"; exit(1); } int main(int argc, char** argv) { // Increase the maximum size of video frames that we can 'proxy' without truncation. // (Such frames are unreasonably large; the back-end servers should really not be sending frames this large!) OutPacketBuffer::maxSize = 100000; // bytes // Begin by setting up our usage environment: TaskScheduler* scheduler = BasicTaskScheduler::createNew(); env = BasicUsageEnvironment::createNew(*scheduler); *env << "LIVE555 Proxy Server\n" << "\t(LIVE555 Streaming Media library version " << LIVEMEDIA_LIBRARY_VERSION_STRING << ")\n\n"; // Check command-line arguments: optional parameters, then one or more rtsp:// URLs (of streams to be proxied): progName = argv[0]; if (argc < 2) usage(); while (argc > 1) { // Process initial command-line options (beginning with "-"): char* const opt = argv[1]; if (opt[0] != '-') break; // the remaining parameters are assumed to be "rtsp://" URLs switch (opt[1]) { case 'v': { // verbose output verbosityLevel = 1; break; } case 'V': { // more verbose output verbosityLevel = 2; break; } case 't': { // Stream RTP and RTCP over the TCP 'control' connection. // (This is for the 'back end' (i.e., proxied) stream only.) streamRTPOverTCP = True; break; } case 'T': { // stream RTP and RTCP over a HTTP connection if (argc > 3 && argv[2][0] != '-') { // The next argument is the HTTP server port number: if (sscanf(argv[2], "%hu", &tunnelOverHTTPPortNum) == 1 && tunnelOverHTTPPortNum > 0) { ++argv; --argc; break; } } // If we get here, the option was specified incorrectly: usage(); break; } case 'u': { // specify a username and password (to be used if the 'back end' (i.e., proxied) stream requires authentication) if (argc < 4) usage(); // there's no argv[3] (for the "password") username = argv[2]; password = argv[3]; argv += 2; argc -= 2; break; } case 'U': { // specify a username and password to use to authenticate incoming "REGISTER" commands if (argc < 4) usage(); // there's no argv[3] (for the "password") usernameForREGISTER = argv[2]; passwordForREGISTER = argv[3]; if (authDBForREGISTER == NULL) authDBForREGISTER = new UserAuthenticationDatabase; authDBForREGISTER->addUserRecord(usernameForREGISTER, passwordForREGISTER); argv += 2; argc -= 2; break; } case 'R': { // Handle incoming "REGISTER" requests by proxying the specified stream: proxyREGISTERRequests = True; break; } default: { usage(); break; } } ++argv; --argc; } if (argc < 2 && !proxyREGISTERRequests) usage(); // there must be at least one "rtsp://" URL at the end // Make sure that the remaining arguments appear to be "rtsp://" URLs: int i; for (i = 1; i < argc; ++i) { if (strncmp(argv[i], "rtsp://", 7) != 0) usage(); } // Do some additional checking for invalid command-line argument combinations: if (authDBForREGISTER != NULL && !proxyREGISTERRequests) { *env << "The '-U ' option can be used only with -R\n"; usage(); } if (streamRTPOverTCP) { if (tunnelOverHTTPPortNum > 0) { *env << "The -t and -T options cannot both be used!\n"; usage(); } else { tunnelOverHTTPPortNum = (portNumBits)(~0); // hack to tell "ProxyServerMediaSession" to stream over TCP, but not using HTTP } } #ifdef ACCESS_CONTROL // To implement client access control to the RTSP server, do the following: authDB = new UserAuthenticationDatabase; authDB->addUserRecord("username1", "password1"); // replace these with real strings // Repeat this line with each , that you wish to allow access to the server. #endif // Create the RTSP server. 
Try first with the default port number (554), // and then with the alternative port number (8554): RTSPServer* rtspServer; portNumBits rtspServerPortNum = 554; rtspServer = createRTSPServer(rtspServerPortNum); if (rtspServer == NULL) { rtspServerPortNum = 8554; rtspServer = createRTSPServer(rtspServerPortNum); } if (rtspServer == NULL) { *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n"; exit(1); } // Create a proxy for each "rtsp://" URL specified on the command line: for (i = 1; i < argc; ++i) { char const* proxiedStreamURL = argv[i]; char streamName[30]; if (argc == 2) { sprintf(streamName, "%s", "proxyStream"); // there's just one stream; give it this name } else { sprintf(streamName, "proxyStream-%d", i); // there's more than one stream; distinguish them by name } ServerMediaSession* sms = ProxyServerMediaSession::createNew(*env, rtspServer, proxiedStreamURL, streamName, username, password, tunnelOverHTTPPortNum, verbosityLevel); rtspServer->addServerMediaSession(sms); char* proxyStreamURL = rtspServer->rtspURL(sms); *env << "RTSP stream, proxying the stream \"" << proxiedStreamURL << "\"\n"; *env << "\tPlay this stream using the URL: " << proxyStreamURL << "\n"; delete[] proxyStreamURL; } if (proxyREGISTERRequests) { *env << "(We handle incoming \"REGISTER\" requests on port " << rtspServerPortNum << ")\n"; } // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling. // Try first with the default HTTP port (80), and then with the alternative HTTP // port numbers (8000 and 8080). if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) { *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n"; } else { *env << "\n(RTSP-over-HTTP tunneling is not available.)\n"; } // Now, enter the event loop: env->taskScheduler().doEventLoop(); // does not return return 0; // only to prevent compiler warning } live/BasicUsageEnvironment/include/000755 001751 000000 00000000000 12265042432 017617 5ustar00rsfwheel000000 000000 live/BasicUsageEnvironment/BasicTaskScheduler0.cpp000444 001751 000000 00000016530 12265042432 022466 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. 
// Basic Usage Environment: for a simple, non-scripted, console application // Implementation #include "BasicUsageEnvironment0.hh" #include "HandlerSet.hh" ////////// A subclass of DelayQueueEntry, ////////// used to implement BasicTaskScheduler0::scheduleDelayedTask() class AlarmHandler: public DelayQueueEntry { public: AlarmHandler(TaskFunc* proc, void* clientData, DelayInterval timeToDelay) : DelayQueueEntry(timeToDelay), fProc(proc), fClientData(clientData) { } private: // redefined virtual functions virtual void handleTimeout() { (*fProc)(fClientData); DelayQueueEntry::handleTimeout(); } private: TaskFunc* fProc; void* fClientData; }; ////////// BasicTaskScheduler0 ////////// BasicTaskScheduler0::BasicTaskScheduler0() : fLastHandledSocketNum(-1), fTriggersAwaitingHandling(0), fLastUsedTriggerMask(1), fLastUsedTriggerNum(MAX_NUM_EVENT_TRIGGERS-1) { fHandlers = new HandlerSet; for (unsigned i = 0; i < MAX_NUM_EVENT_TRIGGERS; ++i) { fTriggeredEventHandlers[i] = NULL; fTriggeredEventClientDatas[i] = NULL; } } BasicTaskScheduler0::~BasicTaskScheduler0() { delete fHandlers; } TaskToken BasicTaskScheduler0::scheduleDelayedTask(int64_t microseconds, TaskFunc* proc, void* clientData) { if (microseconds < 0) microseconds = 0; DelayInterval timeToDelay((long)(microseconds/1000000), (long)(microseconds%1000000)); AlarmHandler* alarmHandler = new AlarmHandler(proc, clientData, timeToDelay); fDelayQueue.addEntry(alarmHandler); return (void*)(alarmHandler->token()); } void BasicTaskScheduler0::unscheduleDelayedTask(TaskToken& prevTask) { DelayQueueEntry* alarmHandler = fDelayQueue.removeEntry((intptr_t)prevTask); prevTask = NULL; delete alarmHandler; } void BasicTaskScheduler0::doEventLoop(char* watchVariable) { // Repeatedly loop, handling readble sockets and timed events: while (1) { if (watchVariable != NULL && *watchVariable != 0) break; SingleStep(); } } EventTriggerId BasicTaskScheduler0::createEventTrigger(TaskFunc* eventHandlerProc) { unsigned i = fLastUsedTriggerNum; EventTriggerId mask = fLastUsedTriggerMask; do { i = (i+1)%MAX_NUM_EVENT_TRIGGERS; mask >>= 1; if (mask == 0) mask = 0x80000000; if (fTriggeredEventHandlers[i] == NULL) { // This trigger number is free; use it: fTriggeredEventHandlers[i] = eventHandlerProc; fTriggeredEventClientDatas[i] = NULL; // sanity fLastUsedTriggerMask = mask; fLastUsedTriggerNum = i; return mask; } } while (i != fLastUsedTriggerNum); // All available event triggers are allocated; return 0 instead: return 0; } void BasicTaskScheduler0::deleteEventTrigger(EventTriggerId eventTriggerId) { fTriggersAwaitingHandling &=~ eventTriggerId; if (eventTriggerId == fLastUsedTriggerMask) { // common-case optimization: fTriggeredEventHandlers[fLastUsedTriggerNum] = NULL; fTriggeredEventClientDatas[fLastUsedTriggerNum] = NULL; } else { // "eventTriggerId" should have just one bit set. // However, we do the reasonable thing if the user happened to 'or' together two or more "EventTriggerId"s: EventTriggerId mask = 0x80000000; for (unsigned i = 0; i < MAX_NUM_EVENT_TRIGGERS; ++i) { if ((eventTriggerId&mask) != 0) { fTriggeredEventHandlers[i] = NULL; fTriggeredEventClientDatas[i] = NULL; } mask >>= 1; } } } void BasicTaskScheduler0::triggerEvent(EventTriggerId eventTriggerId, void* clientData) { // First, record the "clientData". (Note that we allow "eventTriggerId" to be a combination of bits for multiple events.) 
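// (The scan below visits one bit per trigger slot: "mask" starts at the most-significant bit
// (0x80000000) and is shifted right once per iteration, so trigger number i corresponds to
// bit (31-i), given that MAX_NUM_EVENT_TRIGGERS is 32 in this library.)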
EventTriggerId mask = 0x80000000; for (unsigned i = 0; i < MAX_NUM_EVENT_TRIGGERS; ++i) { if ((eventTriggerId&mask) != 0) { fTriggeredEventClientDatas[i] = clientData; } mask >>= 1; } // Then, note this event as being ready to be handled. // (Note that because this function (unlike others in the library) can be called from an external thread, we do this last, to // reduce the risk of a race condition.) fTriggersAwaitingHandling |= eventTriggerId; } ////////// HandlerSet (etc.) implementation ////////// HandlerDescriptor::HandlerDescriptor(HandlerDescriptor* nextHandler) : conditionSet(0), handlerProc(NULL) { // Link this descriptor into a doubly-linked list: if (nextHandler == this) { // initialization fNextHandler = fPrevHandler = this; } else { fNextHandler = nextHandler; fPrevHandler = nextHandler->fPrevHandler; nextHandler->fPrevHandler = this; fPrevHandler->fNextHandler = this; } } HandlerDescriptor::~HandlerDescriptor() { // Unlink this descriptor from a doubly-linked list: fNextHandler->fPrevHandler = fPrevHandler; fPrevHandler->fNextHandler = fNextHandler; } HandlerSet::HandlerSet() : fHandlers(&fHandlers) { fHandlers.socketNum = -1; // shouldn't ever get looked at, but in case... } HandlerSet::~HandlerSet() { // Delete each handler descriptor: while (fHandlers.fNextHandler != &fHandlers) { delete fHandlers.fNextHandler; // changes fHandlers->fNextHandler } } void HandlerSet ::assignHandler(int socketNum, int conditionSet, TaskScheduler::BackgroundHandlerProc* handlerProc, void* clientData) { // First, see if there's already a handler for this socket: HandlerDescriptor* handler = lookupHandler(socketNum); if (handler == NULL) { // No existing handler, so create a new descr: handler = new HandlerDescriptor(fHandlers.fNextHandler); handler->socketNum = socketNum; } handler->conditionSet = conditionSet; handler->handlerProc = handlerProc; handler->clientData = clientData; } void HandlerSet::clearHandler(int socketNum) { HandlerDescriptor* handler = lookupHandler(socketNum); delete handler; } void HandlerSet::moveHandler(int oldSocketNum, int newSocketNum) { HandlerDescriptor* handler = lookupHandler(oldSocketNum); if (handler != NULL) { handler->socketNum = newSocketNum; } } HandlerDescriptor* HandlerSet::lookupHandler(int socketNum) { HandlerDescriptor* handler; HandlerIterator iter(*this); while ((handler = iter.next()) != NULL) { if (handler->socketNum == socketNum) break; } return handler; } HandlerIterator::HandlerIterator(HandlerSet& handlerSet) : fOurSet(handlerSet) { reset(); } HandlerIterator::~HandlerIterator() { } void HandlerIterator::reset() { fNextPtr = fOurSet.fHandlers.fNextHandler; } HandlerDescriptor* HandlerIterator::next() { HandlerDescriptor* result = fNextPtr; if (result == &fOurSet.fHandlers) { // no more result = NULL; } else { fNextPtr = fNextPtr->fNextHandler; } return result; } live/BasicUsageEnvironment/BasicHashTable.cpp000444 001751 000000 00000016760 12265042432 021505 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. (See .) This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********/ // Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved. // Basic Hash Table implementation // Implementation #include "BasicHashTable.hh" #include "strDup.hh" #if defined(__WIN32__) || defined(_WIN32) #else #include #endif #include #include // When there are this many entries per bucket, on average, rebuild // the table to increase the number of buckets #define REBUILD_MULTIPLIER 3 BasicHashTable::BasicHashTable(int keyType) : fBuckets(fStaticBuckets), fNumBuckets(SMALL_HASH_TABLE_SIZE), fNumEntries(0), fRebuildSize(SMALL_HASH_TABLE_SIZE*REBUILD_MULTIPLIER), fDownShift(28), fMask(0x3), fKeyType(keyType) { for (unsigned i = 0; i < SMALL_HASH_TABLE_SIZE; ++i) { fStaticBuckets[i] = NULL; } } BasicHashTable::~BasicHashTable() { // Free all the entries in the table: for (unsigned i = 0; i < fNumBuckets; ++i) { TableEntry* entry; while ((entry = fBuckets[i]) != NULL) { deleteEntry(i, entry); } } // Also free the bucket array, if it was dynamically allocated: if (fBuckets != fStaticBuckets) delete[] fBuckets; } void* BasicHashTable::Add(char const* key, void* value) { void* oldValue; unsigned index; TableEntry* entry = lookupKey(key, index); if (entry != NULL) { // There's already an item with this key oldValue = entry->value; } else { // There's no existing entry; create a new one: entry = insertNewEntry(index, key); oldValue = NULL; } entry->value = value; // If the table has become too large, rebuild it with more buckets: if (fNumEntries >= fRebuildSize) rebuild(); return oldValue; } Boolean BasicHashTable::Remove(char const* key) { unsigned index; TableEntry* entry = lookupKey(key, index); if (entry == NULL) return False; // no such entry deleteEntry(index, entry); return True; } void* BasicHashTable::Lookup(char const* key) const { unsigned index; TableEntry* entry = lookupKey(key, index); if (entry == NULL) return NULL; // no such entry return entry->value; } unsigned BasicHashTable::numEntries() const { return fNumEntries; } BasicHashTable::Iterator::Iterator(BasicHashTable const& table) : fTable(table), fNextIndex(0), fNextEntry(NULL) { } void* BasicHashTable::Iterator::next(char const*& key) { while (fNextEntry == NULL) { if (fNextIndex >= fTable.fNumBuckets) return NULL; fNextEntry = fTable.fBuckets[fNextIndex++]; } BasicHashTable::TableEntry* entry = fNextEntry; fNextEntry = entry->fNext; key = entry->key; return entry->value; } ////////// Implementation of HashTable creation functions ////////// HashTable* HashTable::create(int keyType) { return new BasicHashTable(keyType); } HashTable::Iterator* HashTable::Iterator::create(HashTable const& hashTable) { // "hashTable" is assumed to be a BasicHashTable return new BasicHashTable::Iterator((BasicHashTable const&)hashTable); } ////////// Implementation of internal member functions ////////// BasicHashTable::TableEntry* BasicHashTable ::lookupKey(char const* key, unsigned& index) const { TableEntry* entry; index = hashIndexFromKey(key); for (entry = fBuckets[index]; entry != NULL; entry = entry->fNext) { if (keyMatches(key, entry->key)) break; } return entry; } Boolean BasicHashTable ::keyMatches(char const* key1, char const* key2) const { // The way we check the keys for a match depends upon their type: if (fKeyType == STRING_HASH_KEYS) { return (strcmp(key1, key2) == 0); } else if (fKeyType == 
ONE_WORD_HASH_KEYS) { return (key1 == key2); } else { unsigned* k1 = (unsigned*)key1; unsigned* k2 = (unsigned*)key2; for (int i = 0; i < fKeyType; ++i) { if (k1[i] != k2[i]) return False; // keys differ } return True; } } BasicHashTable::TableEntry* BasicHashTable ::insertNewEntry(unsigned index, char const* key) { TableEntry* entry = new TableEntry(); entry->fNext = fBuckets[index]; fBuckets[index] = entry; ++fNumEntries; assignKey(entry, key); return entry; } void BasicHashTable::assignKey(TableEntry* entry, char const* key) { // The way we assign the key depends upon its type: if (fKeyType == STRING_HASH_KEYS) { entry->key = strDup(key); } else if (fKeyType == ONE_WORD_HASH_KEYS) { entry->key = key; } else if (fKeyType > 0) { unsigned* keyFrom = (unsigned*)key; unsigned* keyTo = new unsigned[fKeyType]; for (int i = 0; i < fKeyType; ++i) keyTo[i] = keyFrom[i]; entry->key = (char const*)keyTo; } } void BasicHashTable::deleteEntry(unsigned index, TableEntry* entry) { TableEntry** ep = &fBuckets[index]; Boolean foundIt = False; while (*ep != NULL) { if (*ep == entry) { foundIt = True; *ep = entry->fNext; break; } ep = &((*ep)->fNext); } if (!foundIt) { // shouldn't happen #ifdef DEBUG fprintf(stderr, "BasicHashTable[%p]::deleteEntry(%d,%p): internal error - not found (first entry %p", this, index, entry, fBuckets[index]); if (fBuckets[index] != NULL) fprintf(stderr, ", next entry %p", fBuckets[index]->fNext); fprintf(stderr, ")\n"); #endif } --fNumEntries; deleteKey(entry); delete entry; } void BasicHashTable::deleteKey(TableEntry* entry) { // The way we delete the key depends upon its type: if (fKeyType == ONE_WORD_HASH_KEYS) { entry->key = NULL; } else { delete[] (char*)entry->key; entry->key = NULL; } } void BasicHashTable::rebuild() { // Remember the existing table size: unsigned oldSize = fNumBuckets; TableEntry** oldBuckets = fBuckets; // Create the new sized table: fNumBuckets *= 4; fBuckets = new TableEntry*[fNumBuckets]; for (unsigned i = 0; i < fNumBuckets; ++i) { fBuckets[i] = NULL; } fRebuildSize *= 4; fDownShift -= 2; fMask = (fMask<<2)|0x3; // Rehash the existing entries into the new table: for (TableEntry** oldChainPtr = oldBuckets; oldSize > 0; --oldSize, ++oldChainPtr) { for (TableEntry* hPtr = *oldChainPtr; hPtr != NULL; hPtr = *oldChainPtr) { *oldChainPtr = hPtr->fNext; unsigned index = hashIndexFromKey(hPtr->key); hPtr->fNext = fBuckets[index]; fBuckets[index] = hPtr; } } // Free the old bucket array, if it was dynamically allocated: if (oldBuckets != fStaticBuckets) delete[] oldBuckets; } unsigned BasicHashTable::hashIndexFromKey(char const* key) const { unsigned result = 0; if (fKeyType == STRING_HASH_KEYS) { while (1) { char c = *key++; if (c == 0) break; result += (result<<3) + (unsigned)c; } result &= fMask; } else if (fKeyType == ONE_WORD_HASH_KEYS) { result = randomIndex((uintptr_t)key); } else { unsigned* k = (unsigned*)key; uintptr_t sum = 0; for (int i = 0; i < fKeyType; ++i) { sum += k[i]; } result = randomIndex(sum); } return result; } live/BasicUsageEnvironment/Makefile.head000440 001751 000000 00000000246 12265042432 020530 0ustar00rsfwheel000000 000000 INCLUDES = -Iinclude -I../UsageEnvironment/include -I../groupsock/include PREFIX = /usr/local LIBDIR = $(PREFIX)/lib ##### Change the following for your environment: live/BasicUsageEnvironment/DelayQueue.cpp000444 001751 000000 00000014420 12265042432 020742 0ustar00rsfwheel000000 000000 /********** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser 
live/BasicUsageEnvironment/Makefile.head

INCLUDES = -Iinclude -I../UsageEnvironment/include -I../groupsock/include
PREFIX = /usr/local
LIBDIR = $(PREFIX)/lib
##### Change the following for your environment:

live/BasicUsageEnvironment/DelayQueue.cpp

/**********
This library is free software; you can redistribute it and/or modify it under the terms of
the GNU Lesser General Public License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
(See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this
library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
Floor, Boston, MA 02110-1301  USA
**********/
// Copyright (c) 1996-2014, Live Networks, Inc.  All rights reserved
// Help from Carlo Bonamico to get this working for Windows
// Delay queue
// Implementation

#include "DelayQueue.hh"
#include "GroupsockHelper.hh"

static const int MILLION = 1000000;

///// Timeval /////

int Timeval::operator>=(const Timeval& arg2) const {
  return seconds() > arg2.seconds()
    || (seconds() == arg2.seconds() && useconds() >= arg2.useconds());
}

void Timeval::operator+=(const DelayInterval& arg2) {
  secs() += arg2.seconds(); usecs() += arg2.useconds();
  if (useconds() >= MILLION) {
    usecs() -= MILLION;
    ++secs();
  }
}

void Timeval::operator-=(const DelayInterval& arg2) {
  secs() -= arg2.seconds(); usecs() -= arg2.useconds();
  if ((int)useconds() < 0) {
    usecs() += MILLION;
    --secs();
  }
  if ((int)seconds() < 0) secs() = usecs() = 0;
}

DelayInterval operator-(const Timeval& arg1, const Timeval& arg2) {
  time_base_seconds secs = arg1.seconds() - arg2.seconds();
  time_base_seconds usecs = arg1.useconds() - arg2.useconds();
  if ((int)usecs < 0) {
    usecs += MILLION;
    --secs;
  }
  if ((int)secs < 0) return DELAY_ZERO;
  else return DelayInterval(secs, usecs);
}

///// DelayInterval /////

DelayInterval operator*(short arg1, const DelayInterval& arg2) {
  time_base_seconds result_seconds = arg1*arg2.seconds();
  time_base_seconds result_useconds = arg1*arg2.useconds();

  time_base_seconds carry = result_useconds/MILLION;
  result_useconds -= carry*MILLION;
  result_seconds += carry;

  return DelayInterval(result_seconds, result_useconds);
}

#ifndef INT_MAX
#define INT_MAX 0x7FFFFFFF
#endif
const DelayInterval DELAY_ZERO(0, 0);
const DelayInterval DELAY_SECOND(1, 0);
const DelayInterval DELAY_MINUTE = 60*DELAY_SECOND;
const DelayInterval DELAY_HOUR = 60*DELAY_MINUTE;
const DelayInterval DELAY_DAY = 24*DELAY_HOUR;
const DelayInterval ETERNITY(INT_MAX, MILLION-1);
// used internally to make the implementation work

///// DelayQueueEntry /////

intptr_t DelayQueueEntry::tokenCounter = 0;

DelayQueueEntry::DelayQueueEntry(DelayInterval delay)
  : fDeltaTimeRemaining(delay) {
  fNext = fPrev = this;
  fToken = ++tokenCounter;
}

DelayQueueEntry::~DelayQueueEntry() {
}

void DelayQueueEntry::handleTimeout() {
  delete this;
}

///// DelayQueue /////

DelayQueue::DelayQueue()
  : DelayQueueEntry(ETERNITY) {
  fLastSyncTime = TimeNow();
}

DelayQueue::~DelayQueue() {
  while (fNext != this) {
    DelayQueueEntry* entryToRemove = fNext;
    removeEntry(entryToRemove);
    delete entryToRemove;
  }
}

void DelayQueue::addEntry(DelayQueueEntry* newEntry) {
  synchronize();

  DelayQueueEntry* cur = head();
  while (newEntry->fDeltaTimeRemaining >= cur->fDeltaTimeRemaining) {
    newEntry->fDeltaTimeRemaining -= cur->fDeltaTimeRemaining;
    cur = cur->fNext;
  }

  cur->fDeltaTimeRemaining -= newEntry->fDeltaTimeRemaining;

  // Add "newEntry" to the queue, just before "cur":
  newEntry->fNext = cur;
  newEntry->fPrev = cur->fPrev;
  cur->fPrev = newEntry->fPrev->fNext = newEntry;
}

void DelayQueue::updateEntry(DelayQueueEntry* entry, DelayInterval newDelay) {
  if (entry == NULL) return;

  removeEntry(entry);
  entry->fDeltaTimeRemaining = newDelay;
  addEntry(entry);
}

void DelayQueue::updateEntry(intptr_t tokenToFind, DelayInterval newDelay) {
  DelayQueueEntry* entry = findEntryByToken(tokenToFind);
  updateEntry(entry, newDelay);
}

void DelayQueue::removeEntry(DelayQueueEntry* entry) {
  if (entry == NULL || entry->fNext == NULL) return;

  entry->fNext->fDeltaTimeRemaining += entry->fDeltaTimeRemaining;
  entry->fPrev->fNext = entry->fNext;
  entry->fNext->fPrev = entry->fPrev;
  entry->fNext = entry->fPrev = NULL; // in case we should try to remove it again
}

DelayQueueEntry* DelayQueue::removeEntry(intptr_t tokenToFind) {
  DelayQueueEntry* entry = findEntryByToken(tokenToFind);
  removeEntry(entry);
  return entry;
}

DelayInterval const& DelayQueue::timeToNextAlarm() {
  if (head()->fDeltaTimeRemaining == DELAY_ZERO) return DELAY_ZERO; // a common case

  synchronize();
  return head()->fDeltaTimeRemaining;
}

void DelayQueue::handleAlarm() {
  if (head()->fDeltaTimeRemaining != DELAY_ZERO) synchronize();

  if (head()->fDeltaTimeRemaining == DELAY_ZERO) {
    // This event is due to be handled:
    DelayQueueEntry* toRemove = head();
    removeEntry(toRemove); // do this first, in case handler accesses queue

    toRemove->handleTimeout();
  }
}

DelayQueueEntry* DelayQueue::findEntryByToken(intptr_t tokenToFind) {
  DelayQueueEntry* cur = head();
  while (cur != this) {
    if (cur->token() == tokenToFind) return cur;
    cur = cur->fNext;
  }

  return NULL;
}

void DelayQueue::synchronize() {
  // First, figure out how much time has elapsed since the last sync:
  EventTime timeNow = TimeNow();
  if (timeNow < fLastSyncTime) {
    // The system clock has apparently gone back in time; reset our sync time and return:
    fLastSyncTime = timeNow;
    return;
  }
  DelayInterval timeSinceLastSync = timeNow - fLastSyncTime;
  fLastSyncTime = timeNow;

  // Then, adjust the delay queue for any entries whose time is up:
  DelayQueueEntry* curEntry = head();
  while (timeSinceLastSync >= curEntry->fDeltaTimeRemaining) {
    timeSinceLastSync -= curEntry->fDeltaTimeRemaining;
    curEntry->fDeltaTimeRemaining = DELAY_ZERO;
    curEntry = curEntry->fNext;
  }
  curEntry->fDeltaTimeRemaining -= timeSinceLastSync;
}

///// EventTime /////

EventTime TimeNow() {
  struct timeval tvNow;
  gettimeofday(&tvNow, NULL);

  return EventTime(tvNow.tv_sec, tvNow.tv_usec);
}

const EventTime THE_END_OF_TIME(INT_MAX);
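An illustrative sketch (not part of the distribution) of the delta-time bookkeeping above:
each queued entry stores only its delay beyond its predecessor, so "handleTimeout()" fires
in due-time order. "PrintEntry" and "delayQueueSketch" are invented names; in normal use
this machinery is driven for you by "BasicTaskScheduler0::scheduleDelayedTask()".

#include "DelayQueue.hh"

class PrintEntry: public DelayQueueEntry {
public:
  PrintEntry(DelayInterval delay) : DelayQueueEntry(delay) {}
protected:
  virtual void handleTimeout() {
    // ... perform the delayed work here ...
    DelayQueueEntry::handleTimeout(); // the default implementation deletes "this"
  }
};

void delayQueueSketch() {
  DelayQueue queue;
  queue.addEntry(new PrintEntry(2*DELAY_SECOND)); // due in ~2s
  queue.addEntry(new PrintEntry(DELAY_MINUTE));   // due in ~60s

  // An event loop would wait up to "queue.timeToNextAlarm()", then call:
  queue.handleAlarm(); // handles (and, by default, deletes) at most one due entry
}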
// Basic Usage Environment: for a simple, non-scripted, console application
// Implementation

#include "BasicUsageEnvironment.hh"
#include <stdio.h>

////////// BasicUsageEnvironment //////////

#if defined(__WIN32__) || defined(_WIN32)
extern "C" int initializeWinsockIfNecessary();
#endif

BasicUsageEnvironment::BasicUsageEnvironment(TaskScheduler& taskScheduler)
  : BasicUsageEnvironment0(taskScheduler) {
#if defined(__WIN32__) || defined(_WIN32)
  if (!initializeWinsockIfNecessary()) {
    setResultErrMsg("Failed to initialize 'winsock': ");
    reportBackgroundError();
    internalError();
  }
#endif
}

BasicUsageEnvironment::~BasicUsageEnvironment() {
}

BasicUsageEnvironment*
BasicUsageEnvironment::createNew(TaskScheduler& taskScheduler) {
  return new BasicUsageEnvironment(taskScheduler);
}

int BasicUsageEnvironment::getErrno() const {
#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_WCE)
  return WSAGetLastError();
#else
  return errno;
#endif
}

UsageEnvironment& BasicUsageEnvironment::operator<<(char const* str) {
  if (str == NULL) str = "(NULL)"; // sanity check
  fprintf(stderr, "%s", str);
  return *this;
}

UsageEnvironment& BasicUsageEnvironment::operator<<(int i) {
  fprintf(stderr, "%d", i);
  return *this;
}

UsageEnvironment& BasicUsageEnvironment::operator<<(unsigned u) {
  fprintf(stderr, "%u", u);
  return *this;
}

UsageEnvironment& BasicUsageEnvironment::operator<<(double d) {
  fprintf(stderr, "%f", d);
  return *this;
}

UsageEnvironment& BasicUsageEnvironment::operator<<(void* p) {
  fprintf(stderr, "%p", p);
  return *this;
}
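An illustrative sketch (not part of the distribution): the "operator<<" definitions above
make a "UsageEnvironment" usable like a simple output stream (here writing to stderr).
The host name, port, and function name are invented for the example.

#include "BasicUsageEnvironment.hh"

void loggingSketch(UsageEnvironment& env) {
  env << "Connecting to " << "example.com" << ", port " << 554 << "\n";

  env.setResultErrMsg("connect() failed: "); // appends strerror(getErrno())
  env << env.getResultMsg() << "\n";
}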
live/BasicUsageEnvironment/BasicTaskScheduler.cpp

/**********
This library is free software; you can redistribute it and/or modify it under the terms of
the GNU Lesser General Public License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
(See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this
library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
Floor, Boston, MA 02110-1301  USA
**********/
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// Basic Usage Environment: for a simple, non-scripted, console application
// Implementation

#include "BasicUsageEnvironment.hh"
#include "HandlerSet.hh"
#include <stdio.h>

#if defined(_QNX4)
#include <sys/select.h>
#include <unix.h>
#endif

////////// BasicTaskScheduler //////////

BasicTaskScheduler* BasicTaskScheduler::createNew(unsigned maxSchedulerGranularity) {
  return new BasicTaskScheduler(maxSchedulerGranularity);
}

BasicTaskScheduler::BasicTaskScheduler(unsigned maxSchedulerGranularity)
  : fMaxSchedulerGranularity(maxSchedulerGranularity), fMaxNumSockets(0) {
  FD_ZERO(&fReadSet);
  FD_ZERO(&fWriteSet);
  FD_ZERO(&fExceptionSet);

  if (maxSchedulerGranularity > 0) schedulerTickTask(); // ensures that we handle events frequently
}

BasicTaskScheduler::~BasicTaskScheduler() {
}

void BasicTaskScheduler::schedulerTickTask(void* clientData) {
  ((BasicTaskScheduler*)clientData)->schedulerTickTask();
}

void BasicTaskScheduler::schedulerTickTask() {
  scheduleDelayedTask(fMaxSchedulerGranularity, schedulerTickTask, this);
}

#ifndef MILLION
#define MILLION 1000000
#endif

void BasicTaskScheduler::SingleStep(unsigned maxDelayTime) {
  fd_set readSet = fReadSet; // make a copy for this select() call
  fd_set writeSet = fWriteSet; // ditto
  fd_set exceptionSet = fExceptionSet; // ditto

  DelayInterval const& timeToDelay = fDelayQueue.timeToNextAlarm();
  struct timeval tv_timeToDelay;
  tv_timeToDelay.tv_sec = timeToDelay.seconds();
  tv_timeToDelay.tv_usec = timeToDelay.useconds();
  // Very large "tv_sec" values cause select() to fail.
  // Don't make it any larger than 1 million seconds (11.5 days)
  const long MAX_TV_SEC = MILLION;
  if (tv_timeToDelay.tv_sec > MAX_TV_SEC) {
    tv_timeToDelay.tv_sec = MAX_TV_SEC;
  }
  // Also check our "maxDelayTime" parameter (if it's > 0):
  if (maxDelayTime > 0 &&
      (tv_timeToDelay.tv_sec > (long)maxDelayTime/MILLION ||
       (tv_timeToDelay.tv_sec == (long)maxDelayTime/MILLION &&
	tv_timeToDelay.tv_usec > (long)maxDelayTime%MILLION))) {
    tv_timeToDelay.tv_sec = maxDelayTime/MILLION;
    tv_timeToDelay.tv_usec = maxDelayTime%MILLION;
  }

  int selectResult = select(fMaxNumSockets, &readSet, &writeSet, &exceptionSet, &tv_timeToDelay);
  if (selectResult < 0) {
#if defined(__WIN32__) || defined(_WIN32)
    int err = WSAGetLastError();
    // For some unknown reason, select() on Windows sometimes fails with WSAEINVAL if
    // it was called with no entries set in "readSet".  If this happens, ignore it:
    if (err == WSAEINVAL && readSet.fd_count == 0) {
      err = EINTR;
      // To stop this from happening again, create a dummy socket:
      int dummySocketNum = socket(AF_INET, SOCK_DGRAM, 0);
      FD_SET((unsigned)dummySocketNum, &fReadSet);
    }
    if (err != EINTR) {
#else
    if (errno != EINTR && errno != EAGAIN) {
#endif
      // Unexpected error - treat this as fatal:
#if !defined(_WIN32_WCE)
      perror("BasicTaskScheduler::SingleStep(): select() fails");
      // Because this failure is often "Bad file descriptor" - which is caused by an
      // invalid socket number (i.e., a socket number that had already been closed)
      // being used in "select()" - we print out the sockets that were being used in
      // "select()", to assist in debugging:
      fprintf(stderr, "socket numbers used in the select() call:");
      for (int i = 0; i < 10000; ++i) {
	if (FD_ISSET(i, &fReadSet) || FD_ISSET(i, &fWriteSet) || FD_ISSET(i, &fExceptionSet)) {
	  fprintf(stderr, " %d(", i);
	  if (FD_ISSET(i, &fReadSet)) fprintf(stderr, "r");
	  if (FD_ISSET(i, &fWriteSet)) fprintf(stderr, "w");
	  if (FD_ISSET(i, &fExceptionSet)) fprintf(stderr, "e");
	  fprintf(stderr, ")");
	}
      }
      fprintf(stderr, "\n");
#endif
      internalError();
    }
  }

  // Call the handler function for one readable socket:
  HandlerIterator iter(*fHandlers);
  HandlerDescriptor* handler;
  // To ensure forward progress through the handlers, begin past the last
  // socket number that we handled:
  if (fLastHandledSocketNum >= 0) {
    while ((handler = iter.next()) != NULL) {
      if (handler->socketNum == fLastHandledSocketNum) break;
    }
    if (handler == NULL) {
      fLastHandledSocketNum = -1;
      iter.reset(); // start from the beginning instead
    }
  }
  while ((handler = iter.next()) != NULL) {
    int sock = handler->socketNum; // alias
    int resultConditionSet = 0;
    if (FD_ISSET(sock, &readSet) && FD_ISSET(sock, &fReadSet)/*sanity check*/) resultConditionSet |= SOCKET_READABLE;
    if (FD_ISSET(sock, &writeSet) && FD_ISSET(sock, &fWriteSet)/*sanity check*/) resultConditionSet |= SOCKET_WRITABLE;
    if (FD_ISSET(sock, &exceptionSet) && FD_ISSET(sock, &fExceptionSet)/*sanity check*/) resultConditionSet |= SOCKET_EXCEPTION;
    if ((resultConditionSet&handler->conditionSet) != 0 && handler->handlerProc != NULL) {
      fLastHandledSocketNum = sock;
      // Note: we set "fLastHandledSocketNum" before calling the handler,
      // in case the handler calls "doEventLoop()" reentrantly.
      (*handler->handlerProc)(handler->clientData, resultConditionSet);
      break;
    }
  }
  if (handler == NULL && fLastHandledSocketNum >= 0) {
    // We didn't call a handler, but we didn't get to check all of them,
    // so try again from the beginning:
    iter.reset();
    while ((handler = iter.next()) != NULL) {
      int sock = handler->socketNum; // alias
      int resultConditionSet = 0;
      if (FD_ISSET(sock, &readSet) && FD_ISSET(sock, &fReadSet)/*sanity check*/) resultConditionSet |= SOCKET_READABLE;
      if (FD_ISSET(sock, &writeSet) && FD_ISSET(sock, &fWriteSet)/*sanity check*/) resultConditionSet |= SOCKET_WRITABLE;
      if (FD_ISSET(sock, &exceptionSet) && FD_ISSET(sock, &fExceptionSet)/*sanity check*/) resultConditionSet |= SOCKET_EXCEPTION;
      if ((resultConditionSet&handler->conditionSet) != 0 && handler->handlerProc != NULL) {
	fLastHandledSocketNum = sock;
	// Note: we set "fLastHandledSocketNum" before calling the handler,
	// in case the handler calls "doEventLoop()" reentrantly.
	(*handler->handlerProc)(handler->clientData, resultConditionSet);
	break;
      }
    }
    if (handler == NULL) fLastHandledSocketNum = -1; // because we didn't call a handler
  }

  // Also handle any newly-triggered event.  (Note that we do this *after* calling a
  // socket handler, in case the triggered event handler modifies the set of readable
  // sockets.)
  if (fTriggersAwaitingHandling != 0) {
    if (fTriggersAwaitingHandling == fLastUsedTriggerMask) {
      // Common-case optimization for a single event trigger:
      fTriggersAwaitingHandling = 0;
      if (fTriggeredEventHandlers[fLastUsedTriggerNum] != NULL) {
	(*fTriggeredEventHandlers[fLastUsedTriggerNum])(fTriggeredEventClientDatas[fLastUsedTriggerNum]);
      }
    } else {
      // Look for an event trigger that needs handling (making sure that we make
      // forward progress through all possible triggers):
      unsigned i = fLastUsedTriggerNum;
      EventTriggerId mask = fLastUsedTriggerMask;
      do {
	i = (i+1)%MAX_NUM_EVENT_TRIGGERS;
	mask >>= 1;
	if (mask == 0) mask = 0x80000000;
	if ((fTriggersAwaitingHandling&mask) != 0) {
	  fTriggersAwaitingHandling &=~ mask;
	  if (fTriggeredEventHandlers[i] != NULL) {
	    (*fTriggeredEventHandlers[i])(fTriggeredEventClientDatas[i]);
	  }
	  fLastUsedTriggerMask = mask;
	  fLastUsedTriggerNum = i;
	  break;
	}
      } while (i != fLastUsedTriggerNum);
    }
  }

  // Also handle any delayed event that may have come due.
  fDelayQueue.handleAlarm();
}

void BasicTaskScheduler
::setBackgroundHandling(int socketNum, int conditionSet, BackgroundHandlerProc* handlerProc, void* clientData) {
  if (socketNum < 0) return;
  FD_CLR((unsigned)socketNum, &fReadSet);
  FD_CLR((unsigned)socketNum, &fWriteSet);
  FD_CLR((unsigned)socketNum, &fExceptionSet);
  if (conditionSet == 0) {
    fHandlers->clearHandler(socketNum);
    if (socketNum+1 == fMaxNumSockets) {
      --fMaxNumSockets;
    }
  } else {
    fHandlers->assignHandler(socketNum, conditionSet, handlerProc, clientData);
    if (socketNum+1 > fMaxNumSockets) {
      fMaxNumSockets = socketNum+1;
    }
    if (conditionSet&SOCKET_READABLE) FD_SET((unsigned)socketNum, &fReadSet);
    if (conditionSet&SOCKET_WRITABLE) FD_SET((unsigned)socketNum, &fWriteSet);
    if (conditionSet&SOCKET_EXCEPTION) FD_SET((unsigned)socketNum, &fExceptionSet);
  }
}

void BasicTaskScheduler::moveSocketHandling(int oldSocketNum, int newSocketNum) {
  if (oldSocketNum < 0 || newSocketNum < 0) return; // sanity check
  if (FD_ISSET(oldSocketNum, &fReadSet)) {FD_CLR((unsigned)oldSocketNum, &fReadSet); FD_SET((unsigned)newSocketNum, &fReadSet);}
  if (FD_ISSET(oldSocketNum, &fWriteSet)) {FD_CLR((unsigned)oldSocketNum, &fWriteSet); FD_SET((unsigned)newSocketNum, &fWriteSet);}
  if (FD_ISSET(oldSocketNum, &fExceptionSet)) {FD_CLR((unsigned)oldSocketNum, &fExceptionSet); FD_SET((unsigned)newSocketNum, &fExceptionSet);}
  fHandlers->moveHandler(oldSocketNum, newSocketNum);

  if (oldSocketNum+1 == fMaxNumSockets) {
    --fMaxNumSockets;
  }
  if (newSocketNum+1 > fMaxNumSockets) {
    fMaxNumSockets = newSocketNum+1;
  }
}
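An illustrative sketch (not part of the distribution) of how this scheduler is typically
driven: create it, attach a usage environment, schedule a re-arming delayed task, and
enter the event loop until a watch variable is set. "periodicTask" and "stopFlag" are
invented names.

#include "BasicUsageEnvironment.hh"

static char stopFlag = 0;

static void periodicTask(void* clientData) {
  TaskScheduler* scheduler = (TaskScheduler*)clientData;
  // ... do periodic work; set "stopFlag = 1" to leave the event loop ...
  scheduler->scheduleDelayedTask(1000000/*microseconds*/, periodicTask, clientData);
}

int main() {
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  periodicTask(scheduler); // schedule the first tick
  env->taskScheduler().doEventLoop(&stopFlag); // returns once "stopFlag" becomes nonzero

  env->reclaim(); delete scheduler;
  return 0;
}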
live/BasicUsageEnvironment/COPYING -> ../COPYING (symbolic link)

live/BasicUsageEnvironment/Makefile.tail

##### End of variables to change

NAME = libBasicUsageEnvironment
LIB = $(NAME).$(LIB_SUFFIX)
ALL = $(LIB)
all:	$(ALL)

OBJS = BasicUsageEnvironment0.$(OBJ) BasicUsageEnvironment.$(OBJ) \
	BasicTaskScheduler0.$(OBJ) BasicTaskScheduler.$(OBJ) \
	DelayQueue.$(OBJ) BasicHashTable.$(OBJ)

libBasicUsageEnvironment.$(LIB_SUFFIX): $(OBJS)
	$(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) $(OBJS)

.$(C).$(OBJ):
	$(C_COMPILER) -c $(C_FLAGS) $<

.$(CPP).$(OBJ):
	$(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $<

BasicUsageEnvironment0.$(CPP):		include/BasicUsageEnvironment0.hh
include/BasicUsageEnvironment0.hh:	include/BasicUsageEnvironment_version.hh include/DelayQueue.hh
BasicUsageEnvironment.$(CPP):		include/BasicUsageEnvironment.hh
include/BasicUsageEnvironment.hh:	include/BasicUsageEnvironment0.hh
BasicTaskScheduler0.$(CPP):		include/BasicUsageEnvironment0.hh include/HandlerSet.hh
BasicTaskScheduler.$(CPP):		include/BasicUsageEnvironment.hh include/HandlerSet.hh
DelayQueue.$(CPP):			include/DelayQueue.hh
BasicHashTable.$(CPP):			include/BasicHashTable.hh

clean:
	-rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~

install: install1 $(INSTALL2)
install1: libBasicUsageEnvironment.$(LIB_SUFFIX)
	install -d $(DESTDIR)$(PREFIX)/include/BasicUsageEnvironment $(DESTDIR)$(LIBDIR)
	install -m 644 include/*.hh $(DESTDIR)$(PREFIX)/include/BasicUsageEnvironment
	install -m 644 libBasicUsageEnvironment.$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)
install_shared_libraries: libBasicUsageEnvironment.$(LIB_SUFFIX)
	ln -s $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).$(SHORT_LIB_SUFFIX)
	ln -s $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).so

##### Any additional, platform-specific rules come here:

live/BasicUsageEnvironment/BasicUsageEnvironment0.cpp

/**********
This library is free software; you can redistribute it and/or modify it under the terms of
the GNU Lesser General Public License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
(See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this
library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
Floor, Boston, MA 02110-1301  USA
**********/
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// Basic Usage Environment: for a simple, non-scripted, console application
// Implementation

#include "BasicUsageEnvironment0.hh"
#include <stdio.h>

////////// BasicUsageEnvironment //////////

BasicUsageEnvironment0::BasicUsageEnvironment0(TaskScheduler& taskScheduler)
  : UsageEnvironment(taskScheduler),
    fBufferMaxSize(RESULT_MSG_BUFFER_MAX) {
  reset();
}

BasicUsageEnvironment0::~BasicUsageEnvironment0() {
}

void BasicUsageEnvironment0::reset() {
  fCurBufferSize = 0;
  fResultMsgBuffer[fCurBufferSize] = '\0';
}

// Implementation of virtual functions:

char const* BasicUsageEnvironment0::getResultMsg() const {
  return fResultMsgBuffer;
}

void BasicUsageEnvironment0::setResultMsg(MsgString msg) {
  reset();
  appendToResultMsg(msg);
}

void BasicUsageEnvironment0::setResultMsg(MsgString msg1, MsgString msg2) {
  setResultMsg(msg1);
  appendToResultMsg(msg2);
}

void BasicUsageEnvironment0::setResultMsg(MsgString msg1, MsgString msg2, MsgString msg3) {
  setResultMsg(msg1, msg2);
  appendToResultMsg(msg3);
}

void BasicUsageEnvironment0::setResultErrMsg(MsgString msg, int err) {
  setResultMsg(msg);
#ifndef _WIN32_WCE
  appendToResultMsg(strerror(err == 0 ? getErrno() : err));
#endif
}

void BasicUsageEnvironment0::appendToResultMsg(MsgString msg) {
  char* curPtr = &fResultMsgBuffer[fCurBufferSize];
  unsigned spaceAvailable = fBufferMaxSize - fCurBufferSize;
  unsigned msgLength = strlen(msg);

  // Copy only enough of "msg" as will fit:
  if (msgLength > spaceAvailable-1) {
    msgLength = spaceAvailable-1;
  }

  memmove(curPtr, (char*)msg, msgLength);
  fCurBufferSize += msgLength;
  fResultMsgBuffer[fCurBufferSize] = '\0';
}

void BasicUsageEnvironment0::reportBackgroundError() {
  fputs(getResultMsg(), stderr);
}
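An illustrative sketch (not part of the distribution) of the result-message buffer above:
messages are composed into a fixed-size buffer (RESULT_MSG_BUFFER_MAX bytes, silently
truncated), then reported. The host name and error text are invented for the example.

#include "BasicUsageEnvironment.hh"

void resultMsgSketch(UsageEnvironment& env) {
  env.setResultMsg("Failed to resolve \"", "example.com", "\"");
  env.setResultErrMsg("socket() failed: "); // overwrites, then appends strerror(getErrno())
  env.reportBackgroundError();              // writes getResultMsg() to stderr
}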
live/BasicUsageEnvironment/include/BasicHashTable.hh

/**********
This library is free software; you can redistribute it and/or modify it under the terms of
the GNU Lesser General Public License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
(See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this
library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
Floor, Boston, MA 02110-1301  USA
**********/
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// Basic Hash Table implementation
// C++ header

#ifndef _BASIC_HASH_TABLE_HH
#define _BASIC_HASH_TABLE_HH

#ifndef _HASH_TABLE_HH
#include "HashTable.hh"
#endif
#ifndef _NET_COMMON_H
#include <NetCommon.h> // to ensure that "uintptr_t" is defined
#endif

// A simple hash table implementation, inspired by the hash table
// implementation used in Tcl 7.6:

#define SMALL_HASH_TABLE_SIZE 4

class BasicHashTable: public HashTable {
private:
  class TableEntry; // forward

public:
  BasicHashTable(int keyType);
  virtual ~BasicHashTable();

  // Used to iterate through the members of the table:
  class Iterator; friend class Iterator; // to make Sun's C++ compiler happy
  class Iterator: public HashTable::Iterator {
  public:
    Iterator(BasicHashTable const& table);

  private: // implementation of inherited pure virtual functions
    void* next(char const*& key); // returns 0 if none

  private:
    BasicHashTable const& fTable;
    unsigned fNextIndex; // index of next bucket to be enumerated after this
    TableEntry* fNextEntry; // next entry in the current bucket
  };

private: // implementation of inherited pure virtual functions
  virtual void* Add(char const* key, void* value);
  // Returns the old value if different, otherwise 0
  virtual Boolean Remove(char const* key);
  virtual void* Lookup(char const* key) const;
  // Returns 0 if not found
  virtual unsigned numEntries() const;

private:
  class TableEntry {
  public:
    TableEntry* fNext;
    char const* key;
    void* value;
  };

  TableEntry* lookupKey(char const* key, unsigned& index) const;
    // returns entry matching "key", or NULL if none
  Boolean keyMatches(char const* key1, char const* key2) const;
    // used to implement "lookupKey()"
  TableEntry* insertNewEntry(unsigned index, char const* key);
    // creates a new entry, and inserts it in the table
  void assignKey(TableEntry* entry, char const* key);
    // used to implement "insertNewEntry()"
  void deleteEntry(unsigned index, TableEntry* entry);
  void deleteKey(TableEntry* entry);
    // used to implement "deleteEntry()"
  void rebuild(); // rebuilds the table as its size increases
  unsigned hashIndexFromKey(char const* key) const;
    // used to implement many of the routines above
  unsigned randomIndex(uintptr_t i) const {
    return (unsigned)(((i*1103515245) >> fDownShift) & fMask);
  }

private:
  TableEntry** fBuckets; // pointer to bucket array
  TableEntry* fStaticBuckets[SMALL_HASH_TABLE_SIZE]; // used for small tables
  unsigned fNumBuckets, fNumEntries, fRebuildSize, fDownShift, fMask;
  int fKeyType;
};

#endif

live/BasicUsageEnvironment/include/BasicUsageEnvironment.hh

/**********
This library is free software; you can redistribute it and/or modify it under the terms of
the GNU Lesser General Public License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
(See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this
library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
Floor, Boston, MA 02110-1301  USA
**********/
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// Basic Usage Environment: for a simple, non-scripted, console application
// C++ header

#ifndef _BASIC_USAGE_ENVIRONMENT_HH
#define _BASIC_USAGE_ENVIRONMENT_HH

#ifndef _BASIC_USAGE_ENVIRONMENT0_HH
#include "BasicUsageEnvironment0.hh"
#endif

class BasicUsageEnvironment: public BasicUsageEnvironment0 {
public:
  static BasicUsageEnvironment* createNew(TaskScheduler& taskScheduler);

  // redefined virtual functions:
  virtual int getErrno() const;

  virtual UsageEnvironment& operator<<(char const* str);
  virtual UsageEnvironment& operator<<(int i);
  virtual UsageEnvironment& operator<<(unsigned u);
  virtual UsageEnvironment& operator<<(double d);
  virtual UsageEnvironment& operator<<(void* p);

protected:
  BasicUsageEnvironment(TaskScheduler& taskScheduler);
  // called only by "createNew()" (or subclass constructors)
  virtual ~BasicUsageEnvironment();
};

class BasicTaskScheduler: public BasicTaskScheduler0 {
public:
  static BasicTaskScheduler* createNew(unsigned maxSchedulerGranularity = 10000/*microseconds*/);
  // "maxSchedulerGranularity" (default value: 10 ms) specifies the maximum time that
  // we wait (in "select()") before returning to the event loop to handle non-socket
  // or non-timer-based events, such as 'triggered events'.
  // You can change this if you wish (but only if you know what you're doing!), or set
  // it to 0, to specify no such maximum time.  (You should set it to 0 only if you
  // know that you will not be using 'event triggers'.)
  virtual ~BasicTaskScheduler();

protected:
  BasicTaskScheduler(unsigned maxSchedulerGranularity);
  // called only by "createNew()"

  static void schedulerTickTask(void* clientData);
  void schedulerTickTask();

protected:
  // Redefined virtual functions:
  virtual void SingleStep(unsigned maxDelayTime);

  virtual void setBackgroundHandling(int socketNum, int conditionSet,
				     BackgroundHandlerProc* handlerProc, void* clientData);
  virtual void moveSocketHandling(int oldSocketNum, int newSocketNum);

protected:
  unsigned fMaxSchedulerGranularity;

  // To implement background operations:
  int fMaxNumSockets;
  fd_set fReadSet;
  fd_set fWriteSet;
  fd_set fExceptionSet;
};

#endif
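An illustrative sketch (not part of the distribution): 'event triggers' (inherited from
"BasicTaskScheduler0") are the supported way to inject work into the event loop;
"triggerEvent()" is designed to be callable even from a different thread. "frameArrived"
is an invented handler name.

#include "BasicUsageEnvironment.hh"

static void frameArrived(void* clientData) {
  // runs later, inside the event-loop thread
}

void eventTriggerSketch(TaskScheduler& scheduler) {
  EventTriggerId id = scheduler.createEventTrigger(frameArrived);

  scheduler.triggerEvent(id, /*clientData*/NULL); // e.g., from another thread

  scheduler.deleteEventTrigger(id); // when no longer needed
}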
live/BasicUsageEnvironment/include/DelayQueue.hh

/**********
This library is free software; you can redistribute it and/or modify it under the terms of
the GNU Lesser General Public License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
(See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this
library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
Floor, Boston, MA 02110-1301  USA
**********/
// Copyright (c) 1996-2014, Live Networks, Inc.  All rights reserved
// Delay queue
// C++ header

#ifndef _DELAY_QUEUE_HH
#define _DELAY_QUEUE_HH

#ifndef _NET_COMMON_H
#include "NetCommon.h"
#endif

#ifdef TIME_BASE
typedef TIME_BASE time_base_seconds;
#else
typedef long time_base_seconds;
#endif

///// A "Timeval" can be either an absolute time, or a time interval /////

class Timeval {
public:
  time_base_seconds seconds() const { return fTv.tv_sec; }
  time_base_seconds seconds() { return fTv.tv_sec; }
  time_base_seconds useconds() const { return fTv.tv_usec; }
  time_base_seconds useconds() { return fTv.tv_usec; }

  int operator>=(Timeval const& arg2) const;
  int operator<=(Timeval const& arg2) const { return arg2 >= *this; }
  int operator<(Timeval const& arg2) const { return !(*this >= arg2); }
  int operator>(Timeval const& arg2) const { return arg2 < *this; }
  int operator==(Timeval const& arg2) const { return *this >= arg2 && arg2 >= *this; }
  int operator!=(Timeval const& arg2) const { return !(*this == arg2); }

  void operator+=(class DelayInterval const& arg2);
  void operator-=(class DelayInterval const& arg2);
  // returns ZERO iff arg2 >= arg1

protected:
  Timeval(time_base_seconds seconds, time_base_seconds useconds) {
    fTv.tv_sec = seconds; fTv.tv_usec = useconds;
  }

private:
  time_base_seconds& secs() { return (time_base_seconds&)fTv.tv_sec; }
  time_base_seconds& usecs() { return (time_base_seconds&)fTv.tv_usec; }

  struct timeval fTv;
};

#ifndef max
inline Timeval max(Timeval const& arg1, Timeval const& arg2) {
  return arg1 >= arg2 ? arg1 : arg2;
}
#endif
#ifndef min
inline Timeval min(Timeval const& arg1, Timeval const& arg2) {
  return arg1 <= arg2 ? arg1 : arg2;
}
#endif

class DelayInterval operator-(Timeval const& arg1, Timeval const& arg2);
// returns ZERO iff arg2 >= arg1

///// DelayInterval /////

class DelayInterval: public Timeval {
public:
  DelayInterval(time_base_seconds seconds, time_base_seconds useconds)
    : Timeval(seconds, useconds) {}
};

DelayInterval operator*(short arg1, DelayInterval const& arg2);

extern DelayInterval const DELAY_ZERO;
extern DelayInterval const DELAY_SECOND;
extern DelayInterval const DELAY_MINUTE;
extern DelayInterval const DELAY_HOUR;
extern DelayInterval const DELAY_DAY;

///// EventTime /////

class EventTime: public Timeval {
public:
  EventTime(unsigned secondsSinceEpoch = 0, unsigned usecondsSinceEpoch = 0)
    // We use the Unix standard epoch: January 1, 1970
    : Timeval(secondsSinceEpoch, usecondsSinceEpoch) {}
};

EventTime TimeNow();

extern EventTime const THE_END_OF_TIME;

///// DelayQueueEntry /////

class DelayQueueEntry {
public:
  virtual ~DelayQueueEntry();

  intptr_t token() { return fToken; }

protected: // abstract base class
  DelayQueueEntry(DelayInterval delay);

  virtual void handleTimeout();

private:
  friend class DelayQueue;
  DelayQueueEntry* fNext;
  DelayQueueEntry* fPrev;
  DelayInterval fDeltaTimeRemaining;

  intptr_t fToken;
  static intptr_t tokenCounter;
};

///// DelayQueue /////

class DelayQueue: public DelayQueueEntry {
public:
  DelayQueue();
  virtual ~DelayQueue();

  void addEntry(DelayQueueEntry* newEntry);
  // the entry acquires a token, retrievable via its "token()" member function
  void updateEntry(DelayQueueEntry* entry, DelayInterval newDelay);
  void updateEntry(intptr_t tokenToFind, DelayInterval newDelay);
  void removeEntry(DelayQueueEntry* entry); // but doesn't delete it
  DelayQueueEntry* removeEntry(intptr_t tokenToFind); // but doesn't delete it

  DelayInterval const& timeToNextAlarm();
  void handleAlarm();

private:
  DelayQueueEntry* head() { return fNext; }
  DelayQueueEntry* findEntryByToken(intptr_t token);
  void synchronize(); // bring the 'time remaining' fields up-to-date

  EventTime fLastSyncTime;
};

#endif
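An illustrative sketch (not part of the distribution) of the "Timeval"/"DelayInterval"
arithmetic declared above; note that subtraction is clamped, so it never goes below
DELAY_ZERO. "timevalSketch" is an invented name.

#include "DelayQueue.hh"

void timevalSketch() {
  DelayInterval twoSeconds = 2*DELAY_SECOND; // via operator*(short, DelayInterval const&)

  EventTime now = TimeNow();
  EventTime later = now;
  later += twoSeconds;                  // Timeval::operator+=(DelayInterval const&)

  DelayInterval gap = later - now;      // operator-(Timeval, Timeval); >= DELAY_ZERO
  if (gap >= DELAY_SECOND) { /* ... */ }
}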
live/BasicUsageEnvironment/include/BasicUsageEnvironment_version.hh

// Version information for the "BasicUsageEnvironment" library
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.

#ifndef _BASICUSAGEENVIRONMENT_VERSION_HH
#define _BASICUSAGEENVIRONMENT_VERSION_HH

#define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_STRING "2014.01.13"
#define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_INT 1389571200

#endif

live/BasicUsageEnvironment/include/HandlerSet.hh

/**********
This library is free software; you can redistribute it and/or modify it under the terms of
the GNU Lesser General Public License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
(See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this
library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
Floor, Boston, MA 02110-1301  USA
**********/
// Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
// Basic Usage Environment: for a simple, non-scripted, console application
// C++ header

#ifndef _HANDLER_SET_HH
#define _HANDLER_SET_HH

#ifndef _BOOLEAN_HH
#include "Boolean.hh"
#endif

////////// HandlerSet (etc.) definition //////////

class HandlerDescriptor {
  HandlerDescriptor(HandlerDescriptor* nextHandler);
  virtual ~HandlerDescriptor();

public:
  int socketNum;
  int conditionSet;
  TaskScheduler::BackgroundHandlerProc* handlerProc;
  void* clientData;

private:
  // Descriptors are linked together in a doubly-linked list:
  friend class HandlerSet;
  friend class HandlerIterator;
  HandlerDescriptor* fNextHandler;
  HandlerDescriptor* fPrevHandler;
};

class HandlerSet {
public:
  HandlerSet();
  virtual ~HandlerSet();

  void assignHandler(int socketNum, int conditionSet,
		     TaskScheduler::BackgroundHandlerProc* handlerProc, void* clientData);
  void clearHandler(int socketNum);
  void moveHandler(int oldSocketNum, int newSocketNum);

private:
  HandlerDescriptor* lookupHandler(int socketNum);

private:
  friend class HandlerIterator;
  HandlerDescriptor fHandlers;
};

class HandlerIterator {
public:
  HandlerIterator(HandlerSet& handlerSet);
  virtual ~HandlerIterator();

  HandlerDescriptor* next(); // returns NULL if none
  void reset();

private:
  HandlerSet& fOurSet;
  HandlerDescriptor* fNextPtr;
};

#endif
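An illustrative sketch (not part of the distribution): applications don't use "HandlerSet"
directly; they register socket handlers through "TaskScheduler", which stores them in a
"HandlerSet". "socketReadable", "backgroundReadSketch", and the parameters are invented
for the example.

#include "BasicUsageEnvironment.hh"

static void socketReadable(void* clientData, int mask) {
  if (mask & SOCKET_READABLE) {
    // ... recv() on the socket and process the data ...
  }
}

void backgroundReadSketch(TaskScheduler& scheduler, int sock) {
  scheduler.setBackgroundHandling(sock, SOCKET_READABLE, socketReadable, /*clientData*/NULL);
  // ... later, to deregister the handler:
  scheduler.disableBackgroundHandling(sock);
}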
// Basic Usage Environment: for a simple, non-scripted, console application
// C++ header

#ifndef _BASIC_USAGE_ENVIRONMENT0_HH
#define _BASIC_USAGE_ENVIRONMENT0_HH

#ifndef _BASICUSAGEENVIRONMENT_VERSION_HH
#include "BasicUsageEnvironment_version.hh"
#endif
#ifndef _USAGE_ENVIRONMENT_HH
#include "UsageEnvironment.hh"
#endif
#ifndef _DELAY_QUEUE_HH
#include "DelayQueue.hh"
#endif

#define RESULT_MSG_BUFFER_MAX 1000

// An abstract base class, useful for subclassing
// (e.g., to redefine the implementation of "operator<<")
class BasicUsageEnvironment0: public UsageEnvironment {
public:
  // redefined virtual functions:
  virtual MsgString getResultMsg() const;

  virtual void setResultMsg(MsgString msg);
  virtual void setResultMsg(MsgString msg1, MsgString msg2);
  virtual void setResultMsg(MsgString msg1, MsgString msg2, MsgString msg3);
  virtual void setResultErrMsg(MsgString msg, int err = 0);

  virtual void appendToResultMsg(MsgString msg);

  virtual void reportBackgroundError();

protected:
  BasicUsageEnvironment0(TaskScheduler& taskScheduler);
  virtual ~BasicUsageEnvironment0();

private:
  void reset();

  char fResultMsgBuffer[RESULT_MSG_BUFFER_MAX];
  unsigned fCurBufferSize;
  unsigned fBufferMaxSize;
};

class HandlerSet; // forward

#define MAX_NUM_EVENT_TRIGGERS 32

// An abstract base class, useful for subclassing
// (e.g., to redefine the implementation of socket event handling)
class BasicTaskScheduler0: public TaskScheduler {
public:
  virtual ~BasicTaskScheduler0();

  virtual void SingleStep(unsigned maxDelayTime = 0) = 0;
  // "maxDelayTime" is in microseconds.  It allows a subclass to impose a limit
  // on how long "select()" can delay, in case it wants to also do polling.
  // 0 (the default value) means: There's no maximum; just look at the delay queue

public:
  // Redefined virtual functions:
  virtual TaskToken scheduleDelayedTask(int64_t microseconds, TaskFunc* proc, void* clientData);
  virtual void unscheduleDelayedTask(TaskToken& prevTask);

  virtual void doEventLoop(char* watchVariable);

  virtual EventTriggerId createEventTrigger(TaskFunc* eventHandlerProc);
  virtual void deleteEventTrigger(EventTriggerId eventTriggerId);
  virtual void triggerEvent(EventTriggerId eventTriggerId, void* clientData = NULL);

protected:
  BasicTaskScheduler0();

protected:
  // To implement delayed operations:
  DelayQueue fDelayQueue;

  // To implement background reads:
  HandlerSet* fHandlers;
  int fLastHandledSocketNum;

  // To implement event triggers:
  EventTriggerId fTriggersAwaitingHandling, fLastUsedTriggerMask; // implemented as 32-bit bitmaps
  TaskFunc* fTriggeredEventHandlers[MAX_NUM_EVENT_TRIGGERS];
  void* fTriggeredEventClientDatas[MAX_NUM_EVENT_TRIGGERS];
  unsigned fLastUsedTriggerNum; // in the range [0,MAX_NUM_EVENT_TRIGGERS)
};

#endif
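An illustrative sketch (not part of the distribution): as the comment above suggests,
"BasicUsageEnvironment0" can be subclassed to redirect "operator<<" somewhere other than
stderr - here to a caller-supplied FILE*. "FileLoggingEnvironment" is an invented class;
"getErrno()" must also be supplied, since it is still pure virtual at this level.

#include "BasicUsageEnvironment0.hh"
#include <stdio.h>
#include <errno.h>

class FileLoggingEnvironment: public BasicUsageEnvironment0 {
public:
  FileLoggingEnvironment(TaskScheduler& ts, FILE* f)
    : BasicUsageEnvironment0(ts), fFile(f) {}

  virtual int getErrno() const { return errno; }

  virtual UsageEnvironment& operator<<(char const* str)
    { fprintf(fFile, "%s", str == NULL ? "(NULL)" : str); return *this; }
  virtual UsageEnvironment& operator<<(int i) { fprintf(fFile, "%d", i); return *this; }
  virtual UsageEnvironment& operator<<(unsigned u) { fprintf(fFile, "%u", u); return *this; }
  virtual UsageEnvironment& operator<<(double d) { fprintf(fFile, "%f", d); return *this; }
  virtual UsageEnvironment& operator<<(void* p) { fprintf(fFile, "%p", p); return *this; }

private:
  FILE* fFile;
};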