Directory layout:
live/
live/BasicUsageEnvironment/
live/proxyServer/
live/mediaServer/
live/testProgs/
live/WindowsAudioInputDevice/
live/UsageEnvironment/
live/groupsock/
live/liveMedia/

==> live/configure <==
#!/bin/sh
echo "Whoa! This software distribution does NOT use the normal Unix \"configure\" mechanism for generating a Makefile. For instructions on how to build this software, see ."
echo "Also, please make sure that you're using the most up-to-date version of the source code - available from ."

==> live/config.aix <==
COMPILE_OPTS = $(INCLUDES) -I. -DBSD=1 -O -DTIME_BASE=int -DSOCKLEN_T=socklen_t
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DAIX=1
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/COPYING <==
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The "Library", below, refers to any such software library or work
which has been distributed under these terms. A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term "modification".)
"Source code" for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control compilation
and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) The modified work must itself be a software library.
b) You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
c) You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
d) If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library". Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library". The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
a) Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable "work that
uses the Library", as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
c) Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.
d) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
e) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
b) Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
9. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS

==> live/genMakefiles <==
#!/bin/sh
usage() {
echo "Usage: $0 "
exit 1
}
if [ $# -ne 1 ]
then
usage $*
fi
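# For example, "./genMakefiles linux" builds each subdirectory's Makefile by
# concatenating its Makefile.head, ../config.linux, and its Makefile.tail: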
cd liveMedia
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile
cd ../groupsock
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile
cd ../UsageEnvironment
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile
cd ../BasicUsageEnvironment
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile
cd ../testProgs
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile
cd ../mediaServer
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile
cd ../proxyServer
/bin/rm -f Makefile
cat Makefile.head ../config.$1 Makefile.tail > Makefile
chmod a-w Makefile
cd ..
/bin/rm -f Makefile
cat Makefile.head config.$1 Makefile.tail > Makefile
chmod a-w Makefile

==> live/config.uClinux <==
CROSS_COMPILE= arc-linux-uclibc-
COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)gcc
CFLAGS += $(COMPILE_OPTS)
C_FLAGS = $(CFLAGS)
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILE)g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
CPLUSPLUS_FLAGS += $(CPPFLAGS) -fexceptions
OBJ = o
LINK = $(CROSS_COMPILE)g++ -o
LINK_OPTS = -L. $(LDFLAGS)
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILE)ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = $(CXXLIBS)
LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
EXE =

==> live/config.sunos <==
COMPILE_OPTS = $(INCLUDES) -I. -DBSD=1 -O
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cc
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.solaris-64bit <==
COMPILE_OPTS = $(INCLUDES) -m64 -I. -O -DSOLARIS -DSOCKLEN_T=socklen_t
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -m64 -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -64 -r -dn
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = -lsocket -lnsl
LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
EXE =

==> live/config.solaris-32bit <==
COMPILE_OPTS = $(INCLUDES) -I. -O -DSOLARIS -DSOCKLEN_T=socklen_t
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -dn
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = -lsocket -lnsl
LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
EXE =

==> live/config.qnx4 <==
#
# Requires:
# QNX 4.25
# Watcom 10.6
# TCP/IP 5.0
#
COMPILE_OPTS = $(INCLUDES) -I. -D_QNX4 -DBSD -DSOCKLEN_T=uint32_t -I/usr/watcom/10.6/usr/include
C = c
C_COMPILER = cc32
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = cc32
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -WC,-xs
OBJ = o
LINK = cc32 -b -M -N30000 -o
LINK_OPTS = -l.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = wlib -n -b -c
LIBRARY_LINK_OPTS = $(LINK_OPTS)
LIB_SUFFIX = lib
LIBS_FOR_CONSOLE_APPLICATION = -lsocket
LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
EXE =

==> live/config.openbsd <==
.SUFFIXES: .cpp
COMPILE_OPTS = $(INCLUDES) -I. -DBSD=1 -O -DSOCKLEN_T=socklen_t
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DAIX=1
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.mingw <==
COMPILE_OPTS = $(INCLUDES) -I. -O -DSOCKLEN_T=int -DLOCALE_NOT_USED
C = c
C_COMPILER = $(CC)
C_FLAGS = $(COMPILE_OPTS) -DUSE_OUR_BZERO=1 -D__MINGW32__
CPP = cpp
CPLUSPLUS_COMPILER = $(CXX)
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -D__MINGW32__ -Wall -Wno-deprecated
OBJ = o
LINK = $(CXX) -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(LD) -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = -lws2_32
LIBS_FOR_GUI_APPLICATION = -lws2_32
EXE =

==> live/config.macosx-32bit <==
COMPILE_OPTS = -m32 $(INCLUDES) -I. $(EXTRA_LDFLAGS) -DBSD=1 -O -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -DTIME_BASE=int
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L. -m32
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = libtool -s -o
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.macosx-before-version-10.4 <==
COMPILE_OPTS = $(INCLUDES) -I. -DBSD=1 -O -DSOCKLEN_T=int -DTIME_BASE=int
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.macosx <==
COMPILE_OPTS = $(INCLUDES) -I. $(EXTRA_LDFLAGS) -DBSD=1 -O -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -DTIME_BASE=int
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = libtool -s -o
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.linux-with-shared-libraries <==
# 'CURRENT':'REVISION':'AGE' are updated - whenever a library changes - as follows:
# The library code changes, but without any changes to the API (i.e., interfaces) => increment REVISION
# At least one interface changes, or is removed => CURRENT += 1; REVISION = 0; AGE = 0
# One or more interfaces were added, but no existing interfaces were changed or removed => CURRENT += 1; REVISION = 0; AGE += 1
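# Worked example (illustrative): the liveMedia numbers below (CURRENT=24,
# REVISION=0, AGE=1) make the suffix expand to "so.23.1.0", since the first
# component is computed as CURRENT - AGE.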
libliveMedia_VERSION_CURRENT=24
libliveMedia_VERSION_REVISION=0
libliveMedia_VERSION_AGE=1
libliveMedia_LIB_SUFFIX=so.$(shell expr $(libliveMedia_VERSION_CURRENT) - $(libliveMedia_VERSION_AGE)).$(libliveMedia_VERSION_AGE).$(libliveMedia_VERSION_REVISION)
libBasicUsageEnvironment_VERSION_CURRENT=0
libBasicUsageEnvironment_VERSION_REVISION=2
libBasicUsageEnvironment_VERSION_AGE=0
libBasicUsageEnvironment_LIB_SUFFIX=so.$(shell expr $(libBasicUsageEnvironment_VERSION_CURRENT) - $(libBasicUsageEnvironment_VERSION_AGE)).$(libBasicUsageEnvironment_VERSION_AGE).$(libBasicUsageEnvironment_VERSION_REVISION)
libUsageEnvironment_VERSION_CURRENT=1
libUsageEnvironment_VERSION_REVISION=0
libUsageEnvironment_VERSION_AGE=0
libUsageEnvironment_LIB_SUFFIX=so.$(shell expr $(libUsageEnvironment_VERSION_CURRENT) - $(libUsageEnvironment_VERSION_AGE)).$(libUsageEnvironment_VERSION_AGE).$(libUsageEnvironment_VERSION_REVISION)
libgroupsock_VERSION_CURRENT=1
libgroupsock_VERSION_REVISION=4
libgroupsock_VERSION_AGE=0
libgroupsock_LIB_SUFFIX=so.$(shell expr $(libgroupsock_VERSION_CURRENT) - $(libgroupsock_VERSION_AGE)).$(libgroupsock_VERSION_AGE).$(libgroupsock_VERSION_REVISION)
#####
COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -fPIC
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 $(CPPFLAGS) $(CXXFLAGS)
OBJ = o
LINK = c++ -o
LINK_OPTS = -L. $(LDFLAGS)
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = gcc -o
SHORT_LIB_SUFFIX = so.$(shell expr $($(NAME)_VERSION_CURRENT) - $($(NAME)_VERSION_AGE))
LIB_SUFFIX = $(SHORT_LIB_SUFFIX).$($(NAME)_VERSION_AGE).$($(NAME)_VERSION_REVISION)
LIBRARY_LINK_OPTS = -shared -Wl,-soname,$(NAME).$(SHORT_LIB_SUFFIX) $(LDFLAGS)
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =
INSTALL2 = install_shared_libraries

==> live/config.linux-gdb <==
COMPILE_OPTS = $(INCLUDES) -I. -O -DSOCKLEN_T=socklen_t -g -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.linux-64bit <==
COMPILE_OPTS = $(INCLUDES) -m64 -fPIC -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.linux <==
COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 $(CPPFLAGS) $(CXXFLAGS)
OBJ = o
LINK = c++ -o
LINK_OPTS = -L. $(LDFLAGS)
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.irix <==
COMPILE_OPTS = $(INCLUDES) -I. -O
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS) -DIRIX
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DIRIX
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -B static
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.iphoneos <==
# Change the following version number, if necessary, before running "genMakefiles iphoneos"
IOS_VERSION = 6.1
DEVELOPER_PATH = /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer
TOOL_PATH = $(DEVELOPER_PATH)/usr/bin
SDK_PATH = $(DEVELOPER_PATH)/SDKs
SDK = $(SDK_PATH)/iPhoneOS$(IOS_VERSION).sdk
COMPILE_OPTS = $(INCLUDES) -I. $(EXTRA_LDFLAGS) -DBSD=1 -O2 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -fPIC -arch armv7 --sysroot=$(SDK)
C = c
C_COMPILER = $(TOOL_PATH)/gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(TOOL_PATH)/g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = $(TOOL_PATH)/g++ -o
LINK_OPTS = -L. -arch armv7 --sysroot=$(SDK) -L$(SDK)/usr/lib/system
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = libtool -s -o
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.iphone-simulator <==
# Change the following version number, if necessary, before running "genMakefiles iphone-simulator"
IOS_VERSION = 6.1
DEVELOPER_PATH = /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer
TOOL_PATH = $(DEVELOPER_PATH)/usr/bin
SDK_PATH = $(DEVELOPER_PATH)/SDKs
SDK = $(SDK_PATH)/iPhoneSimulator$(IOS_VERSION).sdk
COMPILE_OPTS = $(INCLUDES) -I. $(EXTRA_LDFLAGS) -DBSD=1 -O2 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -fPIC -arch i386 --sysroot=$(SDK)
C = c
C_COMPILER = $(TOOL_PATH)/gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(TOOL_PATH)/g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = $(TOOL_PATH)/g++ -o
LINK_OPTS = -L. -arch i386 --sysroot=$(SDK) -L$(SDK)/usr/lib/system
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = libtool -s -o
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.freebsd <==
COMPILE_OPTS = $(INCLUDES) -I. -O -DBSD=1 -DXLOCALE_NOT_USED=1 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.cygwin-for-vlc <==
COMPILE_OPTS = $(INCLUDES) -I. -O -DSOCKLEN_T=socklen_t -DXLOCALE_NOT_USED=1
C = c
C_COMPILER = gcc
C_FLAGS = $(COMPILE_OPTS) -DUSE_OUR_BZERO=1 -D_WIN32 -mno-cygwin
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 -D_WIN32 -Wno-deprecated -mno-cygwin
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.cygwin <==
COMPILE_OPTS = $(INCLUDES) -I. -O -DSOCKLEN_T=socklen_t -DXLOCALE_NOT_USED=1
C = c
C_COMPILER = gcc
C_FLAGS = $(COMPILE_OPTS) -DUSE_OUR_BZERO=1 -D__CYGWIN__
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.cris-axis-linux-gnu <==
# Note: AXIS_TOP_DIR is assumed to already be set in your environment.
# You can set this using the "init_env" script.
# See http://developer.axis.com/doc/software/apps/apps-howto.html
# for more information.
AXIS_DIR = $(AXIS_TOP_DIR)/target/cris-axis-linux-gnu
COMPILE_OPTS = $(INCLUDES) -I. -mlinux -isystem $(AXIS_DIR)/include -Wall -O2 -DSOCKLEN_T=socklen_t -DCRIS -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = gcc-cris
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = c++-cris
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wno-ctor-dtor-privacy -ansi -pipe
OBJ = o
LINK = c++-cris -static -o
AXIS_LINK_OPTS = -L$(AXIS_DIR)/lib
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS) -L$(AXIS_DIR)/lib -mlinux
LIBRARY_LINK = ld-cris -mcrislinux -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.bsplinux <==
CROSS_COMPILE=
COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)ecc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILE)e++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = $(CROSS_COMPILE)e++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILE)eld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = -lm
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.bfin-uclinux <==
CROSS_COMPILER= bfin-uclinux-
COMPILE_OPTS = $(INCLUDES) -I. -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -DUCLINUX -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILER)gcc
C_FLAGS = $(COMPILE_OPTS) -Wall
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILER)g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = $(CROSS_COMPILER)g++ -Wl,-elf2flt -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILER)ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.bfin-linux-uclibc <==
CROSS_COMPILER = bfin-linux-uclibc-
COMPILE_OPTS = $(INCLUDES) -I. -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -DUCLINUX -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILER)gcc
C_FLAGS = $(COMPILE_OPTS) -Wall
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILER)g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = $(CROSS_COMPILER)g++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILER)ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.avr32-linux <==
CROSS_COMPILE= avr32-linux-uclibc-
COMPILE_OPTS = -Os $(INCLUDES) -msoft-float -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1
C = c
C_COMPILER = $(CROSS_COMPILE)gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILE)c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -fuse-cxa-atexit -DBSD=1
OBJ = o
LINK = $(CROSS_COMPILE)c++ -o
LINK_OPTS =
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILE)ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.armlinux <==
CROSS_COMPILE?= arm-elf-
COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILE)g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = $(CROSS_COMPILE)g++ -o
LINK_OPTS =
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILE)ar cr
LIBRARY_LINK_OPTS = $(LINK_OPTS)
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.armeb-uclibc <==
CROSS_COMPILE= armeb-linux-uclibc-
COMPILE_OPTS = $(INCLUDES) -I. -Os -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(CROSS_COMPILE)g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
OBJ = o
LINK = $(CROSS_COMPILE)gcc -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = $(CROSS_COMPILE)ar cr
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/config.alpha <==
COMPILE_OPTS = $(INCLUDES) -I. -O -DTIME_BASE=int
C = c
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS) -DALPHA
CPP = cpp
CPLUSPLUS_COMPILER = c++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 -DALPHA
OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = ld -o
LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -B static
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =

==> live/README <==
For documentation and instructions for building this software,
see <http://www.live555.com/liveMedia/>.

==> live/Makefile.head <==
##### Change the following for your environment:

==> live/Makefile.tail <==
##### End of variables to change
LIVEMEDIA_DIR = liveMedia
GROUPSOCK_DIR = groupsock
USAGE_ENVIRONMENT_DIR = UsageEnvironment
BASIC_USAGE_ENVIRONMENT_DIR = BasicUsageEnvironment
TESTPROGS_DIR = testProgs
MEDIA_SERVER_DIR = mediaServer
PROXY_SERVER_DIR = proxyServer
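# Each target below simply recurses into every library and application subdirectory in turn: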
all:
	cd $(LIVEMEDIA_DIR) ; $(MAKE)
	cd $(GROUPSOCK_DIR) ; $(MAKE)
	cd $(USAGE_ENVIRONMENT_DIR) ; $(MAKE)
	cd $(BASIC_USAGE_ENVIRONMENT_DIR) ; $(MAKE)
	cd $(TESTPROGS_DIR) ; $(MAKE)
	cd $(MEDIA_SERVER_DIR) ; $(MAKE)
	cd $(PROXY_SERVER_DIR) ; $(MAKE)
install:
	cd $(LIVEMEDIA_DIR) ; $(MAKE) install
	cd $(GROUPSOCK_DIR) ; $(MAKE) install
	cd $(USAGE_ENVIRONMENT_DIR) ; $(MAKE) install
	cd $(BASIC_USAGE_ENVIRONMENT_DIR) ; $(MAKE) install
	cd $(TESTPROGS_DIR) ; $(MAKE) install
	cd $(MEDIA_SERVER_DIR) ; $(MAKE) install
	cd $(PROXY_SERVER_DIR) ; $(MAKE) install
clean:
	cd $(LIVEMEDIA_DIR) ; $(MAKE) clean
	cd $(GROUPSOCK_DIR) ; $(MAKE) clean
	cd $(USAGE_ENVIRONMENT_DIR) ; $(MAKE) clean
	cd $(BASIC_USAGE_ENVIRONMENT_DIR) ; $(MAKE) clean
	cd $(TESTPROGS_DIR) ; $(MAKE) clean
	cd $(MEDIA_SERVER_DIR) ; $(MAKE) clean
	cd $(PROXY_SERVER_DIR) ; $(MAKE) clean
distclean: clean
	-rm -f $(LIVEMEDIA_DIR)/Makefile $(GROUPSOCK_DIR)/Makefile \
	  $(USAGE_ENVIRONMENT_DIR)/Makefile $(BASIC_USAGE_ENVIRONMENT_DIR)/Makefile \
	  $(TESTPROGS_DIR)/Makefile $(MEDIA_SERVER_DIR)/Makefile \
	  $(PROXY_SERVER_DIR)/Makefile Makefile

==> live/fix-makefile <==
#!/bin/sh
# the next line restarts using tclsh \
exec tclsh8.4 "$0" "$@"
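# What follows rewrites the named Makefile in place: wherever a line contains
# ")$", a space is inserted between the ')' and the '$' (presumably to keep
# some make variants from misparsing ")$").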
set makefileName [lindex $argv 0]
set tmpfileName /tmp/rsftmp
set inFid [open $makefileName r]
set outFid [open $tmpfileName w]
while {![eof $inFid]} {
set line [gets $inFid]
if {[string match *\)\$* $line]} {
set pos [string first \)\$ $line]
set prefix [string range $line 0 $pos]
incr pos
set suffix [string range $line $pos end]
set line $prefix\ $suffix
}
puts $outFid $line
}
close $inFid
close $outFid
file rename -force $tmpfileName $makefileName

==> live/genWindowsMakefiles.cmd <==
@Echo OFF
SETLOCAL
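REM The next two lines switch to the drive, then the directory, containing this script: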
for %%I in (%0) do %%~dI
for %%I in (%0) do cd "%%~pI"
cd liveMedia
del /Q liveMedia.mak
type Makefile.head ..\win32config Makefile.tail > liveMedia.mak
cd ../groupsock
del /Q groupsock.mak
type Makefile.head ..\win32config Makefile.tail > groupsock.mak
cd ../UsageEnvironment
del /Q UsageEnvironment.mak
type Makefile.head ..\win32config Makefile.tail > UsageEnvironment.mak
cd ../BasicUsageEnvironment
del /Q BasicUsageEnvironment.mak
type Makefile.head ..\win32config Makefile.tail > BasicUsageEnvironment.mak
cd ../testProgs
del /Q testProgs.mak
type Makefile.head ..\win32config Makefile.tail > testProgs.mak
cd ../mediaServer
del /Q mediaServer.mak
type Makefile.head ..\win32config Makefile.tail > mediaServer.mak
cd ../proxyServer
del /Q proxyServer.mak
type Makefile.head ..\win32config Makefile.tail > proxyServer.mak
ENDLOCAL

==> live/genWindowsMakefiles <==
#!/bin/sh
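# Generate Windows (nmake-style) .mak files: each is built by concatenating the
# subdirectory's Makefile.head, ../win32config, and Makefile.tail.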
cd liveMedia
/bin/rm -f liveMedia.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > liveMedia.mak
cd ../groupsock
/bin/rm -f groupsock.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > groupsock.mak
cd ../UsageEnvironment
/bin/rm -f UsageEnvironment.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > UsageEnvironment.mak
cd ../BasicUsageEnvironment
/bin/rm -f BasicUsageEnvironment.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > BasicUsageEnvironment.mak
cd ../testProgs
/bin/rm -f testProgs.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > testProgs.mak
cd ../mediaServer
/bin/rm -f mediaServer.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > mediaServer.mak
cd ../proxyServer
/bin/rm -f proxyServer.mak
/bin/rm -f Makefile
cat Makefile.head ../win32config Makefile.tail > proxyServer.mak

==> live/win32config.Borland <==
# Comment out the following line to produce Makefiles that generate debuggable code:
NODEBUG=1
# The following definition ensures that we are properly matching
# the WinSock2 library file with the correct header files.
# (will link with "ws2_32.lib" and include "winsock2.h" & "Ws2tcpip.h")
TARGETOS = WINNT
# If for some reason you wish to use WinSock1 instead, uncomment the
# following two definitions.
# (will link with "wsock32.lib" and include "winsock.h")
#TARGETOS = WIN95
#APPVER = 4.0
#!include <win32.mak>
UI_OPTS = $(guilflags) $(guilibsdll)
# Use the following to get a console (e.g., for debugging):
CONSOLE_UI_OPTS = $(conlflags) $(conlibsdll)
CPU=i386
TOOLS32 = C:\Progra~1\Borland\CBuilder5
COMPILE_OPTS = $(INCLUDES) $(cdebug) $(cflags) $(cvarsdll) -I. -I$(TOOLS32)\include
C = c
C_COMPILER = $(TOOLS32)\bin\bcc32
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(C_COMPILER)
CPLUSPLUS_FLAGS = $(COMPILE_OPTS)
OBJ = obj
LINK = $(TOOLS32)\bin\ilink32
LIBRARY_LINK = $(TOOLS32)\bin\tlib
LINK_OPTS_0 = $(linkdebug) msvcirt.lib
LIBRARY_LINK_OPTS = /u
LINK_OPTS = $(LINK_OPTS_0) $(UI_OPTS)
CONSOLE_LINK_OPTS = c0x32
SERVICE_LINK_OPTS = kernel32.lib advapi32.lib shell32.lib -subsystem:console,$(APPVER)
LIB_SUFFIX = lib
LIBS_FOR_CONSOLE_APPLICATION = cw32.lib import32.lib
LIBS_FOR_GUI_APPLICATION = ,,cw32
EXE =
rc32 = "$(TOOLS32)\bin\brc32"
.rc.res:
	$(rc32) $<

==> live/win32config <==
# Comment out the following line to produce Makefiles that generate debuggable code:
NODEBUG=1
# The following definition ensures that we are properly matching
# the WinSock2 library file with the correct header files.
# (will link with "ws2_32.lib" and include "winsock2.h" & "Ws2tcpip.h")
TARGETOS = WINNT
# If for some reason you wish to use WinSock1 instead, uncomment the
# following two definitions.
# (will link with "wsock32.lib" and include "winsock.h")
#TARGETOS = WIN95
#APPVER = 4.0
!include <win32.mak>
UI_OPTS = $(guilflags) $(guilibsdll)
# Use the following to get a console (e.g., for debugging):
CONSOLE_UI_OPTS = $(conlflags) $(conlibsdll)
CPU=i386
TOOLS32 = c:\Program Files\DevStudio\Vc
COMPILE_OPTS = $(INCLUDES) $(cdebug) $(cflags) $(cvarsdll) -I. -I"$(TOOLS32)\include"
C = c
C_COMPILER = "$(TOOLS32)\bin\cl"
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(C_COMPILER)
CPLUSPLUS_FLAGS = $(COMPILE_OPTS)
OBJ = obj
LINK = $(link) -out:
LIBRARY_LINK = lib -out:
LINK_OPTS_0 = $(linkdebug) msvcirt.lib
LIBRARY_LINK_OPTS =
LINK_OPTS = $(LINK_OPTS_0) $(UI_OPTS)
CONSOLE_LINK_OPTS = $(LINK_OPTS_0) $(CONSOLE_UI_OPTS)
SERVICE_LINK_OPTS = kernel32.lib advapi32.lib shell32.lib -subsystem:console,$(APPVER)
LIB_SUFFIX = lib
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
MULTIMEDIA_LIBS = winmm.lib
EXE = .exe
PLATFORM = Windows
rc32 = "$(TOOLS32)\bin\rc"
.rc.res:
	$(rc32) $<

live/liveMedia/include/ (directory)

==> live/liveMedia/RTPSource.cpp <==
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP Sources
// Implementation
#include "RTPSource.hh"
#include "GroupsockHelper.hh"
////////// RTPSource //////////
Boolean RTPSource::lookupByName(UsageEnvironment& env,
char const* sourceName,
RTPSource*& resultSource) {
resultSource = NULL; // unless we succeed
MediaSource* source;
if (!MediaSource::lookupByName(env, sourceName, source)) return False;
if (!source->isRTPSource()) {
env.setResultMsg(sourceName, " is not a RTP source");
return False;
}
resultSource = (RTPSource*)source;
return True;
}
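// Illustrative usage sketch (variable names hypothetical):
//   RTPSource* rtpSource;
//   if (RTPSource::lookupByName(env, sourceName, rtpSource)) {
//     // "rtpSource" now points to a valid RTPSource
//   }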
Boolean RTPSource::hasBeenSynchronizedUsingRTCP() {
return fCurPacketHasBeenSynchronizedUsingRTCP;
}
Boolean RTPSource::isRTPSource() const {
return True;
}
RTPSource::RTPSource(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
u_int32_t rtpTimestampFrequency)
: FramedSource(env),
fRTPInterface(this, RTPgs),
fCurPacketHasBeenSynchronizedUsingRTCP(False), fLastReceivedSSRC(0),
fRTPPayloadFormat(rtpPayloadFormat), fTimestampFrequency(rtpTimestampFrequency),
fSSRC(our_random32()), fEnableRTCPReports(True) {
fReceptionStatsDB = new RTPReceptionStatsDB();
}
RTPSource::~RTPSource() {
delete fReceptionStatsDB;
}
void RTPSource::getAttributes() const {
envir().setResultMsg(""); // Fix later to get attributes from header #####
}
////////// RTPReceptionStatsDB //////////
RTPReceptionStatsDB::RTPReceptionStatsDB()
: fTable(HashTable::create(ONE_WORD_HASH_KEYS)), fTotNumPacketsReceived(0) {
reset();
}
void RTPReceptionStatsDB::reset() {
fNumActiveSourcesSinceLastReset = 0;
Iterator iter(*this);
RTPReceptionStats* stats;
while ((stats = iter.next()) != NULL) {
stats->reset();
}
}
RTPReceptionStatsDB::~RTPReceptionStatsDB() {
// First, remove and delete all stats records from the table:
RTPReceptionStats* stats;
while ((stats = (RTPReceptionStats*)fTable->RemoveNext()) != NULL) {
delete stats;
}
// Then, delete the table itself:
delete fTable;
}
void RTPReceptionStatsDB
::noteIncomingPacket(u_int32_t SSRC, u_int16_t seqNum,
u_int32_t rtpTimestamp, unsigned timestampFrequency,
Boolean useForJitterCalculation,
struct timeval& resultPresentationTime,
Boolean& resultHasBeenSyncedUsingRTCP,
unsigned packetSize) {
++fTotNumPacketsReceived;
RTPReceptionStats* stats = lookup(SSRC);
if (stats == NULL) {
// This is the first time we've heard from this SSRC.
// Create a new record for it:
stats = new RTPReceptionStats(SSRC, seqNum);
if (stats == NULL) return;
add(SSRC, stats);
}
if (stats->numPacketsReceivedSinceLastReset() == 0) {
++fNumActiveSourcesSinceLastReset;
}
stats->noteIncomingPacket(seqNum, rtpTimestamp, timestampFrequency,
useForJitterCalculation,
resultPresentationTime,
resultHasBeenSyncedUsingRTCP, packetSize);
}
void RTPReceptionStatsDB
::noteIncomingSR(u_int32_t SSRC,
u_int32_t ntpTimestampMSW, u_int32_t ntpTimestampLSW,
u_int32_t rtpTimestamp) {
RTPReceptionStats* stats = lookup(SSRC);
if (stats == NULL) {
// This is the first time we've heard of this SSRC.
// Create a new record for it:
stats = new RTPReceptionStats(SSRC);
if (stats == NULL) return;
add(SSRC, stats);
}
stats->noteIncomingSR(ntpTimestampMSW, ntpTimestampLSW, rtpTimestamp);
}
void RTPReceptionStatsDB::removeRecord(u_int32_t SSRC) {
RTPReceptionStats* stats = lookup(SSRC);
if (stats != NULL) {
long SSRC_long = (long)SSRC;
fTable->Remove((char const*)SSRC_long);
delete stats;
}
}
RTPReceptionStatsDB::Iterator
::Iterator(RTPReceptionStatsDB& receptionStatsDB)
: fIter(HashTable::Iterator::create(*(receptionStatsDB.fTable))) {
}
RTPReceptionStatsDB::Iterator::~Iterator() {
delete fIter;
}
RTPReceptionStats*
RTPReceptionStatsDB::Iterator::next(Boolean includeInactiveSources) {
char const* key; // dummy
// If asked, skip over any sources that haven't been active
// since the last reset:
RTPReceptionStats* stats;
do {
stats = (RTPReceptionStats*)(fIter->next(key));
} while (stats != NULL && !includeInactiveSources
&& stats->numPacketsReceivedSinceLastReset() == 0);
return stats;
}
RTPReceptionStats* RTPReceptionStatsDB::lookup(u_int32_t SSRC) const {
long SSRC_long = (long)SSRC;
return (RTPReceptionStats*)(fTable->Lookup((char const*)SSRC_long));
}
void RTPReceptionStatsDB::add(u_int32_t SSRC, RTPReceptionStats* stats) {
long SSRC_long = (long)SSRC;
fTable->Add((char const*)SSRC_long, stats);
}
////////// RTPReceptionStats //////////
RTPReceptionStats::RTPReceptionStats(u_int32_t SSRC, u_int16_t initialSeqNum) {
initSeqNum(initialSeqNum);
init(SSRC);
}
RTPReceptionStats::RTPReceptionStats(u_int32_t SSRC) {
init(SSRC);
}
RTPReceptionStats::~RTPReceptionStats() {
}
void RTPReceptionStats::init(u_int32_t SSRC) {
fSSRC = SSRC;
fTotNumPacketsReceived = 0;
fTotBytesReceived_hi = fTotBytesReceived_lo = 0;
fBaseExtSeqNumReceived = 0;
fHighestExtSeqNumReceived = 0;
fHaveSeenInitialSequenceNumber = False;
fLastTransit = ~0;
fPreviousPacketRTPTimestamp = 0;
fJitter = 0.0;
fLastReceivedSR_NTPmsw = fLastReceivedSR_NTPlsw = 0;
fLastReceivedSR_time.tv_sec = fLastReceivedSR_time.tv_usec = 0;
fLastPacketReceptionTime.tv_sec = fLastPacketReceptionTime.tv_usec = 0;
fMinInterPacketGapUS = 0x7FFFFFFF;
fMaxInterPacketGapUS = 0;
fTotalInterPacketGaps.tv_sec = fTotalInterPacketGaps.tv_usec = 0;
fHasBeenSynchronized = False;
fSyncTime.tv_sec = fSyncTime.tv_usec = 0;
reset();
}
void RTPReceptionStats::initSeqNum(u_int16_t initialSeqNum) {
fBaseExtSeqNumReceived = 0x10000 | initialSeqNum;
fHighestExtSeqNumReceived = 0x10000 | initialSeqNum;
fHaveSeenInitialSequenceNumber = True;
}
#ifndef MILLION
#define MILLION 1000000
#endif
void RTPReceptionStats
::noteIncomingPacket(u_int16_t seqNum, u_int32_t rtpTimestamp,
unsigned timestampFrequency,
Boolean useForJitterCalculation,
struct timeval& resultPresentationTime,
Boolean& resultHasBeenSyncedUsingRTCP,
unsigned packetSize) {
if (!fHaveSeenInitialSequenceNumber) initSeqNum(seqNum);
++fNumPacketsReceivedSinceLastReset;
++fTotNumPacketsReceived;
u_int32_t prevTotBytesReceived_lo = fTotBytesReceived_lo;
fTotBytesReceived_lo += packetSize;
if (fTotBytesReceived_lo < prevTotBytesReceived_lo) { // wrap-around
++fTotBytesReceived_hi;
}
// Check whether the new sequence number is the highest yet seen:
unsigned oldSeqNum = (fHighestExtSeqNumReceived&0xFFFF);
unsigned seqNumCycle = (fHighestExtSeqNumReceived&0xFFFF0000);
unsigned seqNumDifference = (unsigned)((int)seqNum-(int)oldSeqNum);
unsigned newSeqNum = 0;
if (seqNumLT((u_int16_t)oldSeqNum, seqNum)) {
// This packet was not an old packet received out of order, so check it:
if (seqNumDifference >= 0x8000) {
// The sequence number wrapped around, so start a new cycle:
seqNumCycle += 0x10000;
}
newSeqNum = seqNumCycle|seqNum;
if (newSeqNum > fHighestExtSeqNumReceived) {
fHighestExtSeqNumReceived = newSeqNum;
}
} else if (fTotNumPacketsReceived > 1) {
// This packet was an old packet received out of order
if ((int)seqNumDifference >= 0x8000) {
// The sequence number wrapped around, so switch to an old cycle:
seqNumCycle -= 0x10000;
}
newSeqNum = seqNumCycle|seqNum;
if (newSeqNum < fBaseExtSeqNumReceived) {
fBaseExtSeqNumReceived = newSeqNum;
}
}
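// Editor's worked example (illustration, not in the original source):
// if fHighestExtSeqNumReceived is 0x1FFFE (cycle 0x10000, 16-bit seqNum
// 0xFFFE) and seqNum 3 now arrives, then seqNumLT(0xFFFE, 3) is True and
// seqNumDifference (0xFFFF0005) is >= 0x8000, so the cycle advances to
// 0x20000 and fHighestExtSeqNumReceived becomes 0x20003.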
// Record the inter-packet delay
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
if (fLastPacketReceptionTime.tv_sec != 0
|| fLastPacketReceptionTime.tv_usec != 0) {
unsigned gap
= (timeNow.tv_sec - fLastPacketReceptionTime.tv_sec)*MILLION
+ timeNow.tv_usec - fLastPacketReceptionTime.tv_usec;
if (gap > fMaxInterPacketGapUS) {
fMaxInterPacketGapUS = gap;
}
if (gap < fMinInterPacketGapUS) {
fMinInterPacketGapUS = gap;
}
fTotalInterPacketGaps.tv_usec += gap;
if (fTotalInterPacketGaps.tv_usec >= MILLION) {
++fTotalInterPacketGaps.tv_sec;
fTotalInterPacketGaps.tv_usec -= MILLION;
}
}
fLastPacketReceptionTime = timeNow;
// Compute the current 'jitter' using the received packet's RTP timestamp,
// and the RTP timestamp that would correspond to the current time.
// (Use the code from appendix A.8 in the RTP spec.)
// Note, however, that we don't use this packet if its timestamp is
// the same as that of the previous packet (this indicates a multi-packet
// fragment), or if we've been explicitly told not to use this packet.
if (useForJitterCalculation
&& rtpTimestamp != fPreviousPacketRTPTimestamp) {
unsigned arrival = (timestampFrequency*timeNow.tv_sec);
arrival += (unsigned)
((2.0*timestampFrequency*timeNow.tv_usec + 1000000.0)/2000000);
// note: rounding
int transit = arrival - rtpTimestamp;
if (fLastTransit == (~0)) fLastTransit = transit; // hack for first time
int d = transit - fLastTransit;
fLastTransit = transit;
if (d < 0) d = -d;
fJitter += (1.0/16.0) * ((double)d - fJitter);
}
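// Editor's note (illustration): the update above is the interarrival-jitter
// estimator of RFC 3550 (sections 6.4.1 and A.8): with D the change in
// "transit" (arrival time minus RTP timestamp, both in timestamp units)
// between successive packets, J += (|D| - J)/16. The 1/16 gain yields a
// smoothed estimate, which is reported (truncated to an integer) in RTCP RRs.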
// Return the 'presentation time' that corresponds to "rtpTimestamp":
if (fSyncTime.tv_sec == 0 && fSyncTime.tv_usec == 0) {
// This is the first timestamp that we've seen, so use the current
// 'wall clock' time as the synchronization time. (This will be
// corrected later when we receive RTCP SRs.)
fSyncTimestamp = rtpTimestamp;
fSyncTime = timeNow;
}
int timestampDiff = rtpTimestamp - fSyncTimestamp;
// Note: This works even if the timestamp wraps around
// (as long as "int" is 32 bits)
// Divide this by the timestamp frequency to get real time:
double timeDiff = timestampDiff/(double)timestampFrequency;
// Add this to the 'sync time' to get our result:
unsigned const million = 1000000;
unsigned seconds, uSeconds;
if (timeDiff >= 0.0) {
seconds = fSyncTime.tv_sec + (unsigned)(timeDiff);
uSeconds = fSyncTime.tv_usec
+ (unsigned)((timeDiff - (unsigned)timeDiff)*million);
if (uSeconds >= million) {
uSeconds -= million;
++seconds;
}
} else {
timeDiff = -timeDiff;
seconds = fSyncTime.tv_sec - (unsigned)(timeDiff);
uSeconds = fSyncTime.tv_usec
- (unsigned)((timeDiff - (unsigned)timeDiff)*million);
if ((int)uSeconds < 0) {
uSeconds += million;
--seconds;
}
}
resultPresentationTime.tv_sec = seconds;
resultPresentationTime.tv_usec = uSeconds;
resultHasBeenSyncedUsingRTCP = fHasBeenSynchronized;
// Save these as the new synchronization timestamp & time:
fSyncTimestamp = rtpTimestamp;
fSyncTime = resultPresentationTime;
fPreviousPacketRTPTimestamp = rtpTimestamp;
}
void RTPReceptionStats::noteIncomingSR(u_int32_t ntpTimestampMSW,
u_int32_t ntpTimestampLSW,
u_int32_t rtpTimestamp) {
fLastReceivedSR_NTPmsw = ntpTimestampMSW;
fLastReceivedSR_NTPlsw = ntpTimestampLSW;
gettimeofday(&fLastReceivedSR_time, NULL);
// Use this SR to update time synchronization information:
fSyncTimestamp = rtpTimestamp;
fSyncTime.tv_sec = ntpTimestampMSW - 0x83AA7E80; // 1/1/1900 -> 1/1/1970
double microseconds = (ntpTimestampLSW*15625.0)/0x04000000; // 10^6/2^32
fSyncTime.tv_usec = (unsigned)(microseconds+0.5);
fHasBeenSynchronized = True;
}
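// Editor's worked example (illustration, not in the original source): an SR
// whose NTP timestamp has MSW 0xD8000000 maps to 0xD8000000 - 0x83AA7E80
// == 1414889856 seconds after 1/1/1970 (early November 2014), and an LSW of
// 0x80000000 maps to 0x80000000*15625.0/0x04000000 == 500000 microseconds,
// i.e. exactly half a second - confirming that 15625/2^26 == 10^6/2^32.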
double RTPReceptionStats::totNumKBytesReceived() const {
double const hiMultiplier = 0x20000000/125.0; // == (2^32)/(10^3)
return fTotBytesReceived_hi*hiMultiplier + fTotBytesReceived_lo/1000.0;
}
unsigned RTPReceptionStats::jitter() const {
return (unsigned)fJitter;
}
void RTPReceptionStats::reset() {
fNumPacketsReceivedSinceLastReset = 0;
fLastResetExtSeqNumReceived = fHighestExtSeqNumReceived;
}
Boolean seqNumLT(u_int16_t s1, u_int16_t s2) {
// a 'less-than' on 16-bit sequence numbers
int diff = s2-s1;
if (diff > 0) {
return (diff < 0x8000);
} else if (diff < 0) {
return (diff < -0x8000);
} else { // diff == 0
return False;
}
}
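/* Editor's sketch (not part of the original file): a standalone check of the
modulo-2^16 ordering that seqNumLT() above implements. "Boolean" is replaced
by plain int so that this compiles on its own: */
#include <assert.h>
static int seqNumLT_demo(unsigned short s1, unsigned short s2) {
int diff = s2 - s1; // both operands promote to int
if (diff > 0) return diff < 0x8000; // ordinary increase
if (diff < 0) return diff < -0x8000; // increase across a wrap-around
return 0; // equal => not less-than
}
int main() {
assert(seqNumLT_demo(1, 2)); // simple increase
assert(!seqNumLT_demo(2, 1)); // simple decrease
assert(seqNumLT_demo(0xFFFE, 3)); // 0xFFFE -> 3 crosses the 16-bit wrap
assert(!seqNumLT_demo(3, 0xFFFE)); // ...and the reverse compares 'greater'
return 0;
}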
live/liveMedia/SimpleRTPSource.cpp
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// An RTP source for a simple RTP payload format that
// - doesn't have any special headers following the RTP header
// - doesn't have any special framing apart from the packet data itself
// Implementation
#include "SimpleRTPSource.hh"
#include <string.h>
SimpleRTPSource*
SimpleRTPSource::createNew(UsageEnvironment& env,
Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency,
char const* mimeTypeString,
unsigned offset, Boolean doNormalMBitRule) {
return new SimpleRTPSource(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency,
mimeTypeString, offset, doNormalMBitRule);
}
SimpleRTPSource
::SimpleRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency,
char const* mimeTypeString,
unsigned offset, Boolean doNormalMBitRule)
: MultiFramedRTPSource(env, RTPgs,
rtpPayloadFormat, rtpTimestampFrequency),
fMIMEtypeString(strDup(mimeTypeString)), fOffset(offset) {
fUseMBitForFrameEnd = doNormalMBitRule && strncmp(mimeTypeString, "audio/", 6) != 0;
}
SimpleRTPSource::~SimpleRTPSource() {
delete[] (char*)fMIMEtypeString;
}
Boolean SimpleRTPSource
::processSpecialHeader(BufferedPacket* packet,
unsigned& resultSpecialHeaderSize) {
fCurrentPacketCompletesFrame
= !fUseMBitForFrameEnd || packet->rtpMarkerBit();
resultSpecialHeaderSize = fOffset;
return True;
}
char const* SimpleRTPSource::MIMEtype() const {
if (fMIMEtypeString == NULL) return MultiFramedRTPSource::MIMEtype();
return fMIMEtypeString;
}
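/* Editor's usage sketch (not part of the original file), in the style of the
"testProgs" receivers; the multicast address, port and TTL are hypothetical.
Note that in real code the Groupsock must outlive the source that reads from
it: */
#include "liveMedia.hh"
#include "GroupsockHelper.hh"
FramedSource* exampleCreateSimpleRTPSource(UsageEnvironment& env) {
struct in_addr sessionAddress;
sessionAddress.s_addr = our_inet_addr("239.255.42.42"); // hypothetical group
Groupsock* rtpGroupsock = new Groupsock(env, sessionAddress, Port(1234), 7/*TTL*/);
// Payload type 33 ("video/MP2T") carries a raw MPEG-2 Transport Stream,
// which has no frame boundaries, so the 'M' bit rule is disabled:
return SimpleRTPSource::createNew(env, rtpGroupsock,
33, 90000/*RTP timestamp frequency*/,
"video/MP2T",
0/*no special header after the RTP header*/,
False/*doNormalMBitRule*/);
}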
live/liveMedia/MP3Transcoder.cpp
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// MP3 Transcoder
// Implementation
#include "MP3Transcoder.hh"
MP3Transcoder::MP3Transcoder(UsageEnvironment& env,
MP3ADUTranscoder* aduTranscoder)
: MP3FromADUSource(env, aduTranscoder, False) {
}
MP3Transcoder::~MP3Transcoder() {
}
MP3Transcoder* MP3Transcoder::createNew(UsageEnvironment& env,
unsigned outBitrate /* in kbps */,
FramedSource* inputSource) {
MP3Transcoder* newSource = NULL;
do {
// Create the intermediate filters that help implement the transcoder:
ADUFromMP3Source* aduFromMP3
= ADUFromMP3Source::createNew(env, inputSource, False);
// Note: This also checks that "inputSource" is an MP3 source
if (aduFromMP3 == NULL) break;
MP3ADUTranscoder* aduTranscoder
= MP3ADUTranscoder::createNew(env, outBitrate, aduFromMP3);
if (aduTranscoder == NULL) break;
// Then create the transcoder itself:
newSource = new MP3Transcoder(env, aduTranscoder);
} while (0);
return newSource;
}
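/* Editor's usage sketch (not part of the original file): the complete chain
that createNew() above assembles, fed from a file. The file name and the
output bitrate are hypothetical: */
#include "liveMedia.hh"
MP3Transcoder* exampleCreateTranscoder(UsageEnvironment& env) {
FramedSource* mp3Source = MP3FileSource::createNew(env, "input.mp3");
if (mp3Source == NULL) return NULL;
// Internally: MP3 frames -> ADUs (ADUFromMP3Source) -> re-encoded ADUs
// (MP3ADUTranscoder) -> MP3 frames again (this class, an MP3FromADUSource):
return MP3Transcoder::createNew(env, 64/*output bitrate, in kbps*/, mp3Source);
}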
live/liveMedia/SimpleRTPSink.cpp
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A simple RTP sink that packs frames into each outgoing
// packet, without any fragmentation or special headers.
// Implementation
#include "SimpleRTPSink.hh"
SimpleRTPSink::SimpleRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency,
char const* sdpMediaTypeString,
char const* rtpPayloadFormatName,
unsigned numChannels,
Boolean allowMultipleFramesPerPacket,
Boolean doNormalMBitRule)
: MultiFramedRTPSink(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency, rtpPayloadFormatName,
numChannels),
fAllowMultipleFramesPerPacket(allowMultipleFramesPerPacket), fSetMBitOnNextPacket(False) {
fSDPMediaTypeString
= strDup(sdpMediaTypeString == NULL ? "unknown" : sdpMediaTypeString);
fSetMBitOnLastFrames = doNormalMBitRule && strcmp(fSDPMediaTypeString, "audio") != 0;
}
SimpleRTPSink::~SimpleRTPSink() {
delete[] (char*)fSDPMediaTypeString;
}
SimpleRTPSink*
SimpleRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency,
char const* sdpMediaTypeString,
char const* rtpPayloadFormatName,
unsigned numChannels,
Boolean allowMultipleFramesPerPacket,
Boolean doNormalMBitRule) {
return new SimpleRTPSink(env, RTPgs,
rtpPayloadFormat, rtpTimestampFrequency,
sdpMediaTypeString, rtpPayloadFormatName,
numChannels,
allowMultipleFramesPerPacket,
doNormalMBitRule);
}
void SimpleRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
unsigned char* frameStart,
unsigned numBytesInFrame,
struct timeval framePresentationTime,
unsigned numRemainingBytes) {
if (numRemainingBytes == 0) {
// This packet contains the last (or only) fragment of the frame.
// Set the RTP 'M' ('marker') bit, if appropriate:
if (fSetMBitOnLastFrames) setMarkerBit();
}
if (fSetMBitOnNextPacket) {
// An external object has asked for the 'M' bit to be set on the next packet:
setMarkerBit();
fSetMBitOnNextPacket = False;
}
// Important: Also call our base class's doSpecialFrameHandling(),
// to set the packet's timestamp:
MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
frameStart, numBytesInFrame,
framePresentationTime,
numRemainingBytes);
}
Boolean SimpleRTPSink::
frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
unsigned /*numBytesInFrame*/) const {
return fAllowMultipleFramesPerPacket;
}
char const* SimpleRTPSink::sdpMediaType() const {
return fSDPMediaTypeString;
}
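/* Editor's usage sketch (not part of the original file): the sending-side
counterpart of SimpleRTPSource, with hypothetical parameters. Because the
SDP media type is "video" but doNormalMBitRule is False, the 'M' bit is never
set for this payload (a transport stream has no frame boundaries worth
marking): */
#include "liveMedia.hh"
SimpleRTPSink* exampleCreateSimpleRTPSink(UsageEnvironment& env,
Groupsock* rtpGroupsock) {
return SimpleRTPSink::createNew(env, rtpGroupsock,
33/*payload type*/, 90000/*timestamp frequency*/,
"video"/*SDP media type*/, "MP2T"/*payload format name*/,
1/*numChannels*/,
True/*pack multiple frames per packet*/,
False/*doNormalMBitRule*/);
}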
live/liveMedia/rtcp_from_spec.c
/* RTCP code taken directly from the most recent RTP specification:
* RFC 3550
* Implementation
*/
#include "rtcp_from_spec.h"
/*****
A.7 Computing the RTCP Transmission Interval
The following functions implement the RTCP transmission and reception
rules described in Section 6.2. These rules are coded in several
functions:
o rtcp_interval() computes the deterministic calculated
interval, measured in seconds. The parameters are defined in
Section 6.3.
o OnExpire() is called when the RTCP transmission timer expires.
o OnReceive() is called whenever an RTCP packet is received.
Both OnExpire() and OnReceive() have event e as an argument. This is
the next scheduled event for that participant, either an RTCP report
or a BYE packet. It is assumed that the following functions are
available:
o Schedule(time t, event e) schedules an event e to occur at
time t. When time t arrives, the function OnExpire is called
with e as an argument.
o Reschedule(time t, event e) reschedules a previously scheduled
event e for time t.
o SendRTCPReport(event e) sends an RTCP report.
o SendBYEPacket(event e) sends a BYE packet.
o TypeOfEvent(event e) returns EVENT_BYE if the event being
processed is for a BYE packet to be sent, else it returns
EVENT_REPORT.
o PacketType(p) returns PACKET_RTCP_REPORT if packet p is an
RTCP report (not BYE), PACKET_BYE if it's a BYE RTCP packet,
and PACKET_RTP if it's a regular RTP data packet.
o ReceivedPacketSize() and SentPacketSize() return the size of
the referenced packet in octets.
o NewMember(p) returns a 1 if the participant who sent packet p
is not currently in the member list, 0 otherwise. Note this
function is not sufficient for a complete implementation
because each CSRC identifier in an RTP packet and each SSRC in
a BYE packet should be processed.
o NewSender(p) returns a 1 if the participant who sent packet p
is not currently in the sender sublist of the member list, 0
otherwise.
o AddMember() and RemoveMember() to add and remove participants
from the member list.
o AddSender() and RemoveSender() to add and remove participants
from the sender sublist of the member list.
*****/
double rtcp_interval(int members,
int senders,
double rtcp_bw,
int we_sent,
double avg_rtcp_size,
int initial)
{
/*
* Minimum average time between RTCP packets from this site (in
* seconds). This time prevents the reports from `clumping' when
* sessions are small and the law of large numbers isn't helping
* to smooth out the traffic. It also keeps the report interval
* from becoming ridiculously small during transient outages like
* a network partition.
*/
double const RTCP_MIN_TIME = 5.;
/*
* Fraction of the RTCP bandwidth to be shared among active
* senders. (This fraction was chosen so that in a typical
* session with one or two active senders, the computed report
* time would be roughly equal to the minimum report time so that
* we don't unnecessarily slow down receiver reports.) The
* receiver fraction must be 1 - the sender fraction.
*/
double const RTCP_SENDER_BW_FRACTION = 0.25;
double const RTCP_RCVR_BW_FRACTION = (1-RTCP_SENDER_BW_FRACTION);
/*
* To compensate for "unconditional reconsideration" converging to a
* value below the intended average.
*/
double const COMPENSATION = 2.71828 - 1.5;
double t; /* interval */
double rtcp_min_time = RTCP_MIN_TIME;
int n; /* no. of members for computation */
/*
* Very first call at application start-up uses half the min
* delay for quicker notification while still allowing some time
* before reporting for randomization and to learn about other
* sources so the report interval will converge to the correct
* interval more quickly.
*/
if (initial) {
rtcp_min_time /= 2;
}
/*
* If there were active senders, give them at least a minimum
* share of the RTCP bandwidth. Otherwise all participants share
* the RTCP bandwidth equally.
*/
n = members;
if (senders > 0 && senders < members * RTCP_SENDER_BW_FRACTION) {
if (we_sent) {
rtcp_bw *= RTCP_SENDER_BW_FRACTION;
n = senders;
} else {
rtcp_bw *= RTCP_RCVR_BW_FRACTION;
n -= senders;
}
}
/*
* The effective number of sites times the average packet size is
* the total number of octets sent when each site sends a report.
* Dividing this by the effective bandwidth gives the time
* interval over which those packets must be sent in order to
* meet the bandwidth target, with a minimum enforced. In that
* time interval we send one report so this time is also our
* average time between reports.
*/
t = avg_rtcp_size * n / rtcp_bw;
if (t < rtcp_min_time) t = rtcp_min_time;
/*
* To avoid traffic bursts from unintended synchronization with
* other sites, we then pick our actual next report interval as a
* random number uniformly distributed between 0.5*t and 1.5*t.
*/
t = t * (drand48() + 0.5);
t = t / COMPENSATION;
return t;
}
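/*
* Editor's worked example (illustration, not from the spec): with
* members == 2, senders == 1, rtcp_bw == 937.5 bytes/s (5% of 150 kb/s),
* avg_rtcp_size == 200 bytes and initial == 0:
* - senders (1) is not below members*0.25 (0.5), so the bandwidth is not
* split between senders and receivers, and n stays at 2;
* - t = 200 * 2 / 937.5 = 0.427 s, below RTCP_MIN_TIME, so t becomes 5 s;
* - after randomization to [2.5, 7.5] s and division by COMPENSATION
* (~1.218), the returned interval is uniform on roughly [2.05, 6.16] s.
*/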
void OnExpire(event e,
int members,
int senders,
double rtcp_bw,
int we_sent,
double *avg_rtcp_size,
int *initial,
time_tp tc,
time_tp *tp,
int *pmembers)
{
/* This function is responsible for deciding whether to send
* an RTCP report or BYE packet now, or to reschedule transmission.
* It is also responsible for updating the pmembers, initial, tp,
* and avg_rtcp_size state variables. This function should be called
* upon expiration of the event timer used by Schedule(). */
double t; /* Interval */
double tn; /* Next transmit time */
/* In the case of a BYE, we use "unconditional reconsideration" to
* reschedule the transmission of the BYE if necessary */
if (TypeOfEvent(e) == EVENT_BYE) {
t = rtcp_interval(members,
senders,
rtcp_bw,
we_sent,
*avg_rtcp_size,
*initial);
tn = *tp + t;
if (tn <= tc) {
SendBYEPacket(e);
exit(1);
} else {
Schedule(tn, e);
}
} else if (TypeOfEvent(e) == EVENT_REPORT) {
t = rtcp_interval(members,
senders,
rtcp_bw,
we_sent,
*avg_rtcp_size,
*initial);
tn = *tp + t;
if (tn <= tc) {
SendRTCPReport(e);
*avg_rtcp_size = (1./16.)*SentPacketSize(e) +
(15./16.)*(*avg_rtcp_size);
*tp = tc;
/* We must redraw the interval. Don't reuse the
one computed above, since it's not actually
distributed the same, as we are conditioned
on it being small enough to cause a packet to
be sent */
t = rtcp_interval(members,
senders,
rtcp_bw,
we_sent,
*avg_rtcp_size,
*initial);
Schedule(t+tc,e);
*initial = 0;
} else {
Schedule(tn, e);
}
*pmembers = members;
}
}
void OnReceive(packet p,
event e,
int *members,
int *pmembers,
int *senders,
double *avg_rtcp_size,
double *tp,
double tc,
double tn)
{
/* What we do depends on whether we have left the group, and
* are waiting to send a BYE (TypeOfEvent(e) == EVENT_BYE) or
* an RTCP report. p represents the packet that was just received. */
if (PacketType(p) == PACKET_RTCP_REPORT) {
if (NewMember(p) && (TypeOfEvent(e) == EVENT_REPORT)) {
AddMember(p);
*members += 1;
}
*avg_rtcp_size = (1./16.)*ReceivedPacketSize(p) +
(15./16.)*(*avg_rtcp_size);
} else if (PacketType(p) == PACKET_RTP) {
if (NewMember(p) && (TypeOfEvent(e) == EVENT_REPORT)) {
AddMember(p);
*members += 1;
}
if (NewSender(p) && (TypeOfEvent(e) == EVENT_REPORT)) {
AddSender(p);
*senders += 1;
}
} else if (PacketType(p) == PACKET_BYE) {
*avg_rtcp_size = (1./16.)*ReceivedPacketSize(p) +
(15./16.)*(*avg_rtcp_size);
if (TypeOfEvent(e) == EVENT_REPORT) {
if (NewSender(p) == FALSE) {
RemoveSender(p);
*senders -= 1;
}
if (NewMember(p) == FALSE) {
RemoveMember(p);
*members -= 1;
}
if(*members < *pmembers) {
tn = tc + (((double) *members)/(*pmembers))*(tn - tc);
*tp = tc - (((double) *members)/(*pmembers))*(tc - *tp);
/* Reschedule the next report for time tn */
Reschedule(tn, e);
*pmembers = *members;
}
} else if (TypeOfEvent(e) == EVENT_BYE) {
*members += 1;
}
}
}
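/* Editor's note (illustration): the scaling in the PACKET_BYE branch above
* is RFC 3550's "reverse reconsideration". For example, with *pmembers == 10
* and *members == 5 after a burst of BYEs, a report scheduled for
* tn = tc + 4 s is pulled in to tc + (5/10)*4 = tc + 2 s, so the remaining
* members speed up their reporting rather than going quiet. */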
live/liveMedia/MP3ADU.cpp
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// 'ADU' MP3 streams (for improved loss-tolerance)
// Implementation
#include "MP3ADU.hh"
#include "MP3ADUdescriptor.hh"
#include "MP3Internals.hh"
#include <string.h>
#ifdef TEST_LOSS
#include "GroupsockHelper.hh"
#endif
// Segment data structures, used in the implementation below:
#define SegmentBufSize 2000 /* conservatively high */
class Segment {
public:
unsigned char buf[SegmentBufSize];
unsigned char* dataStart() { return &buf[descriptorSize]; }
unsigned frameSize; // if it's a non-ADU frame
unsigned dataHere(); // if it's a non-ADU frame
unsigned descriptorSize;
static unsigned const headerSize;
unsigned sideInfoSize, aduSize;
unsigned backpointer;
struct timeval presentationTime;
unsigned durationInMicroseconds;
};
unsigned const Segment::headerSize = 4;
#define SegmentQueueSize 20
class SegmentQueue {
public:
SegmentQueue(Boolean directionIsToADU, Boolean includeADUdescriptors)
: fDirectionIsToADU(directionIsToADU),
fIncludeADUdescriptors(includeADUdescriptors) {
reset();
}
Segment s[SegmentQueueSize];
unsigned headIndex() {return fHeadIndex;}
Segment& headSegment() {return s[fHeadIndex];}
unsigned nextFreeIndex() {return fNextFreeIndex;}
Segment& nextFreeSegment() {return s[fNextFreeIndex];}
Boolean isEmpty() {return isEmptyOrFull() && totalDataSize() == 0;}
Boolean isFull() {return isEmptyOrFull() && totalDataSize() > 0;}
static unsigned nextIndex(unsigned ix) {return (ix+1)%SegmentQueueSize;}
static unsigned prevIndex(unsigned ix) {return (ix+SegmentQueueSize-1)%SegmentQueueSize;}
unsigned totalDataSize() {return fTotalDataSize;}
void enqueueNewSegment(FramedSource* inputSource, FramedSource* usingSource);
Boolean dequeue();
Boolean insertDummyBeforeTail(unsigned backpointer);
void reset() { fHeadIndex = fNextFreeIndex = fTotalDataSize = 0; }
private:
static void sqAfterGettingSegment(void* clientData,
unsigned numBytesRead,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds);
Boolean sqAfterGettingCommon(Segment& seg, unsigned numBytesRead);
Boolean isEmptyOrFull() {return headIndex() == nextFreeIndex();}
unsigned fHeadIndex, fNextFreeIndex, fTotalDataSize;
// The following is used for asynchronous reads:
FramedSource* fUsingSource;
// This tells us whether the direction in which we're being used
// is MP3->ADU, or vice-versa. (This flag is used for debugging output.)
Boolean fDirectionIsToADU;
// The following is true iff we're used to enqueue incoming
// ADU frames, and these have an ADU descriptor in front
Boolean fIncludeADUdescriptors;
};
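// Editor's note (illustration): the queue above is a fixed ring of
// SegmentQueueSize (20) segments. fHeadIndex == fNextFreeIndex when the ring
// is either completely empty or completely full, so isEmpty() and isFull()
// disambiguate the two cases by whether totalDataSize() is still zero.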
////////// ADUFromMP3Source //////////
ADUFromMP3Source::ADUFromMP3Source(UsageEnvironment& env,
FramedSource* inputSource,
Boolean includeADUdescriptors)
: FramedFilter(env, inputSource),
fAreEnqueueingMP3Frame(False),
fSegments(new SegmentQueue(True /* because we're MP3->ADU */,
False /*no descriptors in incoming frames*/)),
fIncludeADUdescriptors(includeADUdescriptors),
fTotalDataSizeBeforePreviousRead(0), fScale(1), fFrameCounter(0) {
}
ADUFromMP3Source::~ADUFromMP3Source() {
delete fSegments;
}
char const* ADUFromMP3Source::MIMEtype() const {
return "audio/MPA-ROBUST";
}
ADUFromMP3Source* ADUFromMP3Source::createNew(UsageEnvironment& env,
FramedSource* inputSource,
Boolean includeADUdescriptors) {
// The source must be a MPEG audio source:
if (strcmp(inputSource->MIMEtype(), "audio/MPEG") != 0) {
env.setResultMsg(inputSource->name(), " is not an MPEG audio source");
return NULL;
}
return new ADUFromMP3Source(env, inputSource, includeADUdescriptors);
}
void ADUFromMP3Source::resetInput() {
fSegments->reset();
}
Boolean ADUFromMP3Source::setScaleFactor(int scale) {
if (scale < 1) return False;
fScale = scale;
return True;
}
void ADUFromMP3Source::doGetNextFrame() {
if (!fAreEnqueueingMP3Frame) {
// Arrange to enqueue a new MP3 frame:
fTotalDataSizeBeforePreviousRead = fSegments->totalDataSize();
fAreEnqueueingMP3Frame = True;
fSegments->enqueueNewSegment(fInputSource, this);
} else {
// Deliver an ADU from a previously-read MP3 frame:
fAreEnqueueingMP3Frame = False;
if (!doGetNextFrame1()) {
// An internal error occurred; act as if our source went away:
FramedSource::handleClosure(this);
}
}
}
Boolean ADUFromMP3Source::doGetNextFrame1() {
// First, check whether we have enough previously-read data to output an
// ADU for the last-read MP3 frame:
unsigned tailIndex;
Segment* tailSeg;
Boolean needMoreData;
if (fSegments->isEmpty()) {
needMoreData = True;
tailSeg = NULL; tailIndex = 0; // unneeded, but stops compiler warnings
} else {
tailIndex = SegmentQueue::prevIndex(fSegments->nextFreeIndex());
tailSeg = &(fSegments->s[tailIndex]);
needMoreData
= fTotalDataSizeBeforePreviousRead < tailSeg->backpointer // bp points back too far
|| tailSeg->backpointer + tailSeg->dataHere() < tailSeg->aduSize; // not enough data
}
if (needMoreData) {
// We don't have enough data to output an ADU from the last-read MP3
// frame, so need to read another one and try again:
doGetNextFrame();
return True;
}
// Output an ADU from the tail segment:
fFrameSize = tailSeg->headerSize+tailSeg->sideInfoSize+tailSeg->aduSize;
fPresentationTime = tailSeg->presentationTime;
fDurationInMicroseconds = tailSeg->durationInMicroseconds;
unsigned descriptorSize
= fIncludeADUdescriptors ? ADUdescriptor::computeSize(fFrameSize) : 0;
#ifdef DEBUG
fprintf(stderr, "m->a:outputting ADU %d<-%d, nbr:%d, sis:%d, dh:%d, (descriptor size: %d)\n", tailSeg->aduSize, tailSeg->backpointer, fFrameSize, tailSeg->sideInfoSize, tailSeg->dataHere(), descriptorSize);
#endif
if (descriptorSize + fFrameSize > fMaxSize) {
envir() << "ADUFromMP3Source::doGetNextFrame1(): not enough room ("
<< descriptorSize + fFrameSize << ">"
<< fMaxSize << ")\n";
fFrameSize = 0;
return False;
}
unsigned char* toPtr = fTo;
// output the ADU descriptor:
if (fIncludeADUdescriptors) {
fFrameSize += ADUdescriptor::generateDescriptor(toPtr, fFrameSize);
}
// output header and side info:
memmove(toPtr, tailSeg->dataStart(),
tailSeg->headerSize + tailSeg->sideInfoSize);
toPtr += tailSeg->headerSize + tailSeg->sideInfoSize;
// go back to the frame that contains the start of our data:
unsigned offset = 0;
unsigned i = tailIndex;
unsigned prevBytes = tailSeg->backpointer;
while (prevBytes > 0) {
i = SegmentQueue::prevIndex(i);
unsigned dataHere = fSegments->s[i].dataHere();
if (dataHere < prevBytes) {
prevBytes -= dataHere;
} else {
offset = dataHere - prevBytes;
break;
}
}
// dequeue any segments that we no longer need:
while (fSegments->headIndex() != i) {
fSegments->dequeue(); // we're done with it
}
unsigned bytesToUse = tailSeg->aduSize;
while (bytesToUse > 0) {
Segment& seg = fSegments->s[i];
unsigned char* fromPtr
= &seg.dataStart()[seg.headerSize + seg.sideInfoSize + offset];
unsigned dataHere = seg.dataHere() - offset;
unsigned bytesUsedHere = dataHere < bytesToUse ? dataHere : bytesToUse;
memmove(toPtr, fromPtr, bytesUsedHere);
bytesToUse -= bytesUsedHere;
toPtr += bytesUsedHere;
offset = 0;
i = SegmentQueue::nextIndex(i);
}
if (fFrameCounter++%fScale == 0) {
// Call our own 'after getting' function. Because we're not a 'leaf'
// source, we can call this directly, without risking infinite recursion.
afterGetting(this);
} else {
// Don't use this frame; get another one:
doGetNextFrame();
}
return True;
}
////////// MP3FromADUSource //////////
MP3FromADUSource::MP3FromADUSource(UsageEnvironment& env,
FramedSource* inputSource,
Boolean includeADUdescriptors)
: FramedFilter(env, inputSource),
fAreEnqueueingADU(False),
fSegments(new SegmentQueue(False /* because we're ADU->MP3 */,
includeADUdescriptors)) {
}
MP3FromADUSource::~MP3FromADUSource() {
delete fSegments;
}
char const* MP3FromADUSource::MIMEtype() const {
return "audio/MPEG";
}
MP3FromADUSource* MP3FromADUSource::createNew(UsageEnvironment& env,
FramedSource* inputSource,
Boolean includeADUdescriptors) {
// The source must be an MP3 ADU source:
if (strcmp(inputSource->MIMEtype(), "audio/MPA-ROBUST") != 0) {
env.setResultMsg(inputSource->name(), " is not an MP3 ADU source");
return NULL;
}
return new MP3FromADUSource(env, inputSource, includeADUdescriptors);
}
void MP3FromADUSource::doGetNextFrame() {
if (fAreEnqueueingADU) insertDummyADUsIfNecessary();
fAreEnqueueingADU = False;
if (needToGetAnADU()) {
// Before returning a frame, we must enqueue at least one ADU:
#ifdef TEST_LOSS
NOTE: This code no longer works, because it uses synchronous reads,
which are no longer supported.
static unsigned const framesPerPacket = 10;
static unsigned const frameCount = 0;
static Boolean packetIsLost;
while (1) {
if ((frameCount++)%framesPerPacket == 0) {
packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss #####
}
if (packetIsLost) {
// Read and discard the next input frame (that would be part of
// a lost packet):
Segment dummySegment;
unsigned numBytesRead;
struct timeval presentationTime;
// (this works only if the source can be read synchronously)
fInputSource->syncGetNextFrame(dummySegment.buf,
sizeof dummySegment.buf, numBytesRead,
presentationTime);
} else {
break; // from while (1)
}
}
#endif
fAreEnqueueingADU = True;
fSegments->enqueueNewSegment(fInputSource, this);
} else {
// Return a frame now:
generateFrameFromHeadADU();
// sets fFrameSize, fPresentationTime, and fDurationInMicroseconds
// Call our own 'after getting' function. Because we're not a 'leaf'
// source, we can call this directly, without risking infinite recursion.
afterGetting(this);
}
}
Boolean MP3FromADUSource::needToGetAnADU() {
// Check whether we need to first enqueue a new ADU before we
// can generate a frame for our head ADU.
Boolean needToEnqueue = True;
if (!fSegments->isEmpty()) {
unsigned index = fSegments->headIndex();
Segment* seg = &(fSegments->headSegment());
int const endOfHeadFrame = (int) seg->dataHere();
unsigned frameOffset = 0;
while (1) {
int endOfData = frameOffset - seg->backpointer + seg->aduSize;
if (endOfData >= endOfHeadFrame) {
// We already have enough data to generate a frame
needToEnqueue = False;
break;
}
frameOffset += seg->dataHere();
index = SegmentQueue::nextIndex(index);
if (index == fSegments->nextFreeIndex()) break;
seg = &(fSegments->s[index]);
}
}
return needToEnqueue;
}
void MP3FromADUSource::insertDummyADUsIfNecessary() {
if (fSegments->isEmpty()) return; // shouldn't happen
// The tail segment (ADU) is assumed to have been recently
// enqueued. If its backpointer would overlap the data
// of the previous ADU, then we need to insert one or more
// empty, 'dummy' ADUs ahead of it. (This situation should occur
// only if an intermediate ADU was lost.)
unsigned tailIndex
= SegmentQueue::prevIndex(fSegments->nextFreeIndex());
Segment* tailSeg = &(fSegments->s[tailIndex]);
while (1) {
unsigned prevADUend; // relative to the start of the new ADU
if (fSegments->headIndex() != tailIndex) {
// there is a previous segment
unsigned prevIndex = SegmentQueue::prevIndex(tailIndex);
Segment& prevSegment = fSegments->s[prevIndex];
prevADUend = prevSegment.dataHere() + prevSegment.backpointer;
if (prevSegment.aduSize > prevADUend) {
// shouldn't happen if the previous ADU was well-formed
prevADUend = 0;
} else {
prevADUend -= prevSegment.aduSize;
}
} else {
prevADUend = 0;
}
if (tailSeg->backpointer > prevADUend) {
// We need to insert a dummy ADU in front of the tail
#ifdef DEBUG
fprintf(stderr, "a->m:need to insert a dummy ADU (%d, %d, %d) [%d, %d]\n", tailSeg->backpointer, prevADUend, tailSeg->dataHere(), fSegments->headIndex(), fSegments->nextFreeIndex());
#endif
tailIndex = fSegments->nextFreeIndex();
if (!fSegments->insertDummyBeforeTail(prevADUend)) return;
tailSeg = &(fSegments->s[tailIndex]);
} else {
break; // no more dummy ADUs need to be inserted
}
}
}
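// Editor's worked example (illustration, not in the original source): if the
// previous ADU ends exactly at the start of the tail ADU's frame
// (prevADUend == 0) but the tail ADU's backpointer is 300, then 300 bytes of
// shared 'bit reservoir' data belonged to ADUs that were never received.
// The loop above therefore inserts zero-sized dummy ADUs until the
// backpointers line up again.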
Boolean MP3FromADUSource::generateFrameFromHeadADU() {
// Output a frame for the head ADU:
if (fSegments->isEmpty()) return False;
unsigned index = fSegments->headIndex();
Segment* seg = &(fSegments->headSegment());
#ifdef DEBUG
fprintf(stderr, "a->m:outputting frame for %d<-%d (fs %d, dh %d), (descriptorSize: %d)\n", seg->aduSize, seg->backpointer, seg->frameSize, seg->dataHere(), seg->descriptorSize);
#endif
unsigned char* toPtr = fTo;
// output header and side info:
fFrameSize = seg->frameSize;
fPresentationTime = seg->presentationTime;
fDurationInMicroseconds = seg->durationInMicroseconds;
memmove(toPtr, seg->dataStart(), seg->headerSize + seg->sideInfoSize);
toPtr += seg->headerSize + seg->sideInfoSize;
// zero out the rest of the frame, in case ADU data doesn't fill it all in
unsigned bytesToZero = seg->dataHere();
for (unsigned i = 0; i < bytesToZero; ++i) {
toPtr[i] = '\0';
}
// Fill in the frame with appropriate ADU data from this and
// subsequent ADUs:
unsigned frameOffset = 0;
unsigned toOffset = 0;
unsigned const endOfHeadFrame = seg->dataHere();
while (toOffset < endOfHeadFrame) {
int startOfData = frameOffset - seg->backpointer;
if (startOfData > (int)endOfHeadFrame) break; // no more ADUs needed
int endOfData = startOfData + seg->aduSize;
if (endOfData > (int)endOfHeadFrame) {
endOfData = endOfHeadFrame;
}
unsigned fromOffset;
if (startOfData <= (int)toOffset) {
fromOffset = toOffset - startOfData;
startOfData = toOffset;
if (endOfData < startOfData) endOfData = startOfData;
} else {
fromOffset = 0;
// we may need some padding bytes beforehand
unsigned bytesToZero = startOfData - toOffset;
#ifdef DEBUG
if (bytesToZero > 0) fprintf(stderr, "a->m:outputting %d zero bytes (%d, %d, %d, %d)\n", bytesToZero, startOfData, toOffset, frameOffset, seg->backpointer);
#endif
toOffset += bytesToZero;
}
unsigned char* fromPtr
= &seg->dataStart()[seg->headerSize + seg->sideInfoSize + fromOffset];
unsigned bytesUsedHere = endOfData - startOfData;
#ifdef DEBUG
if (bytesUsedHere > 0) fprintf(stderr, "a->m:outputting %d bytes from %d<-%d\n", bytesUsedHere, seg->aduSize, seg->backpointer);
#endif
memmove(toPtr + toOffset, fromPtr, bytesUsedHere);
toOffset += bytesUsedHere;
frameOffset += seg->dataHere();
index = SegmentQueue::nextIndex(index);
if (index == fSegments->nextFreeIndex()) break;
seg = &(fSegments->s[index]);
}
fSegments->dequeue();
return True;
}
////////// Segment //////////
unsigned Segment::dataHere() {
int result = frameSize - (headerSize + sideInfoSize);
if (result < 0) {
return 0;
}
return (unsigned)result;
}
////////// SegmentQueue //////////
void SegmentQueue::enqueueNewSegment(FramedSource* inputSource,
FramedSource* usingSource) {
if (isFull()) {
usingSource->envir() << "SegmentQueue::enqueueNewSegment() overflow\n";
FramedSource::handleClosure(usingSource);
return;
}
fUsingSource = usingSource;
Segment& seg = nextFreeSegment();
inputSource->getNextFrame(seg.buf, sizeof seg.buf,
sqAfterGettingSegment, this,
FramedSource::handleClosure, usingSource);
}
void SegmentQueue::sqAfterGettingSegment(void* clientData,
unsigned numBytesRead,
unsigned /*numTruncatedBytes*/,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
SegmentQueue* segQueue = (SegmentQueue*)clientData;
Segment& seg = segQueue->nextFreeSegment();
seg.presentationTime = presentationTime;
seg.durationInMicroseconds = durationInMicroseconds;
if (segQueue->sqAfterGettingCommon(seg, numBytesRead)) {
#ifdef DEBUG
char const* direction = segQueue->fDirectionIsToADU ? "m->a" : "a->m";
fprintf(stderr, "%s:read frame %d<-%d, fs:%d, sis:%d, dh:%d, (descriptor size: %d)\n", direction, seg.aduSize, seg.backpointer, seg.frameSize, seg.sideInfoSize, seg.dataHere(), seg.descriptorSize);
#endif
}
// Continue our original calling source where it left off:
segQueue->fUsingSource->doGetNextFrame();
}
// Common code called after a new segment is enqueued
Boolean SegmentQueue::sqAfterGettingCommon(Segment& seg,
unsigned numBytesRead) {
unsigned char* fromPtr = seg.buf;
if (fIncludeADUdescriptors) {
// The newly-read data is assumed to be an ADU with a descriptor
// in front
(void)ADUdescriptor::getRemainingFrameSize(fromPtr);
seg.descriptorSize = (unsigned)(fromPtr-seg.buf);
} else {
seg.descriptorSize = 0;
}
// parse the MP3-specific info in the frame to get the ADU params
unsigned hdr;
MP3SideInfo sideInfo;
if (!GetADUInfoFromMP3Frame(fromPtr, numBytesRead,
hdr, seg.frameSize,
sideInfo, seg.sideInfoSize,
seg.backpointer, seg.aduSize)) {
return False;
}
// If we've just read an ADU (rather than a regular MP3 frame), then use the
// entire "numBytesRead" data for the 'aduSize', so that we include any
// 'ancillary data' that may be present at the end of the ADU:
if (!fDirectionIsToADU) {
unsigned newADUSize
= numBytesRead - seg.descriptorSize - 4/*header size*/ - seg.sideInfoSize;
if (newADUSize > seg.aduSize) seg.aduSize = newADUSize;
}
fTotalDataSize += seg.dataHere();
fNextFreeIndex = nextIndex(fNextFreeIndex);
return True;
}
Boolean SegmentQueue::dequeue() {
if (isEmpty()) {
fUsingSource->envir() << "SegmentQueue::dequeue(): underflow!\n";
return False;
}
Segment& seg = s[headIndex()];
fTotalDataSize -= seg.dataHere();
fHeadIndex = nextIndex(fHeadIndex);
return True;
}
Boolean SegmentQueue::insertDummyBeforeTail(unsigned backpointer) {
if (isEmptyOrFull()) return False;
// Copy the current tail segment to its new position, then modify the
// old tail segment to be a 'dummy' ADU
unsigned newTailIndex = nextFreeIndex();
Segment& newTailSeg = s[newTailIndex];
unsigned oldTailIndex = prevIndex(newTailIndex);
Segment& oldTailSeg = s[oldTailIndex];
newTailSeg = oldTailSeg; // structure copy
// Begin by setting (replacing) the ADU descriptor of the dummy ADU:
unsigned char* ptr = oldTailSeg.buf;
if (fIncludeADUdescriptors) {
unsigned remainingFrameSize
= oldTailSeg.headerSize + oldTailSeg.sideInfoSize + 0 /* 0-size ADU */;
unsigned currentDescriptorSize = oldTailSeg.descriptorSize;
if (currentDescriptorSize == 2) {
ADUdescriptor::generateTwoByteDescriptor(ptr, remainingFrameSize);
} else {
(void)ADUdescriptor::generateDescriptor(ptr, remainingFrameSize);
}
}
// Then zero out the side info of the dummy frame:
if (!ZeroOutMP3SideInfo(ptr, oldTailSeg.frameSize,
backpointer)) return False;
unsigned dummyNumBytesRead
= oldTailSeg.descriptorSize + 4/*header size*/ + oldTailSeg.sideInfoSize;
return sqAfterGettingCommon(oldTailSeg, dummyNumBytesRead);
}
live/liveMedia/rtcp_from_spec.h
/* RTCP code taken directly from the most recent RTP specification:
* RFC 3550 (formerly draft-ietf-avt-rtp-new-11.txt)
* C header
*/
#ifndef _RTCP_FROM_SPEC_H
#define _RTCP_FROM_SPEC_H
#include <stdlib.h> /* editor's assumption; the original header name was elided in this dump */
/* Definitions of _ANSI_ARGS and EXTERN that will work in either
C or C++ code:
*/
#undef _ANSI_ARGS_
#if ((defined(__STDC__) || defined(SABER)) && !defined(NO_PROTOTYPE)) || defined(__cplusplus) || defined(USE_PROTOTYPE)
# define _ANSI_ARGS_(x) x
#else
# define _ANSI_ARGS_(x) ()
#endif
#ifdef __cplusplus
# define EXTERN extern "C"
#else
# define EXTERN extern
#endif
/* The code from the spec assumes a type "event"; make this a void*: */
typedef void* event;
#define EVENT_UNKNOWN 0
#define EVENT_REPORT 1
#define EVENT_BYE 2
/* The code from the spec assumes a type "time_tp"; make this a double: */
typedef double time_tp;
/* The code from the spec assumes a type "packet"; make this a void*: */
typedef void* packet;
#define PACKET_UNKNOWN_TYPE 0
#define PACKET_RTP 1
#define PACKET_RTCP_REPORT 2
#define PACKET_BYE 3
/* The code from the spec calls drand48(), but we have drand30() instead */
#define drand48 drand30
/* The code calls "exit()", but we don't want to exit, so make it a noop: */
#define exit(n) do {} while (0)
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
/* EXPORTS: */
EXTERN void OnExpire _ANSI_ARGS_((event, int, int, double, int, double*, int*, time_tp, time_tp*, int*));
EXTERN void OnReceive _ANSI_ARGS_((packet, event, int*, int*, int*, double*, double*, double, double));
/* IMPORTS: */
EXTERN void Schedule _ANSI_ARGS_((double,event));
EXTERN void Reschedule _ANSI_ARGS_((double,event));
EXTERN void SendRTCPReport _ANSI_ARGS_((event));
EXTERN void SendBYEPacket _ANSI_ARGS_((event));
EXTERN int TypeOfEvent _ANSI_ARGS_((event));
EXTERN int SentPacketSize _ANSI_ARGS_((event));
EXTERN int PacketType _ANSI_ARGS_((packet));
EXTERN int ReceivedPacketSize _ANSI_ARGS_((packet));
EXTERN int NewMember _ANSI_ARGS_((packet));
EXTERN int NewSender _ANSI_ARGS_((packet));
EXTERN void AddMember _ANSI_ARGS_((packet));
EXTERN void AddSender _ANSI_ARGS_((packet));
EXTERN void RemoveMember _ANSI_ARGS_((packet));
EXTERN void RemoveSender _ANSI_ARGS_((packet));
EXTERN double drand30 _ANSI_ARGS_((void));
#endif
live/liveMedia/MatroskaFileParser.cpp
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A parser for a Matroska file.
// Implementation
#include "MatroskaFileParser.hh"
#include "MatroskaDemuxedTrack.hh"
#include <string.h>
#include <sys/time.h> // for "gettimeofday()"
MatroskaFileParser::MatroskaFileParser(MatroskaFile& ourFile, FramedSource* inputSource,
FramedSource::onCloseFunc* onEndFunc, void* onEndClientData,
MatroskaDemux* ourDemux)
: StreamParser(inputSource, onEndFunc, onEndClientData, continueParsing, this),
fOurFile(ourFile), fInputSource(inputSource),
fOnEndFunc(onEndFunc), fOnEndClientData(onEndClientData),
fOurDemux(ourDemux),
fCurOffsetInFile(0), fSavedCurOffsetInFile(0), fLimitOffsetInFile(0),
fNumHeaderBytesToSkip(0), fClusterTimecode(0), fBlockTimecode(0),
fFrameSizesWithinBlock(NULL),
fPresentationTimeOffset(0.0) {
if (ourDemux == NULL) {
// Initialization
fCurrentParseState = PARSING_START_OF_FILE;
continueParsing();
} else {
fCurrentParseState = LOOKING_FOR_CLUSTER;
// In this case, parsing (of track data) doesn't start until a client starts reading from a track.
}
}
MatroskaFileParser::~MatroskaFileParser() {
delete[] fFrameSizesWithinBlock;
Medium::close(fInputSource);
}
void MatroskaFileParser::seekToTime(double& seekNPT) {
#ifdef DEBUG
fprintf(stderr, "seekToTime(%f)\n", seekNPT);
#endif
if (seekNPT <= 0.0) {
#ifdef DEBUG
fprintf(stderr, "\t=> start of file\n");
#endif
seekNPT = 0.0;
seekToFilePosition(0);
} else if (seekNPT >= fOurFile.fileDuration()) {
#ifdef DEBUG
fprintf(stderr, "\t=> end of file\n");
#endif
seekNPT = fOurFile.fileDuration();
seekToEndOfFile();
} else {
u_int64_t clusterOffsetInFile;
unsigned blockNumWithinCluster;
if (!fOurFile.lookupCuePoint(seekNPT, clusterOffsetInFile, blockNumWithinCluster)) {
#ifdef DEBUG
fprintf(stderr, "\t=> not supported\n");
#endif
return; // seeking not supported
}
#ifdef DEBUG
fprintf(stderr, "\t=> seek time %f, file position %llu, block number within cluster %d\n", seekNPT, clusterOffsetInFile, blockNumWithinCluster);
#endif
seekToFilePosition(clusterOffsetInFile);
fCurrentParseState = LOOKING_FOR_BLOCK;
// LATER handle "blockNumWithinCluster"; for now, we assume that it's 0 #####
}
}
void MatroskaFileParser
::continueParsing(void* clientData, unsigned char* /*ptr*/, unsigned /*size*/, struct timeval /*presentationTime*/) {
((MatroskaFileParser*)clientData)->continueParsing();
}
void MatroskaFileParser::continueParsing() {
if (fInputSource != NULL) {
if (fInputSource->isCurrentlyAwaitingData()) return; // Our input source is currently being read. Wait until that read completes
if (!parse()) {
// We didn't complete the parsing, because we had to read more data from the source, or because we're waiting for
// another read from downstream. Once that happens, we'll get called again.
return;
}
}
// We successfully parsed the file's 'Track' headers. Call our 'done' function now:
if (fOnEndFunc != NULL) (*fOnEndFunc)(fOnEndClientData);
}
Boolean MatroskaFileParser::parse() {
Boolean areDone = False;
try {
skipRemainingHeaderBytes(True); // if any
do {
switch (fCurrentParseState) {
case PARSING_START_OF_FILE: {
areDone = parseStartOfFile();
break;
}
case LOOKING_FOR_TRACKS: {
lookForNextTrack();
break;
}
case PARSING_TRACK: {
areDone = parseTrack();
if (areDone && fOurFile.fCuesOffset > 0) {
// We've finished parsing the 'Track' information. There are also 'Cues' in the file, so parse those before finishing:
// Seek to the specified position in the file. We were already told that the 'Cues' begins there:
#ifdef DEBUG
fprintf(stderr, "Seeking to file position %llu (the previously-reported location of 'Cues')\n", fOurFile.fCuesOffset);
#endif
seekToFilePosition(fOurFile.fCuesOffset);
fCurrentParseState = PARSING_CUES;
areDone = False;
}
break;
}
case PARSING_CUES: {
areDone = parseCues();
break;
}
case LOOKING_FOR_CLUSTER: {
if (fOurFile.fClusterOffset > 0) {
// Optimization: Seek to the specified position in the file. We were already told that the 'Cluster' begins there:
#ifdef DEBUG
fprintf(stderr, "Optimization: Seeking to file position %llu (the previously-reported location of a 'Cluster')\n", fOurFile.fClusterOffset);
#endif
seekToFilePosition(fOurFile.fClusterOffset);
}
fCurrentParseState = LOOKING_FOR_BLOCK;
break;
}
case LOOKING_FOR_BLOCK: {
lookForNextBlock();
break;
}
case PARSING_BLOCK: {
parseBlock();
break;
}
case DELIVERING_FRAME_WITHIN_BLOCK: {
if (!deliverFrameWithinBlock()) return False;
break;
}
case DELIVERING_FRAME_BYTES: {
deliverFrameBytes();
return False; // Halt parsing for now. A new 'read' from downstream will cause parsing to resume.
break;
}
}
} while (!areDone);
return True;
} catch (int /*e*/) {
#ifdef DEBUG
fprintf(stderr, "MatroskaFileParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
#endif
return False; // the parsing got interrupted
}
}
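// Editor's note (illustration; the handlers for some states are defined
// elsewhere in this file): for a file being streamed, the states above are
// normally visited in the order PARSING_START_OF_FILE -> LOOKING_FOR_TRACKS
// -> PARSING_TRACK -> (PARSING_CUES, if a 'Cues' offset was seen) ->
// LOOKING_FOR_CLUSTER -> LOOKING_FOR_BLOCK -> PARSING_BLOCK ->
// DELIVERING_FRAME_WITHIN_BLOCK -> DELIVERING_FRAME_BYTES, with the last
// three states cycling once per delivered frame.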
Boolean MatroskaFileParser::parseStartOfFile() {
#ifdef DEBUG
fprintf(stderr, "parsing start of file\n");
#endif
EBMLId id;
EBMLDataSize size;
// The file must begin with the standard EBML header (which we skip):
if (!parseEBMLIdAndSize(id, size) || id != MATROSKA_ID_EBML) {
fOurFile.envir() << "ERROR: File does not begin with an EBML header\n";
return True; // We're done with the file, because it's not valid
}
#ifdef DEBUG
fprintf(stderr, "MatroskaFileParser::parseStartOfFile(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val());
#endif
fCurrentParseState = LOOKING_FOR_TRACKS;
skipHeader(size);
return False; // because we have more parsing to do - inside the 'Track' header
}
void MatroskaFileParser::lookForNextTrack() {
#ifdef DEBUG
fprintf(stderr, "looking for Track\n");
#endif
EBMLId id;
EBMLDataSize size;
// Read and skip over (or enter) each Matroska header, until we get to a 'Track'.
while (fCurrentParseState == LOOKING_FOR_TRACKS) {
while (!parseEBMLIdAndSize(id, size)) {}
#ifdef DEBUG
fprintf(stderr, "MatroskaFileParser::lookForNextTrack(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val());
#endif
switch (id.val()) {
case MATROSKA_ID_SEGMENT: { // 'Segment' header: enter this
// Remember the position, within the file, of the start of Segment data, because Seek Positions are relative to this:
fOurFile.fSegmentDataOffset = fCurOffsetInFile;
break;
}
case MATROSKA_ID_SEEK_HEAD: { // 'Seek Head' header: enter this
break;
}
case MATROSKA_ID_SEEK: { // 'Seek' header: enter this
break;
}
case MATROSKA_ID_SEEK_ID: { // 'Seek ID' header: get this value
if (parseEBMLNumber(fLastSeekId)) {
#ifdef DEBUG
fprintf(stderr, "\tSeek ID 0x%s:\t%s\n", fLastSeekId.hexString(), fLastSeekId.stringName());
#endif
}
break;
}
case MATROSKA_ID_SEEK_POSITION: { // 'Seek Position' header: get this value
u_int64_t seekPosition;
if (parseEBMLVal_unsigned64(size, seekPosition)) {
u_int64_t offsetInFile = fOurFile.fSegmentDataOffset + seekPosition;
#ifdef DEBUG
fprintf(stderr, "\tSeek Position %llu (=> offset within the file: %llu (0x%llx))\n", seekPosition, offsetInFile, offsetInFile);
#endif
// The only 'Seek Position's that we care about are for 'Cluster' and 'Cues':
if (fLastSeekId == MATROSKA_ID_CLUSTER) {
fOurFile.fClusterOffset = offsetInFile;
} else if (fLastSeekId == MATROSKA_ID_CUES) {
fOurFile.fCuesOffset = offsetInFile;
}
}
break;
}
case MATROSKA_ID_INFO: { // 'Segment Info' header: enter this
break;
}
case MATROSKA_ID_TIMECODE_SCALE: { // 'Timecode Scale' header: get this value
unsigned timecodeScale;
if (parseEBMLVal_unsigned(size, timecodeScale) && timecodeScale > 0) {
fOurFile.fTimecodeScale = timecodeScale;
#ifdef DEBUG
fprintf(stderr, "\tTimecode Scale %u ns (=> Segment Duration == %f seconds)\n",
fOurFile.timecodeScale(), fOurFile.segmentDuration()*(fOurFile.fTimecodeScale/1000000000.0f));
#endif
}
break;
}
case MATROSKA_ID_DURATION: { // 'Segment Duration' header: get this value
if (parseEBMLVal_float(size, fOurFile.fSegmentDuration)) {
#ifdef DEBUG
fprintf(stderr, "\tSegment Duration %f (== %f seconds)\n",
fOurFile.segmentDuration(), fOurFile.segmentDuration()*(fOurFile.fTimecodeScale/1000000000.0f));
#endif
}
break;
}
#ifdef DEBUG
case MATROSKA_ID_TITLE: { // 'Segment Title': display this value
char* title;
if (parseEBMLVal_string(size, title)) {
#ifdef DEBUG
fprintf(stderr, "\tTitle: %s\n", title);
#endif
delete[] title;
}
break;
}
#endif
case MATROSKA_ID_TRACKS: { // enter this, and move on to parsing 'Tracks'
fLimitOffsetInFile = fCurOffsetInFile + size.val(); // Make sure we don't read past the end of this header
fCurrentParseState = PARSING_TRACK;
break;
}
default: { // skip over this header
skipHeader(size);
break;
}
}
setParseState();
}
}
Boolean MatroskaFileParser::parseTrack() {
#ifdef DEBUG
fprintf(stderr, "parsing Track\n");
#endif
// Read and process each Matroska header, until we get to the end of the Track:
MatroskaTrack* track = NULL;
EBMLId id;
EBMLDataSize size;
while (fCurOffsetInFile < fLimitOffsetInFile) {
while (!parseEBMLIdAndSize(id, size)) {}
#ifdef DEBUG
if (id == MATROSKA_ID_TRACK_ENTRY) fprintf(stderr, "\n"); // makes debugging output easier to read
fprintf(stderr, "MatroskaFileParser::parseTrack(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val());
#endif
switch (id.val()) {
case MATROSKA_ID_TRACK_ENTRY: { // 'Track Entry' header: enter this
// Create a new "MatroskaTrack" object for this entry:
if (track != NULL && track->trackNumber == 0) delete track; // We had a previous "MatroskaTrack" object that was never used
track = new MatroskaTrack;
break;
}
case MATROSKA_ID_TRACK_NUMBER: {
unsigned trackNumber;
if (parseEBMLVal_unsigned(size, trackNumber)) {
#ifdef DEBUG
fprintf(stderr, "\tTrack Number %d\n", trackNumber);
#endif
if (track != NULL && trackNumber != 0) {
track->trackNumber = trackNumber;
fOurFile.addTrack(track, trackNumber);
}
}
break;
}
case MATROSKA_ID_TRACK_TYPE: {
unsigned trackType;
if (parseEBMLVal_unsigned(size, trackType) && track != NULL) {
// We convert the Matroska 'track type' code into our own code (which we can use as a bitmap):
track->trackType
= trackType == 1 ? MATROSKA_TRACK_TYPE_VIDEO : trackType == 2 ? MATROSKA_TRACK_TYPE_AUDIO
: trackType == 0x11 ? MATROSKA_TRACK_TYPE_SUBTITLE : MATROSKA_TRACK_TYPE_OTHER;
#ifdef DEBUG
fprintf(stderr, "\tTrack Type 0x%02x (%s)\n", trackType,
track->trackType == MATROSKA_TRACK_TYPE_VIDEO ? "video" :
track->trackType == MATROSKA_TRACK_TYPE_AUDIO ? "audio" :
track->trackType == MATROSKA_TRACK_TYPE_SUBTITLE ? "subtitle" :
"");
#endif
}
break;
}
case MATROSKA_ID_FLAG_ENABLED: {
unsigned flagEnabled;
if (parseEBMLVal_unsigned(size, flagEnabled)) {
#ifdef DEBUG
fprintf(stderr, "\tTrack is Enabled: %d\n", flagEnabled);
#endif
if (track != NULL) track->isEnabled = flagEnabled != 0;
}
break;
}
case MATROSKA_ID_FLAG_DEFAULT: {
unsigned flagDefault;
if (parseEBMLVal_unsigned(size, flagDefault)) {
#ifdef DEBUG
fprintf(stderr, "\tTrack is Default: %d\n", flagDefault);
#endif
if (track != NULL) track->isDefault = flagDefault != 0;
}
break;
}
case MATROSKA_ID_FLAG_FORCED: {
unsigned flagForced;
if (parseEBMLVal_unsigned(size, flagForced)) {
#ifdef DEBUG
fprintf(stderr, "\tTrack is Forced: %d\n", flagForced);
#endif
if (track != NULL) track->isForced = flagForced != 0;
}
break;
}
case MATROSKA_ID_DEFAULT_DURATION: {
unsigned defaultDuration;
if (parseEBMLVal_unsigned(size, defaultDuration)) {
#ifdef DEBUG
fprintf(stderr, "\tDefault duration %f ms\n", defaultDuration/1000000.0);
#endif
if (track != NULL) track->defaultDuration = defaultDuration;
}
break;
}
case MATROSKA_ID_MAX_BLOCK_ADDITION_ID: {
unsigned maxBlockAdditionID;
if (parseEBMLVal_unsigned(size, maxBlockAdditionID)) {
#ifdef DEBUG
fprintf(stderr, "\tMax Block Addition ID: %u\n", maxBlockAdditionID);
#endif
}
break;
}
case MATROSKA_ID_NAME: {
char* name;
if (parseEBMLVal_string(size, name)) {
#ifdef DEBUG
fprintf(stderr, "\tName: %s\n", name);
#endif
if (track != NULL) {
delete[] track->name; track->name = name;
} else {
delete[] name;
}
}
break;
}
case MATROSKA_ID_LANGUAGE: {
char* language;
if (parseEBMLVal_string(size, language)) {
#ifdef DEBUG
fprintf(stderr, "\tLanguage: %s\n", language);
#endif
if (track != NULL) {
delete[] track->language; track->language = language;
} else {
delete[] language;
}
}
break;
}
case MATROSKA_ID_CODEC: {
char* codecID;
if (parseEBMLVal_string(size, codecID)) {
#ifdef DEBUG
fprintf(stderr, "\tCodec ID: %s\n", codecID);
#endif
if (track != NULL) {
delete[] track->codecID; track->codecID = codecID;
} else {
delete[] codecID;
}
}
break;
}
case MATROSKA_ID_CODEC_PRIVATE: {
u_int8_t* codecPrivate;
unsigned codecPrivateSize;
if (parseEBMLVal_binary(size, codecPrivate)) {
codecPrivateSize = (unsigned)size.val();
#ifdef DEBUG
fprintf(stderr, "\tCodec Private: ");
for (unsigned i = 0; i < codecPrivateSize; ++i) fprintf(stderr, "%02x:", codecPrivate[i]);
fprintf(stderr, "\n");
#endif
if (track != NULL) {
delete[] track->codecPrivate; track->codecPrivate = codecPrivate;
track->codecPrivateSize = codecPrivateSize;
// Hack for H.264 and H.265: The 'codec private' data contains
// the size of NAL unit lengths:
if (track->codecID != NULL) {
if (strcmp(track->codecID, "V_MPEG4/ISO/AVC") == 0) { // H.264
// Byte 4 of the 'codec private' data contains 'lengthSizeMinusOne':
 if (codecPrivateSize >= 5) track->subframeSizeSize = 1 + (codecPrivate[4]&0x3);
} else if (strcmp(track->codecID, "V_MPEGH/ISO/HEVC") == 0) { // H.265
// H.265 'codec private' data is *supposed* to use the format that's described in
// http://lists.matroska.org/pipermail/matroska-devel/2013-September/004567.html
// However, some Matroska files use the same format that was used for H.264.
// We check for this here, by checking various fields that are supposed to be
// 'all-1' in the 'correct' format:
if (codecPrivateSize < 23 || (codecPrivate[13]&0xF0) != 0xF0 ||
(codecPrivate[15]&0xFC) != 0xFC || (codecPrivate[16]&0xFC) != 0xFC ||
(codecPrivate[17]&0xF8) != 0xF8 || (codecPrivate[18]&0xF8) != 0xF8) {
// The 'correct' format isn't being used, so assume the H.264 format instead:
track->codecPrivateUsesH264FormatForH265 = True;
// Byte 4 of the 'codec private' data contains 'lengthSizeMinusOne':
 if (codecPrivateSize >= 5) track->subframeSizeSize = 1 + (codecPrivate[4]&0x3);
} else {
// This looks like the 'correct' format:
track->codecPrivateUsesH264FormatForH265 = False;
// Byte 21 of the 'codec private' data contains 'lengthSizeMinusOne':
 track->subframeSizeSize = 1 + (codecPrivate[21]&0x3);
}
}
}
} else {
delete[] codecPrivate;
}
}
break;
}
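 // Illustration of the 'codec private' parsing above (hypothetical bytes, not taken from any particular file):
 // an H.264 AVCDecoderConfigurationRecord beginning 01 64 00 28 FF ... carries 'lengthSizeMinusOne'
 // in the low 2 bits of byte 4 (0xFF & 0x3 == 3), so each NAL unit ('subframe') within a frame
 // would be preceded by a 3+1 == 4-byte length field - i.e., "subframeSizeSize" == 4.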
case MATROSKA_ID_VIDEO: { // 'Video settings' header: enter this
break;
}
case MATROSKA_ID_PIXEL_WIDTH: {
unsigned pixelWidth;
if (parseEBMLVal_unsigned(size, pixelWidth)) {
#ifdef DEBUG
fprintf(stderr, "\tPixel Width %d\n", pixelWidth);
#endif
}
break;
}
case MATROSKA_ID_PIXEL_HEIGHT: {
unsigned pixelHeight;
if (parseEBMLVal_unsigned(size, pixelHeight)) {
#ifdef DEBUG
fprintf(stderr, "\tPixel Height %d\n", pixelHeight);
#endif
}
break;
}
case MATROSKA_ID_DISPLAY_WIDTH: {
unsigned displayWidth;
if (parseEBMLVal_unsigned(size, displayWidth)) {
#ifdef DEBUG
fprintf(stderr, "\tDisplay Width %d\n", displayWidth);
#endif
}
break;
}
case MATROSKA_ID_DISPLAY_HEIGHT: {
unsigned displayHeight;
if (parseEBMLVal_unsigned(size, displayHeight)) {
#ifdef DEBUG
fprintf(stderr, "\tDisplay Height %d\n", displayHeight);
#endif
}
break;
}
case MATROSKA_ID_DISPLAY_UNIT: {
unsigned displayUnit;
if (parseEBMLVal_unsigned(size, displayUnit)) {
#ifdef DEBUG
fprintf(stderr, "\tDisplay Unit %d\n", displayUnit);
#endif
}
break;
}
case MATROSKA_ID_AUDIO: { // 'Audio settings' header: enter this
break;
}
case MATROSKA_ID_SAMPLING_FREQUENCY: {
float samplingFrequency;
if (parseEBMLVal_float(size, samplingFrequency)) {
if (track != NULL) {
track->samplingFrequency = (unsigned)samplingFrequency;
#ifdef DEBUG
fprintf(stderr, "\tSampling frequency %f (->%d)\n", samplingFrequency, track->samplingFrequency);
#endif
}
}
break;
}
case MATROSKA_ID_OUTPUT_SAMPLING_FREQUENCY: {
float outputSamplingFrequency;
if (parseEBMLVal_float(size, outputSamplingFrequency)) {
#ifdef DEBUG
fprintf(stderr, "\tOutput sampling frequency %f\n", outputSamplingFrequency);
#endif
}
break;
}
case MATROSKA_ID_CHANNELS: {
unsigned numChannels;
if (parseEBMLVal_unsigned(size, numChannels)) {
#ifdef DEBUG
fprintf(stderr, "\tChannels %d\n", numChannels);
#endif
if (track != NULL) track->numChannels = numChannels;
}
break;
}
case MATROSKA_ID_BIT_DEPTH: {
unsigned bitDepth;
if (parseEBMLVal_unsigned(size, bitDepth)) {
#ifdef DEBUG
fprintf(stderr, "\tBit Depth %d\n", bitDepth);
#endif
}
break;
}
case MATROSKA_ID_CONTENT_ENCODINGS:
case MATROSKA_ID_CONTENT_ENCODING: { // 'Content Encodings' or 'Content Encoding' header: enter this
break;
}
case MATROSKA_ID_CONTENT_COMPRESSION: { // 'Content Compression' header: enter this
// Note: We currently support only 'Header Stripping' compression, not 'zlib' compression (the default algorithm).
// Therefore, we disable this track, unless/until we later see that 'Header Stripping' is supported:
if (track != NULL) track->isEnabled = False;
break;
}
case MATROSKA_ID_CONTENT_COMP_ALGO: {
unsigned contentCompAlgo;
if (parseEBMLVal_unsigned(size, contentCompAlgo)) {
#ifdef DEBUG
fprintf(stderr, "\tContent Compression Algorithm %d (%s)\n", contentCompAlgo,
 contentCompAlgo == 0 ? "zlib" : contentCompAlgo == 3 ? "Header Stripping" : "<unknown>");
#endif
// The only compression algorithm that we support is #3: Header Stripping; disable the track otherwise
if (track != NULL) track->isEnabled = contentCompAlgo == 3;
}
break;
}
case MATROSKA_ID_CONTENT_COMP_SETTINGS: {
u_int8_t* headerStrippedBytes;
unsigned headerStrippedBytesSize;
if (parseEBMLVal_binary(size, headerStrippedBytes)) {
headerStrippedBytesSize = (unsigned)size.val();
#ifdef DEBUG
fprintf(stderr, "\tHeader Stripped Bytes: ");
for (unsigned i = 0; i < headerStrippedBytesSize; ++i) fprintf(stderr, "%02x:", headerStrippedBytes[i]);
fprintf(stderr, "\n");
#endif
if (track != NULL) {
delete[] track->headerStrippedBytes; track->headerStrippedBytes = headerStrippedBytes;
track->headerStrippedBytesSize = headerStrippedBytesSize;
} else {
delete[] headerStrippedBytes;
}
}
break;
}
 case MATROSKA_ID_CONTENT_ENCRYPTION: { // 'Content Encryption' header: skip this
// Note: We don't currently support encryption at all. Therefore, we disable this track:
if (track != NULL) track->isEnabled = False;
// Fall through to...
}
default: { // We don't process this header, so just skip over it:
skipHeader(size);
break;
}
}
setParseState();
}
fLimitOffsetInFile = 0; // reset
if (track != NULL && track->trackNumber == 0) delete track; // We had a previous "MatroskaTrack" object that was never used
return True; // we're done parsing track entries
}
void MatroskaFileParser::lookForNextBlock() {
#ifdef DEBUG
fprintf(stderr, "looking for Block\n");
#endif
// Read and skip over each Matroska header, until we get to a 'Cluster':
EBMLId id;
EBMLDataSize size;
while (fCurrentParseState == LOOKING_FOR_BLOCK) {
while (!parseEBMLIdAndSize(id, size)) {}
#ifdef DEBUG
fprintf(stderr, "MatroskaFileParser::lookForNextBlock(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val());
#endif
switch (id.val()) {
case MATROSKA_ID_SEGMENT: { // 'Segment' header: enter this
break;
}
case MATROSKA_ID_CLUSTER: { // 'Cluster' header: enter this
break;
}
case MATROSKA_ID_TIMECODE: { // 'Timecode' header: get this value
unsigned timecode;
if (parseEBMLVal_unsigned(size, timecode)) {
fClusterTimecode = timecode;
#ifdef DEBUG
fprintf(stderr, "\tCluster timecode: %d (== %f seconds)\n", fClusterTimecode, fClusterTimecode*(fOurFile.fTimecodeScale/1000000000.0));
#endif
}
break;
}
case MATROSKA_ID_BLOCK_GROUP: { // 'Block Group' header: enter this
break;
}
case MATROSKA_ID_SIMPLEBLOCK:
case MATROSKA_ID_BLOCK: { // 'SimpleBlock' or 'Block' header: enter this (and we're done)
fBlockSize = (unsigned)size.val();
fCurrentParseState = PARSING_BLOCK;
break;
}
case MATROSKA_ID_BLOCK_DURATION: { // 'Block Duration' header: get this value (but we currently don't do anything with it)
unsigned blockDuration;
if (parseEBMLVal_unsigned(size, blockDuration)) {
#ifdef DEBUG
fprintf(stderr, "\tblock duration: %d (== %f ms)\n", blockDuration, (float)(blockDuration*fOurFile.fTimecodeScale/1000000.0));
#endif
}
break;
}
// Attachments are parsed only if we're in DEBUG mode (otherwise we just skip over them):
#ifdef DEBUG
case MATROSKA_ID_ATTACHMENTS: { // 'Attachments': enter this
break;
}
case MATROSKA_ID_ATTACHED_FILE: { // 'Attached File': enter this
break;
}
case MATROSKA_ID_FILE_DESCRIPTION: { // 'File Description': get this value
char* fileDescription;
if (parseEBMLVal_string(size, fileDescription)) {
#ifdef DEBUG
fprintf(stderr, "\tFile Description: %s\n", fileDescription);
#endif
delete[] fileDescription;
}
break;
}
case MATROSKA_ID_FILE_NAME: { // 'File Name': get this value
char* fileName;
if (parseEBMLVal_string(size, fileName)) {
#ifdef DEBUG
fprintf(stderr, "\tFile Name: %s\n", fileName);
#endif
delete[] fileName;
}
break;
}
case MATROSKA_ID_FILE_MIME_TYPE: { // 'File MIME Type': get this value
char* fileMIMEType;
if (parseEBMLVal_string(size, fileMIMEType)) {
#ifdef DEBUG
fprintf(stderr, "\tFile MIME Type: %s\n", fileMIMEType);
#endif
delete[] fileMIMEType;
}
break;
}
case MATROSKA_ID_FILE_UID: { // 'File UID': get this value
unsigned fileUID;
if (parseEBMLVal_unsigned(size, fileUID)) {
#ifdef DEBUG
fprintf(stderr, "\tFile UID: 0x%x\n", fileUID);
#endif
}
break;
}
#endif
default: { // skip over this header
skipHeader(size);
break;
}
}
setParseState();
}
}
Boolean MatroskaFileParser::parseCues() {
#if defined(DEBUG) || defined(DEBUG_CUES)
fprintf(stderr, "parsing Cues\n");
#endif
EBMLId id;
EBMLDataSize size;
// Read the next header, which should be MATROSKA_ID_CUES:
if (!parseEBMLIdAndSize(id, size) || id != MATROSKA_ID_CUES) return True; // The header wasn't what we expected, so we're done
fLimitOffsetInFile = fCurOffsetInFile + size.val(); // Make sure we don't read past the end of this header
double currentCueTime = 0.0;
u_int64_t currentClusterOffsetInFile = 0;
while (fCurOffsetInFile < fLimitOffsetInFile) {
while (!parseEBMLIdAndSize(id, size)) {}
#ifdef DEBUG_CUES
if (id == MATROSKA_ID_CUE_POINT) fprintf(stderr, "\n"); // makes debugging output easier to read
fprintf(stderr, "MatroskaFileParser::parseCues(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val());
#endif
switch (id.val()) {
case MATROSKA_ID_CUE_POINT: { // 'Cue Point' header: enter this
break;
}
case MATROSKA_ID_CUE_TIME: { // 'Cue Time' header: get this value
unsigned cueTime;
if (parseEBMLVal_unsigned(size, cueTime)) {
currentCueTime = cueTime*(fOurFile.fTimecodeScale/1000000000.0);
#ifdef DEBUG_CUES
fprintf(stderr, "\tCue Time %d (== %f seconds)\n", cueTime, currentCueTime);
#endif
}
break;
}
case MATROSKA_ID_CUE_TRACK_POSITIONS: { // 'Cue Track Positions' header: enter this
break;
}
case MATROSKA_ID_CUE_TRACK: { // 'Cue Track' header: get this value (but only for debugging; we don't do anything with it)
unsigned cueTrack;
if (parseEBMLVal_unsigned(size, cueTrack)) {
#ifdef DEBUG_CUES
fprintf(stderr, "\tCue Track %d\n", cueTrack);
#endif
}
break;
}
case MATROSKA_ID_CUE_CLUSTER_POSITION: { // 'Cue Cluster Position' header: get this value
u_int64_t cueClusterPosition;
if (parseEBMLVal_unsigned64(size, cueClusterPosition)) {
currentClusterOffsetInFile = fOurFile.fSegmentDataOffset + cueClusterPosition;
#ifdef DEBUG_CUES
fprintf(stderr, "\tCue Cluster Position %llu (=> offset within the file: %llu (0x%llx))\n", cueClusterPosition, currentClusterOffsetInFile, currentClusterOffsetInFile);
#endif
// Record this cue point:
fOurFile.addCuePoint(currentCueTime, currentClusterOffsetInFile, 1/*default block number within cluster*/);
}
break;
}
case MATROSKA_ID_CUE_BLOCK_NUMBER: { // 'Cue Block Number' header: get this value
unsigned cueBlockNumber;
if (parseEBMLVal_unsigned(size, cueBlockNumber) && cueBlockNumber != 0) {
#ifdef DEBUG_CUES
fprintf(stderr, "\tCue Block Number %d\n", cueBlockNumber);
#endif
// Record this cue point (overwriting any existing entry for this cue time):
fOurFile.addCuePoint(currentCueTime, currentClusterOffsetInFile, cueBlockNumber);
}
break;
}
default: { // We don't process this header, so just skip over it:
skipHeader(size);
break;
}
}
setParseState();
}
fLimitOffsetInFile = 0; // reset
#if defined(DEBUG) || defined(DEBUG_CUES)
fprintf(stderr, "done parsing Cues\n");
#endif
#ifdef DEBUG_CUES
fprintf(stderr, "Cue Point tree: ");
fOurFile.printCuePoints(stderr);
fprintf(stderr, "\n");
#endif
return True; // we're done parsing Cues
}
typedef enum { NoLacing, XiphLacing, FixedSizeLacing, EBMLLacing } MatroskaLacingType;
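// Background on the lacing types above (how several frames can share one block):
// - Xiph lacing: each laced frame's size is the sum of successive bytes, terminated by the
//   first byte < 0xFF (e.g., FF FF 2C encodes 255+255+44 == 554 bytes).
// - EBML lacing: the first frame's size is an EBML number; each later size is a signed,
//   EBML-coded delta from the previous frame's size.
// - Fixed-size lacing: the laced frames all have the same size.
// The last frame's size is never coded explicitly; it's inferred from the block's total size.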
void MatroskaFileParser::parseBlock() {
#ifdef DEBUG
fprintf(stderr, "parsing SimpleBlock or Block\n");
#endif
do {
unsigned blockStartPos = curOffset();
// The block begins with the track number:
EBMLNumber trackNumber;
if (!parseEBMLNumber(trackNumber)) break;
fBlockTrackNumber = (unsigned)trackNumber.val();
// If this track is not being read, then skip the rest of this block, and look for another one:
if (fOurDemux->lookupDemuxedTrack(fBlockTrackNumber) == NULL) {
unsigned headerBytesSeen = curOffset() - blockStartPos;
if (headerBytesSeen < fBlockSize) {
skipBytes(fBlockSize - headerBytesSeen);
}
#ifdef DEBUG
fprintf(stderr, "\tSkipped block for unused track number %d\n", fBlockTrackNumber);
#endif
fCurrentParseState = LOOKING_FOR_BLOCK;
setParseState();
return;
}
MatroskaTrack* track = fOurFile.lookup(fBlockTrackNumber);
if (track == NULL) break; // shouldn't happen
 // The next two bytes are the block's timecode (relative to the cluster timecode).
 // Read them as two separate statements, because the order of evaluation of two
 // "get1Byte()" calls within a single expression would otherwise be unspecified:
 u_int8_t const timecodeHigh = get1Byte();
 fBlockTimecode = (timecodeHigh<<8)|get1Byte();
// The next byte indicates the type of 'lacing' used:
u_int8_t c = get1Byte();
c &= 0x6; // we're interested in bits 5-6 only
MatroskaLacingType lacingType = (c==0x0)?NoLacing : (c==0x02)?XiphLacing : (c==0x04)?FixedSizeLacing : EBMLLacing;
#ifdef DEBUG
fprintf(stderr, "\ttrack number %d, timecode %d (=> %f seconds), %s lacing\n", fBlockTrackNumber, fBlockTimecode, (fClusterTimecode+fBlockTimecode)*(fOurFile.fTimecodeScale/1000000000.0), (lacingType==NoLacing)?"no" : (lacingType==XiphLacing)?"Xiph" : (lacingType==FixedSizeLacing)?"fixed-size" : "EBML");
#endif
if (lacingType == NoLacing) {
fNumFramesInBlock = 1;
} else {
// The next byte tells us how many frames are present in this block
fNumFramesInBlock = get1Byte() + 1;
}
delete[] fFrameSizesWithinBlock; fFrameSizesWithinBlock = new unsigned[fNumFramesInBlock];
if (fFrameSizesWithinBlock == NULL) break;
if (lacingType == NoLacing) {
unsigned headerBytesSeen = curOffset() - blockStartPos;
if (headerBytesSeen > fBlockSize) break;
fFrameSizesWithinBlock[0] = fBlockSize - headerBytesSeen;
} else if (lacingType == FixedSizeLacing) {
unsigned headerBytesSeen = curOffset() - blockStartPos;
if (headerBytesSeen > fBlockSize) break;
unsigned frameBytesAvailable = fBlockSize - headerBytesSeen;
unsigned constantFrameSize = frameBytesAvailable/fNumFramesInBlock;
for (unsigned i = 0; i < fNumFramesInBlock; ++i) {
fFrameSizesWithinBlock[i] = constantFrameSize;
}
// If there are any bytes left over, assign them to the last frame:
fFrameSizesWithinBlock[fNumFramesInBlock-1] += frameBytesAvailable%fNumFramesInBlock;
} else { // EBML or Xiph lacing
unsigned curFrameSize = 0;
unsigned frameSizesTotal = 0;
unsigned i;
for (i = 0; i < fNumFramesInBlock-1; ++i) {
if (lacingType == EBMLLacing) {
EBMLNumber frameSize;
if (!parseEBMLNumber(frameSize)) break;
unsigned fsv = (unsigned)frameSize.val();
if (i == 0) {
curFrameSize = fsv;
} else {
// The value we read is a signed value, that's added to the previous frame size, to get the current frame size:
unsigned toSubtract = (fsv>0xFFFFFF)?0x07FFFFFF : (fsv>0xFFFF)?0x0FFFFF : (fsv>0xFF)?0x1FFF : 0x3F;
int fsv_signed = fsv - toSubtract;
curFrameSize += fsv_signed;
if ((int)curFrameSize < 0) break;
}
} else { // Xiph lacing
curFrameSize = 0;
u_int8_t c;
do {
c = get1Byte();
curFrameSize += c;
} while (c == 0xFF);
}
fFrameSizesWithinBlock[i] = curFrameSize;
frameSizesTotal += curFrameSize;
}
if (i != fNumFramesInBlock-1) break; // an error occurred within the "for" loop
 // Compute the size of the final frame within the block (from the block's size, and the frame sizes already computed):
unsigned headerBytesSeen = curOffset() - blockStartPos;
if (headerBytesSeen + frameSizesTotal > fBlockSize) break;
fFrameSizesWithinBlock[i] = fBlockSize - (headerBytesSeen + frameSizesTotal);
}
// We're done parsing headers within the block, and (as a result) we now know the sizes of all frames within the block.
// If we have 'stripped bytes' that are common to (the front of) all frames, then count them now:
if (track->headerStrippedBytesSize != 0) {
for (unsigned i = 0; i < fNumFramesInBlock; ++i) fFrameSizesWithinBlock[i] += track->headerStrippedBytesSize;
}
#ifdef DEBUG
fprintf(stderr, "\tThis block contains %d frame(s); size(s):", fNumFramesInBlock);
unsigned frameSizesTotal = 0;
for (unsigned i = 0; i < fNumFramesInBlock; ++i) {
fprintf(stderr, " %d", fFrameSizesWithinBlock[i]);
frameSizesTotal += fFrameSizesWithinBlock[i];
}
if (fNumFramesInBlock > 1) fprintf(stderr, " (total: %u)", frameSizesTotal);
fprintf(stderr, " bytes\n");
#endif
// Next, start delivering these frames:
fCurrentParseState = DELIVERING_FRAME_WITHIN_BLOCK;
fCurOffsetWithinFrame = fNextFrameNumberToDeliver = 0;
setParseState();
return;
} while (0);
// An error occurred. Try to recover:
#ifdef DEBUG
fprintf(stderr, "parseBlock(): Error parsing data; trying to recover...\n");
#endif
fCurrentParseState = LOOKING_FOR_BLOCK;
}
Boolean MatroskaFileParser::deliverFrameWithinBlock() {
#ifdef DEBUG
fprintf(stderr, "delivering frame within SimpleBlock or Block\n");
#endif
do {
MatroskaTrack* track = fOurFile.lookup(fBlockTrackNumber);
if (track == NULL) break; // shouldn't happen
MatroskaDemuxedTrack* demuxedTrack = fOurDemux->lookupDemuxedTrack(fBlockTrackNumber);
if (demuxedTrack == NULL) break; // shouldn't happen
if (!demuxedTrack->isCurrentlyAwaitingData()) {
// Someone has been reading this stream, but isn't right now.
// We can't deliver this frame until he asks for it, so punt for now.
// The next time he asks for a frame, he'll get it.
#ifdef DEBUG
fprintf(stderr, "\tdeferring delivery of frame #%d (%d bytes)", fNextFrameNumberToDeliver, fFrameSizesWithinBlock[fNextFrameNumberToDeliver]);
if (track->haveSubframes()) fprintf(stderr, "[offset %d]", fCurOffsetWithinFrame);
fprintf(stderr, "\n");
#endif
restoreSavedParserState(); // so we read from the beginning next time
return False;
}
unsigned frameSize = fFrameSizesWithinBlock[fNextFrameNumberToDeliver];
if (track->haveSubframes()) {
// The next "track->subframeSizeSize" bytes contain the length of a 'subframe':
if (fCurOffsetWithinFrame + track->subframeSizeSize > frameSize) break; // sanity check
unsigned subframeSize = 0;
for (unsigned i = 0; i < track->subframeSizeSize; ++i) {
u_int8_t c;
getCommonFrameBytes(track, &c, 1, 0);
if (fCurFrameNumBytesToGet > 0) { // it'll be 1
c = get1Byte();
++fCurOffsetWithinFrame;
}
subframeSize = subframeSize*256 + c;
}
if (subframeSize == 0 || fCurOffsetWithinFrame + subframeSize > frameSize) break; // sanity check
frameSize = subframeSize;
}
// Compute the presentation time of this frame (from the cluster timecode, the block timecode, and the default duration):
double pt = (fClusterTimecode+fBlockTimecode)*(fOurFile.fTimecodeScale/1000000000.0)
+ fNextFrameNumberToDeliver*(track->defaultDuration/1000000000.0);
if (fPresentationTimeOffset == 0.0) {
// This is the first time we've computed a presentation time. Compute an offset to make the presentation times aligned
// with 'wall clock' time:
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
double ptNow = timeNow.tv_sec + timeNow.tv_usec/1000000.0;
fPresentationTimeOffset = ptNow - pt;
}
pt += fPresentationTimeOffset;
struct timeval presentationTime;
presentationTime.tv_sec = (unsigned)pt;
presentationTime.tv_usec = (unsigned)((pt - presentationTime.tv_sec)*1000000);
unsigned durationInMicroseconds = track->defaultDuration/1000;
if (track->haveSubframes()) {
// If this is a 'subframe', use a duration of 0 instead (unless it's the last 'subframe'):
if (fCurOffsetWithinFrame + frameSize + track->subframeSizeSize < fFrameSizesWithinBlock[fNextFrameNumberToDeliver]) {
// There's room for at least one more subframe after this, so give this subframe a duration of 0
durationInMicroseconds = 0;
}
}
if (track->defaultDuration == 0) {
// Adjust the frame duration to keep the sum of frame durations aligned with presentation times.
if (demuxedTrack->prevPresentationTime().tv_sec != 0) { // not the first time for this track
demuxedTrack->durationImbalance()
+= (presentationTime.tv_sec - demuxedTrack->prevPresentationTime().tv_sec)*1000000
+ (presentationTime.tv_usec - demuxedTrack->prevPresentationTime().tv_usec);
}
int adjustment = 0;
if (demuxedTrack->durationImbalance() > 0) {
// The duration needs to be increased.
int const adjustmentThreshold = 100000; // don't increase the duration by more than this amount (in case there's a mistake)
adjustment = demuxedTrack->durationImbalance() > adjustmentThreshold
? adjustmentThreshold : demuxedTrack->durationImbalance();
} else if (demuxedTrack->durationImbalance() < 0) {
// The duration needs to be decreased.
adjustment = (unsigned)(-demuxedTrack->durationImbalance()) < durationInMicroseconds
? demuxedTrack->durationImbalance() : -(int)durationInMicroseconds;
}
durationInMicroseconds += adjustment;
demuxedTrack->durationImbalance() -= durationInMicroseconds; // for next time
demuxedTrack->prevPresentationTime() = presentationTime; // for next time
}
demuxedTrack->presentationTime() = presentationTime;
demuxedTrack->durationInMicroseconds() = durationInMicroseconds;
// Deliver the next block now:
if (frameSize > demuxedTrack->maxSize()) {
demuxedTrack->numTruncatedBytes() = frameSize - demuxedTrack->maxSize();
demuxedTrack->frameSize() = demuxedTrack->maxSize();
} else { // normal case
demuxedTrack->numTruncatedBytes() = 0;
demuxedTrack->frameSize() = frameSize;
}
getCommonFrameBytes(track, demuxedTrack->to(), demuxedTrack->frameSize(), demuxedTrack->numTruncatedBytes());
// Next, deliver (and/or skip) bytes from the input file:
fCurrentParseState = DELIVERING_FRAME_BYTES;
setParseState();
return True;
} while (0);
// An error occurred. Try to recover:
#ifdef DEBUG
fprintf(stderr, "deliverFrameWithinBlock(): Error parsing data; trying to recover...\n");
#endif
fCurrentParseState = LOOKING_FOR_BLOCK;
return True;
}
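// A worked example of the 'duration imbalance' bookkeeping above (illustrative numbers):
// with "defaultDuration" == 0 and frames whose presentation times are 40000 us apart, the
// imbalance grows by 40000 on each frame and is immediately paid back as that frame's
// duration, so the sum of delivered durations tracks the presentation clock (one frame
// behind; increases are capped at 100000 us per step as a safety bound).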
void MatroskaFileParser::deliverFrameBytes() {
do {
MatroskaTrack* track = fOurFile.lookup(fBlockTrackNumber);
if (track == NULL) break; // shouldn't happen
MatroskaDemuxedTrack* demuxedTrack = fOurDemux->lookupDemuxedTrack(fBlockTrackNumber);
if (demuxedTrack == NULL) break; // shouldn't happen
unsigned const BANK_SIZE = bankSize();
while (fCurFrameNumBytesToGet > 0) {
// Hack: We can get no more than BANK_SIZE bytes at a time:
unsigned numBytesToGet = fCurFrameNumBytesToGet > BANK_SIZE ? BANK_SIZE : fCurFrameNumBytesToGet;
getBytes(fCurFrameTo, numBytesToGet);
fCurFrameTo += numBytesToGet;
fCurFrameNumBytesToGet -= numBytesToGet;
fCurOffsetWithinFrame += numBytesToGet;
setParseState();
}
while (fCurFrameNumBytesToSkip > 0) {
// Hack: We can skip no more than BANK_SIZE bytes at a time:
unsigned numBytesToSkip = fCurFrameNumBytesToSkip > BANK_SIZE ? BANK_SIZE : fCurFrameNumBytesToSkip;
skipBytes(numBytesToSkip);
fCurFrameNumBytesToSkip -= numBytesToSkip;
fCurOffsetWithinFrame += numBytesToSkip;
setParseState();
}
#ifdef DEBUG
fprintf(stderr, "\tdelivered frame #%d: %d bytes", fNextFrameNumberToDeliver, demuxedTrack->frameSize());
if (track->haveSubframes()) fprintf(stderr, "[offset %d]", fCurOffsetWithinFrame - track->subframeSizeSize - demuxedTrack->frameSize() - demuxedTrack->numTruncatedBytes());
if (demuxedTrack->numTruncatedBytes() > 0) fprintf(stderr, " (%d bytes truncated)", demuxedTrack->numTruncatedBytes());
fprintf(stderr, " @%u.%06u (%.06f from start); duration %u us\n", demuxedTrack->presentationTime().tv_sec, demuxedTrack->presentationTime().tv_usec, demuxedTrack->presentationTime().tv_sec+demuxedTrack->presentationTime().tv_usec/1000000.0-fPresentationTimeOffset, demuxedTrack->durationInMicroseconds());
#endif
if (!track->haveSubframes()
|| fCurOffsetWithinFrame + track->subframeSizeSize >= fFrameSizesWithinBlock[fNextFrameNumberToDeliver]) {
// Either we don't have subframes, or there's no more room for another subframe => We're completely done with this frame now:
++fNextFrameNumberToDeliver;
fCurOffsetWithinFrame = 0;
}
if (fNextFrameNumberToDeliver == fNumFramesInBlock) {
// We've delivered all of the frames from this block. Look for another block next:
fCurrentParseState = LOOKING_FOR_BLOCK;
} else {
fCurrentParseState = DELIVERING_FRAME_WITHIN_BLOCK;
}
setParseState();
FramedSource::afterGetting(demuxedTrack); // completes delivery
return;
} while (0);
// An error occurred. Try to recover:
#ifdef DEBUG
fprintf(stderr, "deliverFrameBytes(): Error parsing data; trying to recover...\n");
#endif
fCurrentParseState = LOOKING_FOR_BLOCK;
}
void MatroskaFileParser
::getCommonFrameBytes(MatroskaTrack* track, u_int8_t* to, unsigned numBytesToGet, unsigned numBytesToSkip) {
if (track->headerStrippedBytesSize > fCurOffsetWithinFrame) {
// We have some common 'header stripped' bytes that remain to be prepended to the frame. Use these first:
unsigned numRemainingHeaderStrippedBytes = track->headerStrippedBytesSize - fCurOffsetWithinFrame;
unsigned numHeaderStrippedBytesToGet;
 if (numBytesToGet <= numRemainingHeaderStrippedBytes) {
 numHeaderStrippedBytesToGet = numBytesToGet;
 // Compute how many bytes (if any) must still be skipped from the input file. This uses
 // the original "numBytesToGet" value, so we zero it only afterwards:
 if (numBytesToGet + numBytesToSkip <= numRemainingHeaderStrippedBytes) {
 numBytesToSkip = 0;
 } else {
 numBytesToSkip = numBytesToGet + numBytesToSkip - numRemainingHeaderStrippedBytes;
 }
 numBytesToGet = 0;
} else {
numHeaderStrippedBytesToGet = numRemainingHeaderStrippedBytes;
numBytesToGet = numBytesToGet - numRemainingHeaderStrippedBytes;
}
if (numHeaderStrippedBytesToGet > 0) {
memmove(to, &track->headerStrippedBytes[fCurOffsetWithinFrame], numHeaderStrippedBytesToGet);
to += numHeaderStrippedBytesToGet;
fCurOffsetWithinFrame += numHeaderStrippedBytesToGet;
}
}
fCurFrameTo = to;
fCurFrameNumBytesToGet = numBytesToGet;
fCurFrameNumBytesToSkip = numBytesToSkip;
}
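// An EBML number is a variable-length quantity: the bit position of the first '1' in the
// leading byte gives the total length in bytes. E.g., 0x81 is a 1-byte number (value 1 once
// the leading '1' marker is stripped), while 1A 45 DF A3 - the EBML header id - is a 4-byte
// number, because the first '1' bit of 0x1A is in the 4th position. Ids keep their marker bit
// ("stripLeading1" is False for them); data sizes strip it, as implemented below.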
Boolean MatroskaFileParser::parseEBMLNumber(EBMLNumber& num) {
unsigned i;
u_int8_t bitmask = 0x80;
for (i = 0; i < EBML_NUMBER_MAX_LEN; ++i) {
while (1) {
if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) return False; // We've hit our pre-set limit
num.data[i] = get1Byte();
++fCurOffsetInFile;
// If we're looking for an id, skip any leading bytes that don't contain a '1' in the first 4 bits:
if (i == 0/*we're a leading byte*/ && !num.stripLeading1/*we're looking for an id*/ && (num.data[i]&0xF0) == 0) {
setParseState(); // ensures that we make forward progress if the parsing gets interrupted
continue;
}
break;
}
if ((num.data[0]&bitmask) != 0) {
// num[i] is the last byte of the id
if (num.stripLeading1) num.data[0] &=~ bitmask;
break;
}
bitmask >>= 1;
}
if (i == EBML_NUMBER_MAX_LEN) return False;
num.len = i+1;
return True;
}
Boolean MatroskaFileParser::parseEBMLIdAndSize(EBMLId& id, EBMLDataSize& size) {
return parseEBMLNumber(id) && parseEBMLNumber(size);
}
Boolean MatroskaFileParser::parseEBMLVal_unsigned64(EBMLDataSize& size, u_int64_t& result) {
u_int64_t sv = size.val();
if (sv > 8) return False; // size too large
result = 0; // initially
for (unsigned i = (unsigned)sv; i > 0; --i) {
if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) return False; // We've hit our pre-set limit
u_int8_t c = get1Byte();
++fCurOffsetInFile;
result = result*256 + c;
}
return True;
}
Boolean MatroskaFileParser::parseEBMLVal_unsigned(EBMLDataSize& size, unsigned& result) {
if (size.val() > 4) return False; // size too large
u_int64_t result64;
if (!parseEBMLVal_unsigned64(size, result64)) return False;
result = (unsigned)result64;
return True;
}
Boolean MatroskaFileParser::parseEBMLVal_float(EBMLDataSize& size, float& result) {
if (size.val() == 4) {
// Normal case. Read the value as if it were a 4-byte integer, then copy it to the 'float' result:
unsigned resultAsUnsigned;
if (!parseEBMLVal_unsigned(size, resultAsUnsigned)) return False;
if (sizeof result != sizeof resultAsUnsigned) return False;
memcpy(&result, &resultAsUnsigned, sizeof result);
return True;
} else if (size.val() == 8) {
 // Read the value as if it were an 8-byte integer, then copy it to a 'double', then convert that to the 'float' result:
u_int64_t resultAsUnsigned64;
if (!parseEBMLVal_unsigned64(size, resultAsUnsigned64)) return False;
double resultDouble;
if (sizeof resultDouble != sizeof resultAsUnsigned64) return False;
memcpy(&resultDouble, &resultAsUnsigned64, sizeof resultDouble);
result = (float)resultDouble;
return True;
} else {
// Unworkable size
return False;
}
}
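// Note on the float parsing above: EBML floats are stored as big-endian IEEE 754 values.
// Reading the bytes as a big-endian integer and then bit-copying is endian-safe; e.g., the
// 4 bytes 3F 80 00 00 yield the unsigned value 0x3F800000, whose bit pattern is the
// 'float' 1.0.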
Boolean MatroskaFileParser::parseEBMLVal_string(EBMLDataSize& size, char*& result) {
unsigned resultLength = (unsigned)size.val();
result = new char[resultLength + 1]; // allow for the trailing '\0'
if (result == NULL) return False;
char* p = result;
unsigned i;
for (i = 0; i < resultLength; ++i) {
if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) break; // We've hit our pre-set limit
u_int8_t c = get1Byte();
++fCurOffsetInFile;
*p++ = c;
}
if (i < resultLength) { // an error occurred
delete[] result;
result = NULL;
return False;
}
*p = '\0';
return True;
}
Boolean MatroskaFileParser::parseEBMLVal_binary(EBMLDataSize& size, u_int8_t*& result) {
unsigned resultLength = (unsigned)size.val();
result = new u_int8_t[resultLength];
if (result == NULL) return False;
u_int8_t* p = result;
unsigned i;
for (i = 0; i < resultLength; ++i) {
if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) break; // We've hit our pre-set limit
u_int8_t c = get1Byte();
++fCurOffsetInFile;
*p++ = c;
}
if (i < resultLength) { // an error occurred
delete[] result;
result = NULL;
return False;
}
return True;
}
void MatroskaFileParser::skipHeader(EBMLDataSize const& size) {
 u_int64_t sv = size.val(); // use the full 64-bit size; don't truncate it to 32 bits
#ifdef DEBUG
fprintf(stderr, "\tskipping %llu bytes\n", sv);
#endif
fNumHeaderBytesToSkip = sv;
skipRemainingHeaderBytes(False);
}
void MatroskaFileParser::skipRemainingHeaderBytes(Boolean isContinuation) {
if (fNumHeaderBytesToSkip == 0) return; // common case
// Hack: To avoid tripping into a parser 'internal error' if we try to skip an excessively large
// distance, break up the skipping into manageable chunks, to ensure forward progress:
unsigned const maxBytesToSkip = bankSize();
while (fNumHeaderBytesToSkip > 0) {
unsigned numBytesToSkipNow
= fNumHeaderBytesToSkip < maxBytesToSkip ? (unsigned)fNumHeaderBytesToSkip : maxBytesToSkip;
setParseState();
skipBytes(numBytesToSkipNow);
#ifdef DEBUG
if (isContinuation || numBytesToSkipNow < fNumHeaderBytesToSkip) {
fprintf(stderr, "\t\t(skipped %u bytes; %llu bytes remaining)\n",
numBytesToSkipNow, fNumHeaderBytesToSkip - numBytesToSkipNow);
}
#endif
fCurOffsetInFile += numBytesToSkipNow;
fNumHeaderBytesToSkip -= numBytesToSkipNow;
}
}
void MatroskaFileParser::setParseState() {
fSavedCurOffsetInFile = fCurOffsetInFile;
fSavedCurOffsetWithinFrame = fCurOffsetWithinFrame;
saveParserState();
}
void MatroskaFileParser::restoreSavedParserState() {
StreamParser::restoreSavedParserState();
fCurOffsetInFile = fSavedCurOffsetInFile;
fCurOffsetWithinFrame = fSavedCurOffsetWithinFrame;
}
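// Note on the save/restore pair above: the underlying "StreamParser" can run out of buffered
// input in mid-parse, in which case parsing is abandoned and retried once more data arrives.
// Saving the file offset and intra-frame offset at each committed step lets
// "restoreSavedParserState()" rewind to the last consistent point, so a retry simply resumes
// from there.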
void MatroskaFileParser::seekToFilePosition(u_int64_t offsetInFile) {
ByteStreamFileSource* fileSource = (ByteStreamFileSource*)fInputSource; // we know it's a "ByteStreamFileSource"
if (fileSource != NULL) {
fileSource->seekToByteAbsolute(offsetInFile);
resetStateAfterSeeking();
}
}
void MatroskaFileParser::seekToEndOfFile() {
ByteStreamFileSource* fileSource = (ByteStreamFileSource*)fInputSource; // we know it's a "ByteStreamFileSource"
if (fileSource != NULL) {
fileSource->seekToEnd();
resetStateAfterSeeking();
}
}
void MatroskaFileParser::resetStateAfterSeeking() {
// Because we're resuming parsing after seeking to a new position in the file, reset the parser state:
fCurOffsetInFile = fSavedCurOffsetInFile = 0;
fCurOffsetWithinFrame = fSavedCurOffsetWithinFrame = 0;
flushInput();
}
live/liveMedia/MatroskaFile.cpp 000444 001751 000000 00000047343 12265042432 016736 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A class that encapsulates a Matroska file.
// Implementation
#include "MatroskaFileParser.hh"
#include "MatroskaDemuxedTrack.hh"
#include <ByteStreamFileSource.hh>
////////// CuePoint definition //////////
class CuePoint {
public:
CuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster/* 1-based */);
virtual ~CuePoint();
static void addCuePoint(CuePoint*& root, double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster/* 1-based */,
Boolean& needToReviseBalanceOfParent);
// If "cueTime" == "root.fCueTime", replace the existing data, otherwise add to the left or right subtree.
// (Note that this is a static member function because - as a result of tree rotation - "root" might change.)
Boolean lookup(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster);
static void fprintf(FILE* fid, CuePoint* cuePoint); // used for debugging; it's static to allow for "cuePoint == NULL"
private:
// The "CuePoint" tree is implemented as an AVL Tree, to keep it balanced (for efficient lookup).
CuePoint* fSubTree[2]; // 0 => left; 1 => right
CuePoint* left() const { return fSubTree[0]; }
CuePoint* right() const { return fSubTree[1]; }
char fBalance; // height of right subtree - height of left subtree
static void rotate(unsigned direction/*0 => left; 1 => right*/, CuePoint*& root); // used to keep the tree in balance
double fCueTime;
u_int64_t fClusterOffsetInFile;
unsigned fBlockNumWithinCluster; // 0-based
};
UsageEnvironment& operator<<(UsageEnvironment& env, const CuePoint* cuePoint); // used for debugging
////////// MatroskaTrackTable definition /////////
// For looking up and iterating over the file's tracks:
class MatroskaTrackTable {
public:
MatroskaTrackTable();
virtual ~MatroskaTrackTable();
void add(MatroskaTrack* newTrack, unsigned trackNumber);
MatroskaTrack* lookup(unsigned trackNumber);
unsigned numTracks() const;
class Iterator {
public:
Iterator(MatroskaTrackTable& ourTable);
virtual ~Iterator();
MatroskaTrack* next();
private:
HashTable::Iterator* fIter;
};
private:
friend class Iterator;
HashTable* fTable;
};
////////// MatroskaFile implementation //////////
void MatroskaFile
::createNew(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData,
char const* preferredLanguage) {
new MatroskaFile(env, fileName, onCreation, onCreationClientData, preferredLanguage);
}
MatroskaFile::MatroskaFile(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData,
char const* preferredLanguage)
: Medium(env),
fFileName(strDup(fileName)), fOnCreation(onCreation), fOnCreationClientData(onCreationClientData),
fPreferredLanguage(strDup(preferredLanguage)),
fTimecodeScale(1000000), fSegmentDuration(0.0), fSegmentDataOffset(0), fClusterOffset(0), fCuesOffset(0), fCuePoints(NULL),
fChosenVideoTrackNumber(0), fChosenAudioTrackNumber(0), fChosenSubtitleTrackNumber(0) {
fTrackTable = new MatroskaTrackTable;
fDemuxesTable = HashTable::create(ONE_WORD_HASH_KEYS);
FramedSource* inputSource = ByteStreamFileSource::createNew(envir(), fileName);
if (inputSource == NULL) {
// The specified input file does not exist!
fParserForInitialization = NULL;
handleEndOfTrackHeaderParsing(); // we have no file, and thus no tracks, but we still need to signal this
} else {
// Initialize ourselves by parsing the file's 'Track' headers:
fParserForInitialization = new MatroskaFileParser(*this, inputSource, handleEndOfTrackHeaderParsing, this, NULL);
}
}
MatroskaFile::~MatroskaFile() {
delete fParserForInitialization;
delete fCuePoints;
// Delete any outstanding "MatroskaDemux"s, and the table for them:
MatroskaDemux* demux;
while ((demux = (MatroskaDemux*)fDemuxesTable->RemoveNext()) != NULL) {
delete demux;
}
delete fDemuxesTable;
delete fTrackTable;
delete[] (char*)fPreferredLanguage;
delete[] (char*)fFileName;
}
void MatroskaFile::handleEndOfTrackHeaderParsing(void* clientData) {
((MatroskaFile*)clientData)->handleEndOfTrackHeaderParsing();
}
class TrackChoiceRecord {
public:
unsigned trackNumber;
u_int8_t trackType;
unsigned choiceFlags;
};
void MatroskaFile::handleEndOfTrackHeaderParsing() {
// Having parsed all of our track headers, iterate through the tracks to figure out which ones should be played.
// The Matroska 'specification' is rather imprecise about this (as usual). However, we use the following algorithm:
// - Use one (but no more) enabled track of each type (video, audio, subtitle). (Ignore all tracks that are not 'enabled'.)
// - For each track type, choose the one that's 'forced'.
// - If more than one is 'forced', choose the first one that matches our preferred language, or the first if none matches.
// - If none is 'forced', choose the one that's 'default'.
// - If more than one is 'default', choose the first one that matches our preferred language, or the first if none matches.
// - If none is 'default', choose the first one that matches our preferred language, or the first if none matches.
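// (Example with illustrative tracks: given a preferred language of "eng" and three enabled
// audio tracks - a 'default' German track, a 'default' English track, and an unflagged French
// track - their "choiceFlags" below would be 2, 3 (2 for 'default' plus 1 for the language
// match), and 0 respectively, so the English track would be chosen.)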
unsigned numTracks = fTrackTable->numTracks();
if (numTracks > 0) {
TrackChoiceRecord* trackChoice = new TrackChoiceRecord[numTracks];
unsigned numEnabledTracks = 0;
MatroskaTrackTable::Iterator iter(*fTrackTable);
MatroskaTrack* track;
while ((track = iter.next()) != NULL) {
if (!track->isEnabled || track->trackType == 0 || track->codecID == NULL) continue; // track not enabled, or not fully-defined
trackChoice[numEnabledTracks].trackNumber = track->trackNumber;
trackChoice[numEnabledTracks].trackType = track->trackType;
// Assign flags for this track so that, when sorted, the largest value becomes our choice:
unsigned choiceFlags = 0;
if (fPreferredLanguage != NULL && track->language != NULL && strcmp(fPreferredLanguage, track->language) == 0) {
// This track matches our preferred language:
choiceFlags |= 1;
}
if (track->isForced) {
choiceFlags |= 4;
} else if (track->isDefault) {
choiceFlags |= 2;
}
trackChoice[numEnabledTracks].choiceFlags = choiceFlags;
++numEnabledTracks;
}
// Choose the desired track for each track type:
for (u_int8_t trackType = 0x01; trackType != MATROSKA_TRACK_TYPE_OTHER; trackType <<= 1) {
int bestNum = -1;
int bestChoiceFlags = -1;
for (unsigned i = 0; i < numEnabledTracks; ++i) {
if (trackChoice[i].trackType == trackType && (int)trackChoice[i].choiceFlags > bestChoiceFlags) {
bestNum = i;
bestChoiceFlags = (int)trackChoice[i].choiceFlags;
}
}
if (bestChoiceFlags >= 0) { // There is a track for this track type
if (trackType == MATROSKA_TRACK_TYPE_VIDEO) fChosenVideoTrackNumber = trackChoice[bestNum].trackNumber;
else if (trackType == MATROSKA_TRACK_TYPE_AUDIO) fChosenAudioTrackNumber = trackChoice[bestNum].trackNumber;
else fChosenSubtitleTrackNumber = trackChoice[bestNum].trackNumber;
}
}
delete[] trackChoice;
}
#ifdef DEBUG
if (fChosenVideoTrackNumber > 0) fprintf(stderr, "Chosen video track: #%d\n", fChosenVideoTrackNumber); else fprintf(stderr, "No chosen video track\n");
if (fChosenAudioTrackNumber > 0) fprintf(stderr, "Chosen audio track: #%d\n", fChosenAudioTrackNumber); else fprintf(stderr, "No chosen audio track\n");
if (fChosenSubtitleTrackNumber > 0) fprintf(stderr, "Chosen subtitle track: #%d\n", fChosenSubtitleTrackNumber); else fprintf(stderr, "No chosen subtitle track\n");
#endif
// Delete our parser, because it's done its job now:
delete fParserForInitialization; fParserForInitialization = NULL;
// Finally, signal our caller that we've been created and initialized:
if (fOnCreation != NULL) (*fOnCreation)(this, fOnCreationClientData);
}
MatroskaTrack* MatroskaFile::lookup(unsigned trackNumber) const {
return fTrackTable->lookup(trackNumber);
}
MatroskaDemux* MatroskaFile::newDemux() {
MatroskaDemux* demux = new MatroskaDemux(*this);
fDemuxesTable->Add((char const*)demux, demux);
return demux;
}
void MatroskaFile::removeDemux(MatroskaDemux* demux) {
fDemuxesTable->Remove((char const*)demux);
}
float MatroskaFile::fileDuration() {
if (fCuePoints == NULL) return 0.0; // Hack, because the RTSP server code assumes that duration > 0 => seekable. (fix this) #####
return segmentDuration()*(timecodeScale()/1000000000.0f);
}
void MatroskaFile::addTrack(MatroskaTrack* newTrack, unsigned trackNumber) {
fTrackTable->add(newTrack, trackNumber);
}
void MatroskaFile::addCuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster) {
Boolean dummy = False; // not used
CuePoint::addCuePoint(fCuePoints, cueTime, clusterOffsetInFile, blockNumWithinCluster, dummy);
}
Boolean MatroskaFile::lookupCuePoint(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster) {
if (fCuePoints == NULL) return False;
(void)fCuePoints->lookup(cueTime, resultClusterOffsetInFile, resultBlockNumWithinCluster);
return True;
}
void MatroskaFile::printCuePoints(FILE* fid) {
CuePoint::fprintf(fid, fCuePoints);
}
////////// MatroskaTrackTable implementation //////////
MatroskaTrackTable::MatroskaTrackTable()
: fTable(HashTable::create(ONE_WORD_HASH_KEYS)) {
}
MatroskaTrackTable::~MatroskaTrackTable() {
// Remove and delete all of our "MatroskaTrack" descriptors, and the hash table itself:
MatroskaTrack* track;
while ((track = (MatroskaTrack*)fTable->RemoveNext()) != NULL) {
delete track;
}
delete fTable;
}
void MatroskaTrackTable::add(MatroskaTrack* newTrack, unsigned trackNumber) {
if (newTrack != NULL && newTrack->trackNumber != 0) fTable->Remove((char const*)newTrack->trackNumber);
MatroskaTrack* existingTrack = (MatroskaTrack*)fTable->Add((char const*)trackNumber, newTrack);
delete existingTrack; // in case it wasn't NULL
}
MatroskaTrack* MatroskaTrackTable::lookup(unsigned trackNumber) {
return (MatroskaTrack*)fTable->Lookup((char const*)trackNumber);
}
unsigned MatroskaTrackTable::numTracks() const { return fTable->numEntries(); }
MatroskaTrackTable::Iterator::Iterator(MatroskaTrackTable& ourTable) {
fIter = HashTable::Iterator::create(*(ourTable.fTable));
}
MatroskaTrackTable::Iterator::~Iterator() {
delete fIter;
}
MatroskaTrack* MatroskaTrackTable::Iterator::next() {
char const* key;
return (MatroskaTrack*)fIter->next(key);
}
////////// MatroskaTrack implementation //////////
MatroskaTrack::MatroskaTrack()
: trackNumber(0/*not set*/), trackType(0/*unknown*/),
isEnabled(True), isDefault(True), isForced(False),
defaultDuration(0),
name(NULL), language(NULL), codecID(NULL),
samplingFrequency(0), numChannels(2), mimeType(""),
codecPrivateSize(0), codecPrivate(NULL), codecPrivateUsesH264FormatForH265(False),
headerStrippedBytesSize(0), headerStrippedBytes(NULL),
subframeSizeSize(0) {
}
MatroskaTrack::~MatroskaTrack() {
delete[] name; delete[] language; delete[] codecID;
delete[] codecPrivate;
delete[] headerStrippedBytes;
}
////////// MatroskaDemux implementation //////////
MatroskaDemux::MatroskaDemux(MatroskaFile& ourFile)
: Medium(ourFile.envir()),
fOurFile(ourFile), fDemuxedTracksTable(HashTable::create(ONE_WORD_HASH_KEYS)),
fNextTrackTypeToCheck(0x1) {
fOurParser = new MatroskaFileParser(ourFile, ByteStreamFileSource::createNew(envir(), ourFile.fileName()),
handleEndOfFile, this, this);
}
MatroskaDemux::~MatroskaDemux() {
// Begin by acting as if we've reached the end of the source file. This should cause all of our demuxed tracks to get closed.
handleEndOfFile();
// Then delete our table of "MatroskaDemuxedTrack"s
// - but not the "MatroskaDemuxedTrack"s themselves; that should have already happened:
delete fDemuxedTracksTable;
delete fOurParser;
fOurFile.removeDemux(this);
}
FramedSource* MatroskaDemux::newDemuxedTrack() {
unsigned dummyResultTrackNumber;
return newDemuxedTrack(dummyResultTrackNumber);
}
FramedSource* MatroskaDemux::newDemuxedTrack(unsigned& resultTrackNumber) {
FramedSource* result;
resultTrackNumber = 0;
for (result = NULL; result == NULL && fNextTrackTypeToCheck != MATROSKA_TRACK_TYPE_OTHER;
fNextTrackTypeToCheck <<= 1) {
if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_VIDEO) resultTrackNumber = fOurFile.chosenVideoTrackNumber();
else if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_AUDIO) resultTrackNumber = fOurFile.chosenAudioTrackNumber();
else if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_SUBTITLE) resultTrackNumber = fOurFile.chosenSubtitleTrackNumber();
result = newDemuxedTrackByTrackNumber(resultTrackNumber);
}
return result;
}
FramedSource* MatroskaDemux::newDemuxedTrackByTrackNumber(unsigned trackNumber) {
if (trackNumber == 0) return NULL;
FramedSource* track = new MatroskaDemuxedTrack(envir(), trackNumber, *this);
fDemuxedTracksTable->Add((char const*)trackNumber, track);
return track;
}
MatroskaDemuxedTrack* MatroskaDemux::lookupDemuxedTrack(unsigned trackNumber) {
return (MatroskaDemuxedTrack*)fDemuxedTracksTable->Lookup((char const*)trackNumber);
}
void MatroskaDemux::removeTrack(unsigned trackNumber) {
fDemuxedTracksTable->Remove((char const*)trackNumber);
if (fDemuxedTracksTable->numEntries() == 0) {
// We no longer have any demuxed tracks, so delete ourselves now:
delete this;
}
}
void MatroskaDemux::continueReading() {
fOurParser->continueParsing();
}
void MatroskaDemux::seekToTime(double& seekNPT) {
if (fOurParser != NULL) fOurParser->seekToTime(seekNPT);
}
void MatroskaDemux::handleEndOfFile(void* clientData) {
((MatroskaDemux*)clientData)->handleEndOfFile();
}
void MatroskaDemux::handleEndOfFile() {
// Iterate through all of our 'demuxed tracks', handling 'end of input' on each one.
// Hack: Because this can cause the hash table to get modified underneath us, we don't call the handlers until after we've
// first iterated through all of the tracks.
unsigned numTracks = fDemuxedTracksTable->numEntries();
if (numTracks == 0) return;
MatroskaDemuxedTrack** tracks = new MatroskaDemuxedTrack*[numTracks];
HashTable::Iterator* iter = HashTable::Iterator::create(*fDemuxedTracksTable);
unsigned i;
char const* trackNumber;
for (i = 0; i < numTracks; ++i) {
tracks[i] = (MatroskaDemuxedTrack*)iter->next(trackNumber);
}
delete iter;
for (i = 0; i < numTracks; ++i) {
if (tracks[i] == NULL) continue; // sanity check; shouldn't happen
FramedSource::handleClosure(tracks[i]);
}
delete[] tracks;
}
////////// CuePoint implementation //////////
CuePoint::CuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster)
: fBalance(0),
fCueTime(cueTime), fClusterOffsetInFile(clusterOffsetInFile), fBlockNumWithinCluster(blockNumWithinCluster - 1) {
fSubTree[0] = fSubTree[1] = NULL;
}
CuePoint::~CuePoint() {
delete fSubTree[0]; delete fSubTree[1];
}
#ifndef ABS
#define ABS(x) ((x)<0 ? -(x) : (x))
#endif
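// Reminder of the AVL invariant maintained below: each node's "fBalance" is (height of right
// subtree) - (height of left subtree), and must stay within {-1, 0, 1}. An insertion can push
// a node's balance to +/-2; a single rotation repairs the 'outside' (Left-Left or Right-Right)
// case, and a pair of rotations repairs the 'inside' (Left-Right or Right-Left) case, as the
// code below does for both directions symmetrically.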
void CuePoint::addCuePoint(CuePoint*& root, double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster,
Boolean& needToReviseBalanceOfParent) {
needToReviseBalanceOfParent = False; // by default; may get changed below
if (root == NULL) {
root = new CuePoint(cueTime, clusterOffsetInFile, blockNumWithinCluster);
needToReviseBalanceOfParent = True;
} else if (cueTime == root->fCueTime) {
// Replace existing data:
root->fClusterOffsetInFile = clusterOffsetInFile;
root->fBlockNumWithinCluster = blockNumWithinCluster - 1;
} else {
// Add to our left or right subtree:
int direction = cueTime > root->fCueTime; // 0 (left) or 1 (right)
Boolean needToReviseOurBalance = False;
addCuePoint(root->fSubTree[direction], cueTime, clusterOffsetInFile, blockNumWithinCluster, needToReviseOurBalance);
if (needToReviseOurBalance) {
// We need to change our 'balance' number, perhaps while also performing a rotation to bring ourself back into balance:
if (root->fBalance == 0) {
// We were balanced before, but now we're unbalanced (by 1) on the "direction" side:
root->fBalance = -1 + 2*direction; // -1 for "direction" 0; 1 for "direction" 1
needToReviseBalanceOfParent = True;
} else if (root->fBalance == 1 - 2*direction) { // 1 for "direction" 0; -1 for "direction" 1
// We were unbalanced (by 1) on the side opposite to where we added an entry, so now we're balanced:
root->fBalance = 0;
} else {
// We were unbalanced (by 1) on the side where we added an entry, so now we're unbalanced by 2, and have to rebalance:
if (root->fSubTree[direction]->fBalance == -1 + 2*direction) { // -1 for "direction" 0; 1 for "direction" 1
// We're 'doubly-unbalanced' on this side, so perform a single rotation in the opposite direction:
root->fBalance = root->fSubTree[direction]->fBalance = 0;
rotate(1-direction, root);
} else {
// This is the Left-Right case (for "direction" 0) or the Right-Left case (for "direction" 1); perform two rotations:
char newParentCurBalance = root->fSubTree[direction]->fSubTree[1-direction]->fBalance;
if (newParentCurBalance == 1 - 2*direction) { // 1 for "direction" 0; -1 for "direction" 1
root->fBalance = 0;
root->fSubTree[direction]->fBalance = -1 + 2*direction; // -1 for "direction" 0; 1 for "direction" 1
} else if (newParentCurBalance == 0) {
root->fBalance = 0;
root->fSubTree[direction]->fBalance = 0;
} else {
root->fBalance = 1 - 2*direction; // 1 for "direction" 0; -1 for "direction" 1
root->fSubTree[direction]->fBalance = 0;
}
rotate(direction, root->fSubTree[direction]);
root->fSubTree[direction]->fBalance = 0; // the new root will be balanced
rotate(1-direction, root);
}
}
}
}
}
Boolean CuePoint::lookup(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster) {
if (cueTime < fCueTime) {
if (left() == NULL) {
resultClusterOffsetInFile = 0;
resultBlockNumWithinCluster = 0;
return False;
} else {
return left()->lookup(cueTime, resultClusterOffsetInFile, resultBlockNumWithinCluster);
}
} else {
if (right() == NULL || !right()->lookup(cueTime, resultClusterOffsetInFile, resultBlockNumWithinCluster)) {
// Use this record:
cueTime = fCueTime;
resultClusterOffsetInFile = fClusterOffsetInFile;
resultBlockNumWithinCluster = fBlockNumWithinCluster;
}
return True;
}
}
void CuePoint::fprintf(FILE* fid, CuePoint* cuePoint) {
if (cuePoint != NULL) {
::fprintf(fid, "[");
fprintf(fid, cuePoint->left());
::fprintf(fid, ",%.1f{%d},", cuePoint->fCueTime, cuePoint->fBalance);
fprintf(fid, cuePoint->right());
::fprintf(fid, "]");
}
}
void CuePoint::rotate(unsigned direction/*0 => left; 1 => right*/, CuePoint*& root) {
CuePoint* pivot = root->fSubTree[1-direction]; // ASSERT: pivot != NULL
root->fSubTree[1-direction] = pivot->fSubTree[direction];
pivot->fSubTree[direction] = root;
root = pivot;
}
live/liveMedia/MP3StreamState.cpp 000444 001751 000000 00000032144 12265042432 017122 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A class encapsulating the state of a MP3 stream
// Implementation
#include "MP3StreamState.hh"
#include "InputFile.hh"
#include "GroupsockHelper.hh"
#if defined(__WIN32__) || defined(_WIN32)
#define snprintf _snprintf
#if _MSC_VER >= 1400 // 1400 == vs2005
#define fileno _fileno
#endif
#endif
#define MILLION 1000000
MP3StreamState::MP3StreamState(UsageEnvironment& env)
: fEnv(env), fFid(NULL), fPresentationTimeScale(1) {
}
MP3StreamState::~MP3StreamState() {
// Close our open file or socket:
if (fFid != NULL && fFid != stdin) {
if (fFidIsReallyASocket) {
intptr_t fid_long = (intptr_t)fFid;
closeSocket((int)fid_long);
} else {
CloseInputFile(fFid);
}
}
}
void MP3StreamState::assignStream(FILE* fid, unsigned fileSize) {
fFid = fid;
if (fileSize == (unsigned)(-1)) { /*HACK#####*/
fFidIsReallyASocket = 1;
fFileSize = 0;
} else {
fFidIsReallyASocket = 0;
fFileSize = fileSize;
}
fNumFramesInFile = 0; // until we know otherwise
fIsVBR = fHasXingTOC = False; // ditto
// Set the first frame's 'presentation time' to the current wall time:
gettimeofday(&fNextFramePresentationTime, NULL);
}
struct timeval MP3StreamState::currentFramePlayTime() const {
unsigned const numSamples = 1152;
unsigned const freq = fr().samplingFreq*(1 + fr().isMPEG2);
// result is numSamples/freq
unsigned const uSeconds
= ((numSamples*2*MILLION)/freq + 1)/2; // rounds to nearest integer
struct timeval result;
result.tv_sec = uSeconds/MILLION;
result.tv_usec = uSeconds%MILLION;
return result;
}
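// Worked example of the rounding above: an MPEG-1 frame (1152 samples) at 44100 Hz plays for
// 1152/44100 seconds; ((1152*2*1000000)/44100 + 1)/2 == 26122 us (~26.12 ms). For MPEG-2,
// "freq" is doubled, which halves the result - equivalent to 576 samples per frame.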
float MP3StreamState::filePlayTime() const {
unsigned numFramesInFile = fNumFramesInFile;
if (numFramesInFile == 0) {
// Estimate the number of frames from the file size, and the
// size of the current frame:
numFramesInFile = fFileSize/(4 + fCurrentFrame.frameSize);
}
struct timeval const pt = currentFramePlayTime();
return numFramesInFile*(pt.tv_sec + pt.tv_usec/(float)MILLION);
}
unsigned MP3StreamState::getByteNumberFromPositionFraction(float fraction) {
if (fHasXingTOC) {
// The file is VBR, with a Xing TOC; use it to determine which byte to seek to:
float percent = fraction*100.0f;
unsigned a = (unsigned)percent;
if (a > 99) a = 99;
unsigned fa = fXingTOC[a];
unsigned fb;
if (a < 99) {
fb = fXingTOC[a+1];
} else {
fb = 256;
}
fraction = (fa + (fb-fa)*(percent-a))/256.0f;
}
return (unsigned)(fraction*fFileSize);
}
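// Worked example of the Xing TOC lookup above (illustrative numbers): for fraction 0.5 in a
// 4000000-byte VBR file, "percent" == 50.0 and a == 50, so we interpolate between fXingTOC[50]
// and fXingTOC[51] (TOC entries are byte offsets scaled to 0..255). If fXingTOC[50] were 120,
// the result would be (120/256.0)*4000000 == 1875000 bytes into the file.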
void MP3StreamState::seekWithinFile(unsigned seekByteNumber) {
if (fFidIsReallyASocket) return; // it's not seekable
SeekFile64(fFid, seekByteNumber, SEEK_SET);
}
unsigned MP3StreamState::findNextHeader(struct timeval& presentationTime) {
presentationTime = fNextFramePresentationTime;
if (!findNextFrame()) return 0;
// From this frame, figure out the *next* frame's presentation time:
struct timeval framePlayTime = currentFramePlayTime();
if (fPresentationTimeScale > 1) {
// Scale this value
unsigned secondsRem = framePlayTime.tv_sec % fPresentationTimeScale;
framePlayTime.tv_sec -= secondsRem;
framePlayTime.tv_usec += secondsRem*MILLION;
framePlayTime.tv_sec /= fPresentationTimeScale;
framePlayTime.tv_usec /= fPresentationTimeScale;
}
fNextFramePresentationTime.tv_usec += framePlayTime.tv_usec;
fNextFramePresentationTime.tv_sec
+= framePlayTime.tv_sec + fNextFramePresentationTime.tv_usec/MILLION;
fNextFramePresentationTime.tv_usec %= MILLION;
return fr().hdr;
}
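// Editor's note (added): a worked example of the scaling above. With
// fPresentationTimeScale = 2 and a frame play time of {1 s, 500000 us}:
// secondsRem = 1, so the value is first rewritten as {0 s, 1500000 us} and
// then divided to {0 s, 750000 us} - i.e. 1.5 s / 2, computed in integer
// arithmetic. Because tv_usec stays below scale*MILLION before the division,
// the result is always normalized (tv_usec < MILLION).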
Boolean MP3StreamState::readFrame(unsigned char* outBuf, unsigned outBufSize,
unsigned& resultFrameSize,
unsigned& resultDurationInMicroseconds) {
/* We assume that "mp3FindNextHeader()" has already been called */
resultFrameSize = 4 + fr().frameSize;
if (outBufSize < resultFrameSize) {
#ifdef DEBUG_ERRORS
fprintf(stderr, "Insufficient buffer size for reading input frame (%d, need %d)\n",
outBufSize, resultFrameSize);
#endif
if (outBufSize < 4) outBufSize = 0;
resultFrameSize = outBufSize;
return False;
}
if (resultFrameSize >= 4) {
unsigned& hdr = fr().hdr;
*outBuf++ = (unsigned char)(hdr>>24);
*outBuf++ = (unsigned char)(hdr>>16);
*outBuf++ = (unsigned char)(hdr>>8);
*outBuf++ = (unsigned char)(hdr);
memmove(outBuf, fr().frameBytes, resultFrameSize-4);
}
struct timeval const pt = currentFramePlayTime();
resultDurationInMicroseconds = pt.tv_sec*MILLION + pt.tv_usec;
return True;
}
void MP3StreamState::getAttributes(char* buffer, unsigned bufferSize) const {
char const* formatStr
= "bandwidth %d MPEGnumber %d MPEGlayer %d samplingFrequency %d isStereo %d playTime %d isVBR %d";
unsigned fpt = (unsigned)(filePlayTime() + 0.5); // rounds to nearest integer
#if defined(IRIX) || defined(ALPHA) || defined(_QNX4) || defined(IMN_PIM) || defined(CRIS)
/* snprintf() isn't defined, so just use sprintf() - ugh! */
sprintf(buffer, formatStr,
fr().bitrate, fr().isMPEG2 ? 2 : 1, fr().layer, fr().samplingFreq, fr().isStereo,
fpt, fIsVBR);
#else
snprintf(buffer, bufferSize, formatStr,
fr().bitrate, fr().isMPEG2 ? 2 : 1, fr().layer, fr().samplingFreq, fr().isStereo,
fpt, fIsVBR);
#endif
}
// This is crufty old code that needs to be cleaned up #####
#define HDRCMPMASK 0xfffffd00
Boolean MP3StreamState::findNextFrame() {
unsigned char hbuf[8];
unsigned l; int i;
int attempt = 0;
read_again:
if (readFromStream(hbuf, 4) != 4) return False;
fr().hdr = ((unsigned long) hbuf[0] << 24)
| ((unsigned long) hbuf[1] << 16)
| ((unsigned long) hbuf[2] << 8)
| (unsigned long) hbuf[3];
#ifdef DEBUG_PARSE
fprintf(stderr, "fr().hdr: 0x%08x\n", fr().hdr);
#endif
if (fr().oldHdr != fr().hdr || !fr().oldHdr) {
i = 0;
init_resync:
#ifdef DEBUG_PARSE
fprintf(stderr, "init_resync: fr().hdr: 0x%08x\n", fr().hdr);
#endif
if ( (fr().hdr & 0xffe00000) != 0xffe00000
|| (fr().hdr & 0x00060000) == 0 // undefined 'layer' field
|| (fr().hdr & 0x0000F000) == 0 // 'free format' bitrate index
|| (fr().hdr & 0x0000F000) == 0x0000F000 // undefined bitrate index
|| (fr().hdr & 0x00000C00) == 0x00000C00 // undefined frequency index
|| (fr().hdr & 0x00000003) != 0x00000000 // 'emphasis' field unexpectedly set
) {
/* RSF: Do the following test even if we're not at the
start of the file, in case we have two or more
separate MP3 files cat'ed together:
*/
/* Check for RIFF hdr */
if (fr().hdr == ('R'<<24)+('I'<<16)+('F'<<8)+'F') {
unsigned char buf[70 /*was: 40*/];
#ifdef DEBUG_ERRORS
fprintf(stderr,"Skipped RIFF header\n");
#endif
readFromStream(buf, 66); /* already read 4 */
goto read_again;
}
/* Check for ID3 hdr */
if ((fr().hdr&0xFFFFFF00) == ('I'<<24)+('D'<<16)+('3'<<8)) {
unsigned tagSize, bytesToSkip;
unsigned char buf[1000];
readFromStream(buf, 6); /* already read 4 */
tagSize = ((buf[2]&0x7F)<<21) + ((buf[3]&0x7F)<<14) + ((buf[4]&0x7F)<<7) + (buf[5]&0x7F);
bytesToSkip = tagSize;
while (bytesToSkip > 0) {
unsigned bytesToRead = sizeof buf;
if (bytesToRead > bytesToSkip) {
bytesToRead = bytesToSkip;
}
readFromStream(buf, bytesToRead);
bytesToSkip -= bytesToRead;
}
#ifdef DEBUG_ERRORS
fprintf(stderr,"Skipped %d-byte ID3 header\n", tagSize);
#endif
goto read_again;
}
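/* Editor's note (added): the tag size decoded above is an ID3v2 'syncsafe'
   integer - four bytes of 7 significant bits each, so that no byte of the
   size field can look like MPEG sync (0xFF). For example, the size bytes
   0x00 0x00 0x02 0x01 decode as (0<<21)+(0<<14)+(2<<7)+1 = 257 bytes of
   tag data to skip. */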
/* give up after 20,000 bytes */
if (i++ < 20000/*4096*//*1024*/) {
memmove (&hbuf[0], &hbuf[1], 3);
if (readFromStream(hbuf+3,1) != 1) {
return False;
}
fr().hdr <<= 8;
fr().hdr |= hbuf[3];
fr().hdr &= 0xffffffff;
#ifdef DEBUG_PARSE
fprintf(stderr, "calling init_resync %d\n", i);
#endif
goto init_resync;
}
#ifdef DEBUG_ERRORS
fprintf(stderr,"Giving up searching valid MPEG header\n");
#endif
return False;
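/* Editor's note (added): everything from here to the end of this block is
   unreachable - it sits after the unconditional "return False" above. It is
   legacy mpg123-style resync logic, retained verbatim in this 'crufty old
   code' (see the comment above "HDRCMPMASK"). */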
#ifdef DEBUG_ERRORS
fprintf(stderr,"Illegal Audio-MPEG-Header 0x%08lx at offset 0x%lx.\n",
fr().hdr,tell_stream(str)-4);
#endif
/* Read more bytes until we find something that looks
reasonably like a valid header. This is not a
perfect strategy, but it should get us back on the
track within a short time (and hopefully without
too much distortion in the audio output). */
do {
attempt++;
memmove (&hbuf[0], &hbuf[1], 7);
if (readFromStream(&hbuf[3],1) != 1) {
return False;
}
/* This is faster than combining fr().hdr from scratch */
fr().hdr = ((fr().hdr << 8) | hbuf[3]) & 0xffffffff;
if (!fr().oldHdr)
goto init_resync; /* "considered harmful", eh? */
} while ((fr().hdr & HDRCMPMASK) != (fr().oldHdr & HDRCMPMASK)
&& (fr().hdr & HDRCMPMASK) != (fr().firstHdr & HDRCMPMASK));
#ifdef DEBUG_ERRORS
fprintf (stderr, "Skipped %d bytes in input.\n", attempt);
#endif
}
if (!fr().firstHdr) {
fr().firstHdr = fr().hdr;
}
fr().setParamsFromHeader();
fr().setBytePointer(fr().frameBytes, fr().frameSize);
fr().oldHdr = fr().hdr;
if (fr().isFreeFormat) {
#ifdef DEBUG_ERRORS
fprintf(stderr,"Free format not supported.\n");
#endif
return False;
}
#ifdef MP3_ONLY
if (fr().layer != 3) {
#ifdef DEBUG_ERRORS
fprintf(stderr, "MPEG layer %d is not supported!\n", fr().layer);
#endif
return False;
}
#endif
}
if ((l = readFromStream(fr().frameBytes, fr().frameSize))
!= fr().frameSize) {
if (l == 0) return False;
memset(fr().frameBytes+l, 0, fr().frameSize-l); // zero-pad the short read ('l' bytes were actually read)
}
return True;
}
static Boolean socketIsReadable(int socket) {
const unsigned numFds = socket+1;
fd_set rd_set;
FD_ZERO(&rd_set);
FD_SET((unsigned)socket, &rd_set);
struct timeval timeout;
timeout.tv_sec = timeout.tv_usec = 0;
int result = select(numFds, &rd_set, NULL, NULL, &timeout);
return result != 0; // not > 0, because windows can return -1 for file sockets
}
static char watchVariable;
static void checkFunc(void* /*clientData*/) {
watchVariable = ~0;
}
static void waitUntilSocketIsReadable(UsageEnvironment& env, int socket) {
while (!socketIsReadable(socket)) {
// Delay a short period of time before checking again.
unsigned usecsToDelay = 1000; // 1 ms
env.taskScheduler().scheduleDelayedTask(usecsToDelay,
(TaskFunc*)checkFunc, (void*)NULL);
watchVariable = 0;
env.taskScheduler().doEventLoop(&watchVariable);
// This allows other tasks to run while we're waiting:
}
}
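// Editor's note (added): "doEventLoop()" above returns once its watch variable
// becomes non-zero; "checkFunc" is scheduled 1 ms in the future and does
// nothing but set that variable. Each loop iteration therefore yields to the
// scheduler for ~1 ms, letting other queued tasks run - the library's standard
// pattern for waiting 'politely' inside a single-threaded event loop.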
unsigned MP3StreamState::readFromStream(unsigned char* buf,
unsigned numChars) {
// Hack for doing socket I/O instead of file I/O (e.g., on Windows)
if (fFidIsReallyASocket) {
intptr_t fid_long = (intptr_t)fFid;
int sock = (int)fid_long;
unsigned totBytesRead = 0;
do {
waitUntilSocketIsReadable(fEnv, sock);
int bytesRead
= recv(sock, &((char*)buf)[totBytesRead], numChars-totBytesRead, 0);
if (bytesRead < 0) return 0;
totBytesRead += (unsigned)bytesRead;
} while (totBytesRead < numChars);
return totBytesRead;
} else {
#ifndef _WIN32_WCE
waitUntilSocketIsReadable(fEnv, (int)fileno(fFid));
#endif
return fread(buf, 1, numChars, fFid);
}
}
#define XING_FRAMES_FLAG 0x0001
#define XING_BYTES_FLAG 0x0002
#define XING_TOC_FLAG 0x0004
#define XING_VBR_SCALE_FLAG 0x0008
void MP3StreamState::checkForXingHeader() {
// Look for 'Xing' in the first 4 bytes after the 'side info':
if (fr().frameSize < fr().sideInfoSize) return;
unsigned bytesAvailable = fr().frameSize - fr().sideInfoSize;
unsigned char* p = &(fr().frameBytes[fr().sideInfoSize]);
if (bytesAvailable < 8) return;
if (p[0] != 'X' || p[1] != 'i' || p[2] != 'n' || p[3] != 'g') return;
// We found it.
fIsVBR = True;
u_int32_t flags = (p[4]<<24) | (p[5]<<16) | (p[6]<<8) | p[7];
unsigned i = 8;
bytesAvailable -= 8;
if (flags&XING_FRAMES_FLAG) {
// The next 4 bytes are the number of frames:
if (bytesAvailable < 4) return;
fNumFramesInFile = (p[i]<<24)|(p[i+1]<<16)|(p[i+2]<<8)|(p[i+3]);
i += 4; bytesAvailable -= 4;
}
if (flags&XING_BYTES_FLAG) {
// The next 4 bytes are the file size:
if (bytesAvailable < 4) return;
fFileSize = (p[i]<<24)|(p[i+1]<<16)|(p[i+2]<<8)|(p[i+3]);
i += 4; bytesAvailable -= 4;
}
if (flags&XING_TOC_FLAG) {
// Fill in the Xing 'table of contents':
if (bytesAvailable < XING_TOC_LENGTH) return;
fHasXingTOC = True;
for (unsigned j = 0; j < XING_TOC_LENGTH; ++j) {
fXingTOC[j] = p[i+j];
}
i += XING_TOC_LENGTH; bytesAvailable -= XING_TOC_LENGTH;
}
}
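// Editor's note (added): for reference, the Xing header parsed above is laid
// out as: bytes 0-3 "Xing"; bytes 4-7 flags (big-endian); then, in order, each
// optional field whose flag bit is set: frame count (4 bytes), file size
// (4 bytes), TOC (XING_TOC_LENGTH bytes), and a VBR 'scale' (4 bytes, present
// when XING_VBR_SCALE_FLAG is set but not used by this class).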
live/liveMedia/H264or5VideoStreamDiscreteFramer.cpp 000444 001751 000000 00000010657 12265042432 022407 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A simplified version of "H264or5VideoStreamFramer" that takes only complete,
// discrete frames (rather than an arbitrary byte stream) as input.
// This avoids the parsing and data copying overhead of the full
// "H264or5VideoStreamFramer".
// Implementation
#include "H264or5VideoStreamDiscreteFramer.hh"
H264or5VideoStreamDiscreteFramer
::H264or5VideoStreamDiscreteFramer(int hNumber, UsageEnvironment& env, FramedSource* inputSource)
: H264or5VideoStreamFramer(hNumber, env, inputSource, False/*don't create a parser*/, False) {
}
H264or5VideoStreamDiscreteFramer::~H264or5VideoStreamDiscreteFramer() {
}
void H264or5VideoStreamDiscreteFramer::doGetNextFrame() {
// Arrange to read data (which should be a complete H.264 or H.265 NAL unit)
// from our data source, directly into the client's input buffer.
// After reading this, we'll do some parsing on the frame.
fInputSource->getNextFrame(fTo, fMaxSize,
afterGettingFrame, this,
FramedSource::handleClosure, this);
}
void H264or5VideoStreamDiscreteFramer
::afterGettingFrame(void* clientData, unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
H264or5VideoStreamDiscreteFramer* source = (H264or5VideoStreamDiscreteFramer*)clientData;
source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
}
void H264or5VideoStreamDiscreteFramer
::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
// Get the "nal_unit_type", to see if this NAL unit is one that we want to save a copy of:
u_int8_t nal_unit_type;
if (fHNumber == 264 && frameSize >= 1) {
nal_unit_type = fTo[0]&0x1F;
} else if (fHNumber == 265 && frameSize >= 2) {
nal_unit_type = (fTo[0]&0x7E)>>1;
} else {
// This is too short to be a valid NAL unit, so just assume a bogus nal_unit_type
nal_unit_type = 0xFF;
}
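// Editor's note (added): a worked example of the extraction above. An H.264
// NAL unit beginning 0x67 has nal_unit_type 0x67&0x1F = 7 (an SPS); an H.265
// NAL unit beginning 0x40 0x01 has nal_unit_type (0x40&0x7E)>>1 = 32 (a VPS).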
// Begin by checking for a (likely) common error: NAL units that (erroneously) begin with a
// 0x00000001 or 0x000001 'start code'. (Those start codes should only be in byte-stream data;
// *not* data that consists of discrete NAL units.)
// Once again, to be clear: The NAL units that you feed to a "H264or5VideoStreamDiscreteFramer"
// MUST NOT include start codes.
if (frameSize >= 4 && fTo[0] == 0 && fTo[1] == 0 && ((fTo[2] == 0 && fTo[3] == 1) || fTo[2] == 1)) {
envir() << "H264or5VideoStreamDiscreteFramer error: MPEG 'start code' seen in the input\n";
} else if (isVPS(nal_unit_type)) { // Video parameter set (VPS)
saveCopyOfVPS(fTo, frameSize);
} else if (isSPS(nal_unit_type)) { // Sequence parameter set (SPS)
saveCopyOfSPS(fTo, frameSize);
} else if (isPPS(nal_unit_type)) { // Picture parameter set (PPS)
saveCopyOfPPS(fTo, frameSize);
}
// Next, check whether this NAL unit ends the current 'access unit' (basically, a video frame).
// Unfortunately, we can't do this reliably, because we don't yet know anything about the
// *next* NAL unit that we'll see. So, we guess this as best as we can, by assuming that
// if this NAL unit is a VCL NAL unit, then it ends the current 'access unit'.
if (isVCL(nal_unit_type)) fPictureEndMarker = True;
// Finally, complete delivery to the client:
fFrameSize = frameSize;
fNumTruncatedBytes = numTruncatedBytes;
fPresentationTime = presentationTime;
fDurationInMicroseconds = durationInMicroseconds;
afterGetting(this);
}
live/liveMedia/H265VideoFileServerMediaSubsession.cpp 000444 001751 000000 00000011122 12265042432 022757 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from a H265 video file.
// Implementation
#include "H265VideoFileServerMediaSubsession.hh"
#include "H265VideoRTPSink.hh"
#include "ByteStreamFileSource.hh"
#include "H265VideoStreamFramer.hh"
H265VideoFileServerMediaSubsession*
H265VideoFileServerMediaSubsession::createNew(UsageEnvironment& env,
char const* fileName,
Boolean reuseFirstSource) {
return new H265VideoFileServerMediaSubsession(env, fileName, reuseFirstSource);
}
H265VideoFileServerMediaSubsession::H265VideoFileServerMediaSubsession(UsageEnvironment& env,
char const* fileName, Boolean reuseFirstSource)
: FileServerMediaSubsession(env, fileName, reuseFirstSource),
fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) {
}
H265VideoFileServerMediaSubsession::~H265VideoFileServerMediaSubsession() {
delete[] fAuxSDPLine;
}
static void afterPlayingDummy(void* clientData) {
H265VideoFileServerMediaSubsession* subsess = (H265VideoFileServerMediaSubsession*)clientData;
subsess->afterPlayingDummy1();
}
void H265VideoFileServerMediaSubsession::afterPlayingDummy1() {
// Unschedule any pending 'checking' task:
envir().taskScheduler().unscheduleDelayedTask(nextTask());
// Signal the event loop that we're done:
setDoneFlag();
}
static void checkForAuxSDPLine(void* clientData) {
H265VideoFileServerMediaSubsession* subsess = (H265VideoFileServerMediaSubsession*)clientData;
subsess->checkForAuxSDPLine1();
}
void H265VideoFileServerMediaSubsession::checkForAuxSDPLine1() {
char const* dasl;
if (fAuxSDPLine != NULL) {
// Signal the event loop that we're done:
setDoneFlag();
} else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) {
fAuxSDPLine = strDup(dasl);
fDummyRTPSink = NULL;
// Signal the event loop that we're done:
setDoneFlag();
} else {
// try again after a brief delay:
int uSecsToDelay = 100000; // 100 ms
nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
(TaskFunc*)checkForAuxSDPLine, this);
}
}
char const* H265VideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client)
if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream
// Note: For H265 video files, the 'config' information (used for several payload-format
// specific parameters in the SDP description) isn't known until we start reading the file.
// This means that "rtpSink"s "auxSDPLine()" will be NULL initially,
// and we need to start reading data from our file until this changes.
fDummyRTPSink = rtpSink;
// Start reading the file:
fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);
// Check whether the sink's 'auxSDPLine()' is ready:
checkForAuxSDPLine(this);
}
envir().taskScheduler().doEventLoop(&fDoneFlag);
return fAuxSDPLine;
}
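// Editor's note (added): the pattern above - start 'playing' into a dummy sink,
// re-check "auxSDPLine()" on a 100 ms timer, and spin a nested "doEventLoop()"
// on "fDoneFlag" - exists because the VPS/SPS/PPS NAL units needed for the
// SDP 'sprop' parameters are only discovered once the framer has actually read
// them from the file. The H.264 subsession class uses the same idiom.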
FramedSource* H265VideoFileServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
estBitrate = 500; // kbps, estimate
// Create the video source:
ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName);
if (fileSource == NULL) return NULL;
fFileSize = fileSource->fileSize();
// Create a framer for the Video Elementary Stream:
return H265VideoStreamFramer::createNew(envir(), fileSource);
}
RTPSink* H265VideoFileServerMediaSubsession
::createNewRTPSink(Groupsock* rtpGroupsock,
unsigned char rtpPayloadTypeIfDynamic,
FramedSource* /*inputSource*/) {
return H265VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
}
live/liveMedia/DeviceSource.cpp 000444 001751 000000 00000015701 12265042432 016726 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A template for a MediaSource encapsulating an audio/video input device
//
// NOTE: Sections of this code labeled "%%% TO BE WRITTEN %%%" are incomplete, and need to be written by the programmer
// (depending on the features of the particular device).
// Implementation
#include "DeviceSource.hh"
#include <GroupsockHelper.hh> // for "gettimeofday()"
DeviceSource*
DeviceSource::createNew(UsageEnvironment& env,
DeviceParameters params) {
return new DeviceSource(env, params);
}
EventTriggerId DeviceSource::eventTriggerId = 0;
unsigned DeviceSource::referenceCount = 0;
DeviceSource::DeviceSource(UsageEnvironment& env,
DeviceParameters params)
: FramedSource(env), fParams(params) {
if (referenceCount == 0) {
// Any global initialization of the device would be done here:
//%%% TO BE WRITTEN %%%
}
++referenceCount;
// Any instance-specific initialization of the device would be done here:
//%%% TO BE WRITTEN %%%
// We arrange here for our "deliverFrame" member function to be called
// whenever the next frame of data becomes available from the device.
//
// If the device can be accessed as a readable socket, then one easy way to do this is using a call to
// envir().taskScheduler().turnOnBackgroundReadHandling( ... )
// (See examples of this call in the "liveMedia" directory.)
//
// If, however, the device *cannot* be accessed as a readable socket, then instead we can implement it using 'event triggers':
// Create an 'event trigger' for this device (if it hasn't already been done):
if (eventTriggerId == 0) {
eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
}
DeviceSource::~DeviceSource() {
// Any instance-specific 'destruction' (i.e., resetting) of the device would be done here:
//%%% TO BE WRITTEN %%%
--referenceCount;
if (referenceCount == 0) {
// Any global 'destruction' (i.e., resetting) of the device would be done here:
//%%% TO BE WRITTEN %%%
// Reclaim our 'event trigger'
envir().taskScheduler().deleteEventTrigger(eventTriggerId);
eventTriggerId = 0;
}
}
void DeviceSource::doGetNextFrame() {
// This function is called (by our 'downstream' object) when it asks for new data.
// Note: If, for some reason, the source device stops being readable (e.g., it gets closed), then you do the following:
if (0 /* the source stops being readable */ /*%%% TO BE WRITTEN %%%*/) {
handleClosure(this);
return;
}
// If a new frame of data is immediately available to be delivered, then do this now:
if (0 /* a new frame of data is immediately available to be delivered*/ /*%%% TO BE WRITTEN %%%*/) {
deliverFrame();
}
// No new data is immediately available to be delivered. We don't do anything more here.
// Instead, our event trigger must be called (e.g., from a separate thread) when new data becomes available.
}
void DeviceSource::deliverFrame0(void* clientData) {
((DeviceSource*)clientData)->deliverFrame();
}
void DeviceSource::deliverFrame() {
// This function is called when new frame data is available from the device.
// We deliver this data by copying it to the 'downstream' object, using the following parameters (class members):
// 'in' parameters (these should *not* be modified by this function):
// fTo: The frame data is copied to this address.
// (Note that the variable "fTo" is *not* modified. Instead,
// the frame data is copied to the address pointed to by "fTo".)
// fMaxSize: This is the maximum number of bytes that can be copied
// (If the actual frame is larger than this, then it should
// be truncated, and "fNumTruncatedBytes" set accordingly.)
// 'out' parameters (these are modified by this function):
// fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
// fNumTruncatedBytes: Should be set iff the delivered frame would have been
// bigger than "fMaxSize", in which case it's set to the number of bytes
// that have been omitted.
// fPresentationTime: Should be set to the frame's presentation time
// (seconds, microseconds). This time must be aligned with 'wall-clock time' - i.e., the time that you would get
// by calling "gettimeofday()".
// fDurationInMicroseconds: Should be set to the frame's duration, if known.
// If, however, the device is a 'live source' (e.g., encoded from a camera or microphone), then we probably don't need
// to set this variable, because - in this case - data will never arrive 'early'.
// Note the code below.
if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet
u_int8_t* newFrameDataStart = (u_int8_t*)0xDEADBEEF; //%%% TO BE WRITTEN %%%
unsigned newFrameSize = 0; //%%% TO BE WRITTEN %%%
// Deliver the data here:
if (newFrameSize > fMaxSize) {
fFrameSize = fMaxSize;
fNumTruncatedBytes = newFrameSize - fMaxSize;
} else {
fFrameSize = newFrameSize;
}
gettimeofday(&fPresentationTime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead.
// If the device is *not* a 'live source' (e.g., it comes instead from a file or buffer), then set "fDurationInMicroseconds" here.
memmove(fTo, newFrameDataStart, fFrameSize);
// After delivering the data, inform the reader that it is now available:
FramedSource::afterGetting(this);
}
// The following code would be called to signal that a new frame of data has become available.
// This (unlike other "LIVE555 Streaming Media" library code) may be called from a separate thread.
// (Note, however, that "triggerEvent()" cannot be called with the same 'event trigger id' from different threads.
// Also, if you want to have multiple device threads, each one using a different 'event trigger id', then you will need
// to make "eventTriggerId" a non-static member variable of "DeviceSource".)
void signalNewFrameData() {
TaskScheduler* ourScheduler = NULL; //%%% TO BE WRITTEN %%%
DeviceSource* ourDevice = NULL; //%%% TO BE WRITTEN %%%
if (ourScheduler != NULL) { // sanity check
ourScheduler->triggerEvent(DeviceSource::eventTriggerId, ourDevice);
}
}
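// Editor's note (added): a minimal sketch (not part of the original file) of a
// capture thread driving the event trigger; "captureThreadMain" and
// "myDeviceHasNewFrame" are hypothetical placeholders for device-specific code:
#if 0
void captureThreadMain() {
  for (;;) {
    if (!myDeviceHasNewFrame()) continue; // block/poll, device-specific
    // Hand off to the LIVE555 event-loop thread. "triggerEvent()" (called
    // inside "signalNewFrameData()") is the one scheduler operation that is
    // safe to invoke from another thread:
    signalNewFrameData();
  }
}
#endif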
live/liveMedia/MediaSource.cpp 000444 001751 000000 00000005052 12265042432 016544 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Media Sources
// Implementation
#include "MediaSource.hh"
////////// MediaSource //////////
MediaSource::MediaSource(UsageEnvironment& env)
: Medium(env) {
}
MediaSource::~MediaSource() {
}
Boolean MediaSource::isSource() const {
return True;
}
char const* MediaSource::MIMEtype() const {
return "application/OCTET-STREAM"; // default type
}
Boolean MediaSource::isFramedSource() const {
return False; // default implementation
}
Boolean MediaSource::isRTPSource() const {
return False; // default implementation
}
Boolean MediaSource::isMPEG1or2VideoStreamFramer() const {
return False; // default implementation
}
Boolean MediaSource::isMPEG4VideoStreamFramer() const {
return False; // default implementation
}
Boolean MediaSource::isH264VideoStreamFramer() const {
return False; // default implementation
}
Boolean MediaSource::isH265VideoStreamFramer() const {
return False; // default implementation
}
Boolean MediaSource::isDVVideoStreamFramer() const {
return False; // default implementation
}
Boolean MediaSource::isJPEGVideoSource() const {
return False; // default implementation
}
Boolean MediaSource::isAMRAudioSource() const {
return False; // default implementation
}
Boolean MediaSource::lookupByName(UsageEnvironment& env,
char const* sourceName,
MediaSource*& resultSource) {
resultSource = NULL; // unless we succeed
Medium* medium;
if (!Medium::lookupByName(env, sourceName, medium)) return False;
if (!medium->isSource()) {
env.setResultMsg(sourceName, " is not a media source");
return False;
}
resultSource = (MediaSource*)medium;
return True;
}
void MediaSource::getAttributes() const {
// Default implementation
envir().setResultMsg("");
}
live/liveMedia/COPYING 000755 001751 000000 00000000000 12265042432 016132 2../COPYING ustar 00rsf wheel 000000 000000 live/liveMedia/MP3FileSource.cpp 000444 001751 000000 00000012727 12265042432 016733 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// MP3 File Sources
// Implementation
#include "MP3FileSource.hh"
#include "MP3StreamState.hh"
#include "InputFile.hh"
////////// MP3FileSource //////////
MP3FileSource::MP3FileSource(UsageEnvironment& env, FILE* fid)
: FramedFileSource(env, fid),
fStreamState(new MP3StreamState(env)) {
}
MP3FileSource::~MP3FileSource() {
delete fStreamState;
}
char const* MP3FileSource::MIMEtype() const {
return "audio/MPEG";
}
MP3FileSource* MP3FileSource::createNew(UsageEnvironment& env, char const* fileName) {
MP3FileSource* newSource = NULL;
do {
FILE* fid;
fid = OpenInputFile(env, fileName);
if (fid == NULL) break;
newSource = new MP3FileSource(env, fid);
if (newSource == NULL) break;
unsigned fileSize = (unsigned)GetFileSize(fileName, fid);
newSource->assignStream(fid, fileSize);
if (!newSource->initializeStream()) break;
return newSource;
} while (0);
Medium::close(newSource);
return NULL;
}
float MP3FileSource::filePlayTime() const {
return fStreamState->filePlayTime();
}
unsigned MP3FileSource::fileSize() const {
return fStreamState->fileSize();
}
void MP3FileSource::setPresentationTimeScale(unsigned scale) {
fStreamState->setPresentationTimeScale(scale);
}
void MP3FileSource::seekWithinFile(double seekNPT, double streamDuration) {
float fileDuration = filePlayTime();
// First, make sure that 0.0 <= seekNPT <= seekNPT + streamDuration <= fileDuration
if (seekNPT < 0.0) {
seekNPT = 0.0;
} else if (seekNPT > fileDuration) {
seekNPT = fileDuration;
}
if (streamDuration < 0.0) {
streamDuration = 0.0;
} else if (seekNPT + streamDuration > fileDuration) {
streamDuration = fileDuration - seekNPT;
}
float seekFraction = (float)seekNPT/fileDuration;
unsigned seekByteNumber = fStreamState->getByteNumberFromPositionFraction(seekFraction);
fStreamState->seekWithinFile(seekByteNumber);
fLimitNumBytesToStream = False; // by default
if (streamDuration > 0.0) {
float endFraction = (float)(seekNPT + streamDuration)/fileDuration;
unsigned endByteNumber = fStreamState->getByteNumberFromPositionFraction(endFraction);
if (endByteNumber > seekByteNumber) { // sanity check
fNumBytesToStream = endByteNumber - seekByteNumber;
fLimitNumBytesToStream = True;
}
}
}
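// Editor's note (added): a worked example of the clamping and byte-range logic
// above. For a 100-second file with seekNPT = 30 and streamDuration = 10:
// seekFraction = 0.3 and endFraction = 0.4, so the source seeks to the byte
// for fraction 0.3 and streams (byte(0.4) - byte(0.3)) bytes. For a CBR file
// (no Xing TOC) those are simply 0.3*fFileSize and 0.4*fFileSize.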
void MP3FileSource::getAttributes() const {
char buffer[200];
fStreamState->getAttributes(buffer, sizeof buffer);
envir().setResultMsg(buffer);
}
void MP3FileSource::doGetNextFrame() {
if (!doGetNextFrame1()) {
handleClosure(this);
return;
}
// Switch to another task:
#if defined(__WIN32__) || defined(_WIN32)
// HACK: liveCaster/lc uses an implementation of scheduleDelayedTask()
// that performs very badly (chewing up lots of CPU time, apparently polling)
// on Windows. Until this is fixed, we just call our "afterGetting()"
// function directly. This avoids infinite recursion, as long as our sink
// is discontinuous, which is the case for the RTP sink that liveCaster/lc
// uses. #####
afterGetting(this);
#else
nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
(TaskFunc*)afterGetting, this);
#endif
}
Boolean MP3FileSource::doGetNextFrame1() {
if (fLimitNumBytesToStream && fNumBytesToStream == 0) return False; // we've already streamed as much as we were asked for
if (!fHaveJustInitialized) {
if (fStreamState->findNextHeader(fPresentationTime) == 0) return False;
} else {
fPresentationTime = fFirstFramePresentationTime;
fHaveJustInitialized = False;
}
if (!fStreamState->readFrame(fTo, fMaxSize, fFrameSize, fDurationInMicroseconds)) {
char tmp[200];
sprintf(tmp,
"Insufficient buffer size %d for reading MPEG audio frame (needed %d)\n",
fMaxSize, fFrameSize);
envir().setResultMsg(tmp);
fFrameSize = fMaxSize;
return False;
}
if (fNumBytesToStream > fFrameSize) fNumBytesToStream -= fFrameSize; else fNumBytesToStream = 0;
return True;
}
void MP3FileSource::assignStream(FILE* fid, unsigned fileSize) {
fStreamState->assignStream(fid, fileSize);
}
Boolean MP3FileSource::initializeStream() {
// Make sure the file has an appropriate header near the start:
if (fStreamState->findNextHeader(fFirstFramePresentationTime) == 0) {
envir().setResultMsg("not an MPEG audio file");
return False;
}
fStreamState->checkForXingHeader(); // in case this is a VBR file
fHaveJustInitialized = True;
fLimitNumBytesToStream = False;
fNumBytesToStream = 0;
// Hack: It's possible that our environment's 'result message' has been
// reset within this function, so set it again to our name now:
envir().setResultMsg(name());
return True;
}
live/liveMedia/FileSink.cpp 000444 001751 000000 00000012417 12265042432 016053 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// File sinks
// Implementation
#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
#include <io.h>
#include <fcntl.h>
#endif
#include "FileSink.hh"
#include "GroupsockHelper.hh"
#include "OutputFile.hh"
////////// FileSink //////////
FileSink::FileSink(UsageEnvironment& env, FILE* fid, unsigned bufferSize,
char const* perFrameFileNamePrefix)
: MediaSink(env), fOutFid(fid), fBufferSize(bufferSize), fSamePresentationTimeCounter(0) {
fBuffer = new unsigned char[bufferSize];
if (perFrameFileNamePrefix != NULL) {
fPerFrameFileNamePrefix = strDup(perFrameFileNamePrefix);
fPerFrameFileNameBuffer = new char[strlen(perFrameFileNamePrefix) + 100];
} else {
fPerFrameFileNamePrefix = NULL;
fPerFrameFileNameBuffer = NULL;
}
fPrevPresentationTime.tv_sec = ~0; fPrevPresentationTime.tv_usec = 0;
}
FileSink::~FileSink() {
delete[] fPerFrameFileNameBuffer;
delete[] fPerFrameFileNamePrefix;
delete[] fBuffer;
if (fOutFid != NULL) fclose(fOutFid);
}
FileSink* FileSink::createNew(UsageEnvironment& env, char const* fileName,
unsigned bufferSize, Boolean oneFilePerFrame) {
do {
FILE* fid;
char const* perFrameFileNamePrefix;
if (oneFilePerFrame) {
// Create the fid for each frame
fid = NULL;
perFrameFileNamePrefix = fileName;
} else {
// Normal case: create the fid once
fid = OpenOutputFile(env, fileName);
if (fid == NULL) break;
perFrameFileNamePrefix = NULL;
}
return new FileSink(env, fid, bufferSize, perFrameFileNamePrefix);
} while (0);
return NULL;
}
Boolean FileSink::continuePlaying() {
if (fSource == NULL) return False;
fSource->getNextFrame(fBuffer, fBufferSize,
afterGettingFrame, this,
onSourceClosure, this);
return True;
}
void FileSink::afterGettingFrame(void* clientData, unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned /*durationInMicroseconds*/) {
FileSink* sink = (FileSink*)clientData;
sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime);
}
void FileSink::addData(unsigned char const* data, unsigned dataSize,
struct timeval presentationTime) {
if (fPerFrameFileNameBuffer != NULL && fOutFid == NULL) {
// Special case: Open a new file on-the-fly for this frame
if (presentationTime.tv_usec == fPrevPresentationTime.tv_usec &&
presentationTime.tv_sec == fPrevPresentationTime.tv_sec) {
// The presentation time is unchanged from the previous frame, so we add a 'counter'
// suffix to the file name, to distinguish them:
sprintf(fPerFrameFileNameBuffer, "%s-%lu.%06lu-%u", fPerFrameFileNamePrefix,
presentationTime.tv_sec, presentationTime.tv_usec, ++fSamePresentationTimeCounter);
} else {
sprintf(fPerFrameFileNameBuffer, "%s-%lu.%06lu", fPerFrameFileNamePrefix,
presentationTime.tv_sec, presentationTime.tv_usec);
fPrevPresentationTime = presentationTime; // for next time
fSamePresentationTimeCounter = 0; // for next time
}
fOutFid = OpenOutputFile(envir(), fPerFrameFileNameBuffer);
}
// Write to our file:
#ifdef TEST_LOSS
static unsigned const framesPerPacket = 10;
static unsigned frameCount = 0; // not 'const': incremented below
static Boolean packetIsLost; // not 'const': assigned below
if ((frameCount++)%framesPerPacket == 0) {
packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss #####
}
if (!packetIsLost)
#endif
if (fOutFid != NULL && data != NULL) {
fwrite(data, 1, dataSize, fOutFid);
}
}
void FileSink::afterGettingFrame(unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime) {
if (numTruncatedBytes > 0) {
envir() << "FileSink::afterGettingFrame(): The input frame data was too large for our buffer size ("
<< fBufferSize << "). "
<< numTruncatedBytes << " bytes of trailing data was dropped! Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call to at least "
<< fBufferSize + numTruncatedBytes << "\n";
}
addData(fBuffer, frameSize, presentationTime);
if (fOutFid == NULL || fflush(fOutFid) == EOF) {
// The output file has closed. Handle this the same way as if the input source had closed:
if (fSource != NULL) fSource->stopGettingFrames();
onSourceClosure();
return;
}
if (fPerFrameFileNameBuffer != NULL) {
if (fOutFid != NULL) { fclose(fOutFid); fOutFid = NULL; }
}
// Then try getting the next frame:
continuePlaying();
}
live/liveMedia/RTPInterface.cpp 000444 001751 000000 00000054740 12265042432 016642 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// An abstraction of a network interface used for RTP (or RTCP).
// (This allows the RTP-over-TCP hack (RFC 2326, section 10.12) to
// be implemented transparently.)
// Implementation
#include "RTPInterface.hh"
#include <GroupsockHelper.hh>
#include <stdio.h>
////////// Helper Functions - Definition //////////
// Helper routines and data structures, used to implement
// sending/receiving RTP/RTCP over a TCP socket:
// Reading RTP-over-TCP is implemented using two levels of hash tables.
// The top-level hash table maps TCP socket numbers to a
// "SocketDescriptor" that contains a hash table for each of the
// sub-channels that are reading from this socket.
static HashTable* socketHashTable(UsageEnvironment& env, Boolean createIfNotPresent = True) {
_Tables* ourTables = _Tables::getOurTables(env, createIfNotPresent);
if (ourTables == NULL) return NULL;
if (ourTables->socketTable == NULL) {
// Create a new socket number -> SocketDescriptor mapping table:
ourTables->socketTable = HashTable::create(ONE_WORD_HASH_KEYS);
}
return (HashTable*)(ourTables->socketTable);
}
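// Editor's note (added): the resulting lookup path is therefore:
//   UsageEnvironment -> _Tables::socketTable (TCP socket number -> SocketDescriptor)
//     -> SocketDescriptor's sub-channel table (stream channel id -> RTPInterface)
// which is how one RTSP/TCP connection multiplexes many RTP/RTCP sub-channels.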
class SocketDescriptor {
public:
SocketDescriptor(UsageEnvironment& env, int socketNum);
virtual ~SocketDescriptor();
void registerRTPInterface(unsigned char streamChannelId,
RTPInterface* rtpInterface);
RTPInterface* lookupRTPInterface(unsigned char streamChannelId);
void deregisterRTPInterface(unsigned char streamChannelId);
void setServerRequestAlternativeByteHandler(ServerRequestAlternativeByteHandler* handler, void* clientData) {
fServerRequestAlternativeByteHandler = handler;
fServerRequestAlternativeByteHandlerClientData = clientData;
}
private:
static void tcpReadHandler(SocketDescriptor*, int mask);
Boolean tcpReadHandler1(int mask);
private:
UsageEnvironment& fEnv;
int fOurSocketNum;
HashTable* fSubChannelHashTable;
ServerRequestAlternativeByteHandler* fServerRequestAlternativeByteHandler;
void* fServerRequestAlternativeByteHandlerClientData;
u_int8_t fStreamChannelId, fSizeByte1;
Boolean fReadErrorOccurred, fDeleteMyselfNext, fAreInReadHandlerLoop;
enum { AWAITING_DOLLAR, AWAITING_STREAM_CHANNEL_ID, AWAITING_SIZE1, AWAITING_SIZE2, AWAITING_PACKET_DATA } fTCPReadingState;
};
static SocketDescriptor* lookupSocketDescriptor(UsageEnvironment& env, int sockNum, Boolean createIfNotFound = True) {
HashTable* table = socketHashTable(env, createIfNotFound);
if (table == NULL) return NULL;
char const* key = (char const*)(long)sockNum;
SocketDescriptor* socketDescriptor = (SocketDescriptor*)(table->Lookup(key));
if (socketDescriptor == NULL) {
if (createIfNotFound) {
socketDescriptor = new SocketDescriptor(env, sockNum);
table->Add((char const*)(long)(sockNum), socketDescriptor);
} else if (table->IsEmpty()) {
// We can also delete the table (to reclaim space):
_Tables* ourTables = _Tables::getOurTables(env);
delete table;
ourTables->socketTable = NULL;
ourTables->reclaimIfPossible();
}
}
return socketDescriptor;
}
static void removeSocketDescription(UsageEnvironment& env, int sockNum) {
char const* key = (char const*)(long)sockNum;
HashTable* table = socketHashTable(env);
table->Remove(key);
if (table->IsEmpty()) {
// We can also delete the table (to reclaim space):
_Tables* ourTables = _Tables::getOurTables(env);
delete table;
ourTables->socketTable = NULL;
ourTables->reclaimIfPossible();
}
}
////////// RTPInterface - Implementation //////////
RTPInterface::RTPInterface(Medium* owner, Groupsock* gs)
: fOwner(owner), fGS(gs),
fTCPStreams(NULL),
fNextTCPReadSize(0), fNextTCPReadStreamSocketNum(-1),
fNextTCPReadStreamChannelId(0xFF), fReadHandlerProc(NULL),
fAuxReadHandlerFunc(NULL), fAuxReadHandlerClientData(NULL) {
// Make the socket non-blocking, even though it will be read from only asynchronously, when packets arrive.
// The reason for this is that, in some OSs, reads on a blocking socket can (allegedly) sometimes block,
// even if the socket was previously reported (e.g., by "select()") as having data available.
// (This can supposedly happen if the UDP checksum fails, for example.)
makeSocketNonBlocking(fGS->socketNum());
increaseSendBufferTo(envir(), fGS->socketNum(), 50*1024);
}
RTPInterface::~RTPInterface() {
stopNetworkReading();
delete fTCPStreams;
}
void RTPInterface::setStreamSocket(int sockNum,
unsigned char streamChannelId) {
fGS->removeAllDestinations();
envir().taskScheduler().disableBackgroundHandling(fGS->socketNum()); // turn off any reading on our datagram socket
fGS->reset(); // and close our datagram socket, because we won't be using it anymore
addStreamSocket(sockNum, streamChannelId);
}
void RTPInterface::addStreamSocket(int sockNum,
unsigned char streamChannelId) {
if (sockNum < 0) return;
for (tcpStreamRecord* streams = fTCPStreams; streams != NULL;
streams = streams->fNext) {
if (streams->fStreamSocketNum == sockNum
&& streams->fStreamChannelId == streamChannelId) {
return; // we already have it
}
}
fTCPStreams = new tcpStreamRecord(sockNum, streamChannelId, fTCPStreams);
// Also, make sure this new socket is set up for receiving RTP/RTCP-over-TCP:
SocketDescriptor* socketDescriptor = lookupSocketDescriptor(envir(), sockNum);
socketDescriptor->registerRTPInterface(streamChannelId, this);
}
static void deregisterSocket(UsageEnvironment& env, int sockNum, unsigned char streamChannelId) {
SocketDescriptor* socketDescriptor = lookupSocketDescriptor(env, sockNum, False);
if (socketDescriptor != NULL) {
socketDescriptor->deregisterRTPInterface(streamChannelId);
// Note: This may delete "socketDescriptor",
// if no more interfaces are using this socket
}
}
void RTPInterface::removeStreamSocket(int sockNum,
unsigned char streamChannelId) {
for (tcpStreamRecord** streamsPtr = &fTCPStreams; *streamsPtr != NULL;
streamsPtr = &((*streamsPtr)->fNext)) {
if ((*streamsPtr)->fStreamSocketNum == sockNum
&& (*streamsPtr)->fStreamChannelId == streamChannelId) {
deregisterSocket(envir(), sockNum, streamChannelId);
// Then remove the record pointed to by *streamsPtr :
tcpStreamRecord* next = (*streamsPtr)->fNext;
(*streamsPtr)->fNext = NULL;
delete (*streamsPtr);
*streamsPtr = next;
return;
}
}
}
void RTPInterface::setServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum,
ServerRequestAlternativeByteHandler* handler, void* clientData) {
SocketDescriptor* socketDescriptor = lookupSocketDescriptor(env, socketNum, False);
if (socketDescriptor != NULL) socketDescriptor->setServerRequestAlternativeByteHandler(handler, clientData);
}
void RTPInterface::clearServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum) {
setServerRequestAlternativeByteHandler(env, socketNum, NULL, NULL);
}
Boolean RTPInterface::sendPacket(unsigned char* packet, unsigned packetSize) {
Boolean success = True; // we'll return False instead if any of the sends fail
// Normal case: Send as a UDP packet:
if (!fGS->output(envir(), fGS->ttl(), packet, packetSize)) success = False;
// Also, send over each of our TCP sockets:
for (tcpStreamRecord* streams = fTCPStreams; streams != NULL;
streams = streams->fNext) {
if (!sendRTPorRTCPPacketOverTCP(packet, packetSize,
streams->fStreamSocketNum, streams->fStreamChannelId)) {
success = False;
}
}
return success;
}
void RTPInterface
::startNetworkReading(TaskScheduler::BackgroundHandlerProc* handlerProc) {
// Normal case: Arrange to read UDP packets:
envir().taskScheduler().
turnOnBackgroundReadHandling(fGS->socketNum(), handlerProc, fOwner);
// Also, receive RTP over TCP, on each of our TCP connections:
fReadHandlerProc = handlerProc;
for (tcpStreamRecord* streams = fTCPStreams; streams != NULL;
streams = streams->fNext) {
// Get a socket descriptor for "streams->fStreamSocketNum":
SocketDescriptor* socketDescriptor = lookupSocketDescriptor(envir(), streams->fStreamSocketNum);
// Tell it about our subChannel:
socketDescriptor->registerRTPInterface(streams->fStreamChannelId, this);
}
}
Boolean RTPInterface::handleRead(unsigned char* buffer, unsigned bufferMaxSize,
unsigned& bytesRead, struct sockaddr_in& fromAddress, Boolean& packetReadWasIncomplete) {
packetReadWasIncomplete = False; // by default
Boolean readSuccess;
if (fNextTCPReadStreamSocketNum < 0) {
// Normal case: read from the (datagram) 'groupsock':
readSuccess = fGS->handleRead(buffer, bufferMaxSize, bytesRead, fromAddress);
} else {
// Read from the TCP connection:
bytesRead = 0;
unsigned totBytesToRead = fNextTCPReadSize;
if (totBytesToRead > bufferMaxSize) totBytesToRead = bufferMaxSize;
unsigned curBytesToRead = totBytesToRead;
int curBytesRead;
while ((curBytesRead = readSocket(envir(), fNextTCPReadStreamSocketNum,
&buffer[bytesRead], curBytesToRead,
fromAddress)) > 0) {
bytesRead += curBytesRead;
if (bytesRead >= totBytesToRead) break;
curBytesToRead -= curBytesRead;
}
fNextTCPReadSize -= bytesRead;
if (fNextTCPReadSize == 0) {
// We've read all of the data that we asked for
readSuccess = True;
} else if (curBytesRead < 0) {
// There was an error reading the socket
bytesRead = 0;
readSuccess = False;
} else {
// We need to read more bytes, and there was not an error reading the socket
packetReadWasIncomplete = True;
return True;
}
fNextTCPReadStreamSocketNum = -1; // default, for next time
}
if (readSuccess && fAuxReadHandlerFunc != NULL) {
// Also pass the newly-read packet data to our auxiliary handler:
(*fAuxReadHandlerFunc)(fAuxReadHandlerClientData, buffer, bytesRead);
}
return readSuccess;
}
void RTPInterface::stopNetworkReading() {
// Normal case
envir().taskScheduler().turnOffBackgroundReadHandling(fGS->socketNum());
// Also turn off read handling on each of our TCP connections:
for (tcpStreamRecord* streams = fTCPStreams; streams != NULL; streams = streams->fNext) {
deregisterSocket(envir(), streams->fStreamSocketNum, streams->fStreamChannelId);
}
}
////////// Helper Functions - Implementation /////////
Boolean RTPInterface::sendRTPorRTCPPacketOverTCP(u_int8_t* packet, unsigned packetSize,
int socketNum, unsigned char streamChannelId) {
#ifdef DEBUG_SEND
fprintf(stderr, "sendRTPorRTCPPacketOverTCP: %d bytes over channel %d (socket %d)\n",
packetSize, streamChannelId, socketNum); fflush(stderr);
#endif
// Send a RTP/RTCP packet over TCP, using the encoding defined in RFC 2326, section 10.12:
// $<streamChannelId><packetSize><packet data>
// (If the initial "send()" of '$' succeeds, then we force
// the subsequent "send()" for the data to succeed, even if we have to do so with
// a blocking "send()".)
do {
u_int8_t framingHeader[4];
framingHeader[0] = '$';
framingHeader[1] = streamChannelId;
framingHeader[2] = (u_int8_t) ((packetSize&0xFF00)>>8);
framingHeader[3] = (u_int8_t) (packetSize&0xFF);
if (!sendDataOverTCP(socketNum, framingHeader, 4, False)) break;
if (!sendDataOverTCP(socketNum, packet, packetSize, True)) break;
#ifdef DEBUG_SEND
fprintf(stderr, "sendRTPorRTCPPacketOverTCP: completed\n"); fflush(stderr);
#endif
return True;
} while (0);
#ifdef DEBUG_SEND
fprintf(stderr, "sendRTPorRTCPPacketOverTCP: failed! (errno %d)\n", envir().getErrno()); fflush(stderr);
#endif
return False;
}
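// Editor's note (added): a worked example of the framing above. A 1000-byte
// RTP packet on channel 0 is sent as the four bytes '$', 0x00, 0x03, 0xE8
// (0x03E8 == 1000, in network byte order), followed by the 1000 packet bytes
// on the same TCP stream.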
Boolean RTPInterface::sendDataOverTCP(int socketNum, u_int8_t const* data, unsigned dataSize, Boolean forceSendToSucceed) {
int sendResult = send(socketNum, (char const*)data, dataSize, 0/*flags*/);
if (sendResult < (int)dataSize) {
// The TCP send() failed - at least partially.
unsigned numBytesSentSoFar = sendResult < 0 ? 0 : (unsigned)sendResult;
if (numBytesSentSoFar > 0 || (forceSendToSucceed && envir().getErrno() == EAGAIN)) {
// The OS's TCP send buffer has filled up (because the stream's bitrate has exceeded
// the capacity of the TCP connection!).
// Force this data write to succeed, by blocking if necessary until it does:
unsigned numBytesRemainingToSend = dataSize - numBytesSentSoFar;
#ifdef DEBUG_SEND
fprintf(stderr, "sendDataOverTCP: resending %d-byte send (blocking)\n", numBytesRemainingToSend); fflush(stderr);
#endif
makeSocketBlocking(socketNum);
sendResult = send(socketNum, (char const*)(&data[numBytesSentSoFar]), numBytesRemainingToSend, 0/*flags*/);
makeSocketNonBlocking(socketNum);
return sendResult == (int)numBytesRemainingToSend;
}
return False;
}
return True;
}
SocketDescriptor::SocketDescriptor(UsageEnvironment& env, int socketNum)
:fEnv(env), fOurSocketNum(socketNum),
fSubChannelHashTable(HashTable::create(ONE_WORD_HASH_KEYS)),
fServerRequestAlternativeByteHandler(NULL), fServerRequestAlternativeByteHandlerClientData(NULL),
fReadErrorOccurred(False), fDeleteMyselfNext(False), fAreInReadHandlerLoop(False), fTCPReadingState(AWAITING_DOLLAR) {
}
SocketDescriptor::~SocketDescriptor() {
fEnv.taskScheduler().turnOffBackgroundReadHandling(fOurSocketNum);
removeSocketDescription(fEnv, fOurSocketNum);
if (fSubChannelHashTable != NULL) {
// Remove knowledge of this socket from any "RTPInterface"s that are using it:
HashTable::Iterator* iter = HashTable::Iterator::create(*fSubChannelHashTable);
RTPInterface* rtpInterface;
char const* key;
while ((rtpInterface = (RTPInterface*)(iter->next(key))) != NULL) {
u_int64_t streamChannelIdLong = (u_int64_t)key;
unsigned char streamChannelId = (unsigned char)streamChannelIdLong;
rtpInterface->removeStreamSocket(fOurSocketNum, streamChannelId);
}
delete iter;
// Then remove the hash table entries themselves, and then remove the hash table:
while (fSubChannelHashTable->RemoveNext() != NULL) {}
delete fSubChannelHashTable;
}
// Finally:
if (fServerRequestAlternativeByteHandler != NULL) {
// Hack: Pass a special character to our alternative byte handler, to tell it that either
// - an error occurred when reading the TCP socket, or
// - no error occurred, but it needs to take over control of the TCP socket once again.
u_int8_t specialChar = fReadErrorOccurred ? 0xFF : 0xFE;
(*fServerRequestAlternativeByteHandler)(fServerRequestAlternativeByteHandlerClientData, specialChar);
}
}
void SocketDescriptor::registerRTPInterface(unsigned char streamChannelId,
RTPInterface* rtpInterface) {
Boolean isFirstRegistration = fSubChannelHashTable->IsEmpty();
#if defined(DEBUG_SEND)||defined(DEBUG_RECEIVE)
fprintf(stderr, "SocketDescriptor(socket %d)::registerRTPInterface(channel %d): isFirstRegistration %d\n", fOurSocketNum, streamChannelId, isFirstRegistration);
#endif
fSubChannelHashTable->Add((char const*)(long)streamChannelId,
rtpInterface);
if (isFirstRegistration) {
// Arrange to handle reads on this TCP socket:
TaskScheduler::BackgroundHandlerProc* handler
= (TaskScheduler::BackgroundHandlerProc*)&tcpReadHandler;
fEnv.taskScheduler().
setBackgroundHandling(fOurSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION, handler, this);
}
}
RTPInterface* SocketDescriptor
::lookupRTPInterface(unsigned char streamChannelId) {
char const* lookupArg = (char const*)(long)streamChannelId;
return (RTPInterface*)(fSubChannelHashTable->Lookup(lookupArg));
}
void SocketDescriptor
::deregisterRTPInterface(unsigned char streamChannelId) {
#if defined(DEBUG_SEND)||defined(DEBUG_RECEIVE)
fprintf(stderr, "SocketDescriptor(socket %d)::deregisterRTPInterface(channel %d)\n", fOurSocketNum, streamChannelId);
#endif
fSubChannelHashTable->Remove((char const*)(long)streamChannelId);
if (fSubChannelHashTable->IsEmpty()) {
// No more interfaces are using us, so it's curtains for us now:
if (fAreInReadHandlerLoop) {
fDeleteMyselfNext = True; // we can't delete ourself yet, but we'll do so from "tcpReadHandler()" below
} else {
delete this;
}
}
}
void SocketDescriptor::tcpReadHandler(SocketDescriptor* socketDescriptor, int mask) {
// Call the read handler until it returns false, with a limit to avoid starving other sockets
unsigned count = 2000;
socketDescriptor->fAreInReadHandlerLoop = True;
while (!socketDescriptor->fDeleteMyselfNext && socketDescriptor->tcpReadHandler1(mask) && --count > 0) {}
socketDescriptor->fAreInReadHandlerLoop = False;
if (socketDescriptor->fDeleteMyselfNext) delete socketDescriptor;
}
Boolean SocketDescriptor::tcpReadHandler1(int mask) {
// We expect the following data over the TCP channel:
// optional RTSP command or response bytes (before the first '$' character)
// a '$' character
// a 1-byte channel id
// a 2-byte packet size (in network byte order)
// the packet data.
// However, because the socket is being read asynchronously, this data might arrive in pieces.
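// Editor's note (added): tracing this state machine against the framing
// example in "sendRTPorRTCPPacketOverTCP()": the bytes '$', 0x00, 0x03, 0xE8
// move the reader AWAITING_DOLLAR -> AWAITING_STREAM_CHANNEL_ID ->
// AWAITING_SIZE1 -> AWAITING_SIZE2, recording channel 0 and size 1000; the
// 1000 packet bytes are then consumed in AWAITING_PACKET_DATA (possibly
// across several reads) before the state returns to AWAITING_DOLLAR.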
u_int8_t c;
struct sockaddr_in fromAddress;
if (fTCPReadingState != AWAITING_PACKET_DATA) {
int result = readSocket(fEnv, fOurSocketNum, &c, 1, fromAddress);
if (result == 0) { // There was no more data to read
return False;
} else if (result != 1) { // error reading TCP socket, so we will no longer handle it
#ifdef DEBUG_RECEIVE
fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): readSocket(1 byte) returned %d (error)\n", fOurSocketNum, result);
#endif
fReadErrorOccurred = True;
fDeleteMyselfNext = True;
return False;
}
}
Boolean callAgain = True;
switch (fTCPReadingState) {
case AWAITING_DOLLAR: {
if (c == '$') {
#ifdef DEBUG_RECEIVE
fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): Saw '$'\n", fOurSocketNum);
#endif
fTCPReadingState = AWAITING_STREAM_CHANNEL_ID;
} else {
// This character is part of a RTSP request or command, which is handled separately:
if (fServerRequestAlternativeByteHandler != NULL && c != 0xFF && c != 0xFE) {
// Hack: 0xFF and 0xFE are used as special signaling characters, so don't send them
(*fServerRequestAlternativeByteHandler)(fServerRequestAlternativeByteHandlerClientData, c);
}
}
break;
}
case AWAITING_STREAM_CHANNEL_ID: {
// The byte that we read is the stream channel id.
if (lookupRTPInterface(c) != NULL) { // sanity check
fStreamChannelId = c;
fTCPReadingState = AWAITING_SIZE1;
} else {
// This wasn't a stream channel id that we expected. We're (somehow) in a strange state. Try to recover:
#ifdef DEBUG_RECEIVE
fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): Saw nonexistent stream channel id: 0x%02x\n", fOurSocketNum, c);
#endif
fTCPReadingState = AWAITING_DOLLAR;
}
break;
}
case AWAITING_SIZE1: {
// The byte that we read is the first (high) byte of the 16-bit RTP or RTCP packet 'size'.
fSizeByte1 = c;
fTCPReadingState = AWAITING_SIZE2;
break;
}
case AWAITING_SIZE2: {
// The byte that we read is the second (low) byte of the 16-bit RTP or RTCP packet 'size'.
unsigned short size = (fSizeByte1<<8)|c;
// Record the information about the packet data that will be read next:
RTPInterface* rtpInterface = lookupRTPInterface(fStreamChannelId);
if (rtpInterface != NULL) {
rtpInterface->fNextTCPReadSize = size;
rtpInterface->fNextTCPReadStreamSocketNum = fOurSocketNum;
rtpInterface->fNextTCPReadStreamChannelId = fStreamChannelId;
}
fTCPReadingState = AWAITING_PACKET_DATA;
break;
}
case AWAITING_PACKET_DATA: {
callAgain = False;
fTCPReadingState = AWAITING_DOLLAR; // the next state, unless we end up having to read more data in the current state
// Call the appropriate read handler to get the packet data from the TCP stream:
RTPInterface* rtpInterface = lookupRTPInterface(fStreamChannelId);
if (rtpInterface != NULL) {
if (rtpInterface->fNextTCPReadSize == 0) {
// We've already read all the data for this packet.
break;
}
if (rtpInterface->fReadHandlerProc != NULL) {
#ifdef DEBUG_RECEIVE
fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): reading %d bytes on channel %d\n", fOurSocketNum, rtpInterface->fNextTCPReadSize, rtpInterface->fNextTCPReadStreamChannelId);
#endif
fTCPReadingState = AWAITING_PACKET_DATA;
rtpInterface->fReadHandlerProc(rtpInterface->fOwner, mask);
} else {
#ifdef DEBUG_RECEIVE
fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): No handler proc for \"rtpInterface\" for channel %d; need to skip %d remaining bytes\n", fOurSocketNum, fStreamChannelId, rtpInterface->fNextTCPReadSize);
#endif
int result = readSocket(fEnv, fOurSocketNum, &c, 1, fromAddress);
if (result < 0) { // error reading TCP socket, so we will no longer handle it
#ifdef DEBUG_RECEIVE
fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): readSocket(1 byte) returned %d (error)\n", fOurSocketNum, result);
#endif
fReadErrorOccurred = True;
fDeleteMyselfNext = True;
return False;
} else {
fTCPReadingState = AWAITING_PACKET_DATA;
if (result == 1) {
--rtpInterface->fNextTCPReadSize;
callAgain = True;
}
}
}
}
#ifdef DEBUG_RECEIVE
else fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): No \"rtpInterface\" for channel %d\n", fOurSocketNum, fStreamChannelId);
#endif
}
}
return callAgain;
}
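// Illustration only: a minimal sketch of the *sender's* side of the framing
// that "tcpReadHandler1()" parses above. The helper name
// "sendInterleavedFrame" is an assumption, not a library API (the library
// performs the equivalent framing elsewhere in this file); it uses the
// plain POSIX send() call:
static void sendInterleavedFrame(int socketNum, unsigned char streamChannelId,
				 u_int8_t const* packet, u_int16_t packetSize) {
  u_int8_t framingHeader[4];
  framingHeader[0] = '$'; // the framing marker
  framingHeader[1] = streamChannelId; // the 1-byte channel id
  framingHeader[2] = (u_int8_t)((packetSize&0xFF00)>>8); // packet size: high byte
  framingHeader[3] = (u_int8_t)(packetSize&0xFF); // packet size: low byte
  send(socketNum, (char const*)framingHeader, 4, 0/*flags*/);
  send(socketNum, (char const*)packet, packetSize, 0/*flags*/);
}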
////////// tcpStreamRecord implementation //////////
tcpStreamRecord
::tcpStreamRecord(int streamSocketNum, unsigned char streamChannelId,
tcpStreamRecord* next)
: fNext(next),
fStreamSocketNum(streamSocketNum), fStreamChannelId(streamChannelId) {
}
tcpStreamRecord::~tcpStreamRecord() {
delete fNext;
}
live/liveMedia/MediaSink.cpp 000400 001751 000000 00000014513 12265042432 016202 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Media Sinks
// Implementation
#include "MediaSink.hh"
#include "GroupsockHelper.hh"
#include <string.h>
////////// MediaSink //////////
MediaSink::MediaSink(UsageEnvironment& env)
: Medium(env), fSource(NULL) {
}
MediaSink::~MediaSink() {
stopPlaying();
}
Boolean MediaSink::isSink() const {
return True;
}
Boolean MediaSink::lookupByName(UsageEnvironment& env, char const* sinkName,
MediaSink*& resultSink) {
resultSink = NULL; // unless we succeed
Medium* medium;
if (!Medium::lookupByName(env, sinkName, medium)) return False;
if (!medium->isSink()) {
env.setResultMsg(sinkName, " is not a media sink");
return False;
}
resultSink = (MediaSink*)medium;
return True;
}
Boolean MediaSink::sourceIsCompatibleWithUs(MediaSource& source) {
// We currently support only framed sources.
return source.isFramedSource();
}
Boolean MediaSink::startPlaying(MediaSource& source,
afterPlayingFunc* afterFunc,
void* afterClientData) {
// Make sure we're not already being played:
if (fSource != NULL) {
envir().setResultMsg("This sink is already being played");
return False;
}
// Make sure our source is compatible:
if (!sourceIsCompatibleWithUs(source)) {
envir().setResultMsg("MediaSink::startPlaying(): source is not compatible!");
return False;
}
fSource = (FramedSource*)&source;
fAfterFunc = afterFunc;
fAfterClientData = afterClientData;
return continuePlaying();
}
void MediaSink::stopPlaying() {
// First, tell the source that we're no longer interested:
if (fSource != NULL) fSource->stopGettingFrames();
// Cancel any pending tasks:
envir().taskScheduler().unscheduleDelayedTask(nextTask());
fSource = NULL; // indicates that we can be played again
fAfterFunc = NULL;
}
void MediaSink::onSourceClosure(void* clientData) {
MediaSink* sink = (MediaSink*)clientData;
sink->onSourceClosure();
}
void MediaSink::onSourceClosure() {
// Cancel any pending tasks:
envir().taskScheduler().unscheduleDelayedTask(nextTask());
fSource = NULL; // indicates that we can be played again
if (fAfterFunc != NULL) {
(*fAfterFunc)(fAfterClientData);
}
}
Boolean MediaSink::isRTPSink() const {
return False; // default implementation
}
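// Illustration only: the typical client pattern for driving a "MediaSink".
// ("afterPlaying" and "playExample" are client-side sketches, not part of
// the library; any concrete sink and framed-source subclasses could be
// substituted.)
static void afterPlaying(void* clientData) {
  MediaSink* sink = (MediaSink*)clientData;
  sink->stopPlaying(); // resets the sink's source, so it can be played again
  Medium::close(sink);
}
static Boolean playExample(MediaSink* sink, FramedSource* source) {
  // On failure ("sink->envir().getResultMsg()" explains why), the sink was
  // either already being played, or the source was incompatible:
  return sink->startPlaying(*source, afterPlaying, sink);
}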
////////// OutPacketBuffer //////////
unsigned OutPacketBuffer::maxSize = 60000; // by default
OutPacketBuffer::OutPacketBuffer(unsigned preferredPacketSize,
unsigned maxPacketSize)
: fPreferred(preferredPacketSize), fMax(maxPacketSize),
fOverflowDataSize(0) {
unsigned maxNumPackets = (maxSize + (maxPacketSize-1))/maxPacketSize;
fLimit = maxNumPackets*maxPacketSize;
fBuf = new unsigned char[fLimit];
resetPacketStart();
resetOffset();
resetOverflowData();
}
OutPacketBuffer::~OutPacketBuffer() {
delete[] fBuf;
}
void OutPacketBuffer::enqueue(unsigned char const* from, unsigned numBytes) {
if (numBytes > totalBytesAvailable()) {
#ifdef DEBUG
fprintf(stderr, "OutPacketBuffer::enqueue() warning: %d > %d\n", numBytes, totalBytesAvailable());
#endif
numBytes = totalBytesAvailable();
}
if (curPtr() != from) memmove(curPtr(), from, numBytes);
increment(numBytes);
}
void OutPacketBuffer::enqueueWord(u_int32_t word) {
u_int32_t nWord = htonl(word);
enqueue((unsigned char*)&nWord, 4);
}
void OutPacketBuffer::insert(unsigned char const* from, unsigned numBytes,
unsigned toPosition) {
unsigned realToPosition = fPacketStart + toPosition;
if (realToPosition + numBytes > fLimit) {
if (realToPosition > fLimit) return; // we can't do this
numBytes = fLimit - realToPosition;
}
memmove(&fBuf[realToPosition], from, numBytes);
if (toPosition + numBytes > fCurOffset) {
fCurOffset = toPosition + numBytes;
}
}
void OutPacketBuffer::insertWord(u_int32_t word, unsigned toPosition) {
u_int32_t nWord = htonl(word);
insert((unsigned char*)&nWord, 4, toPosition);
}
void OutPacketBuffer::extract(unsigned char* to, unsigned numBytes,
unsigned fromPosition) {
unsigned realFromPosition = fPacketStart + fromPosition;
if (realFromPosition + numBytes > fLimit) { // sanity check
if (realFromPosition > fLimit) return; // we can't do this
numBytes = fLimit - realFromPosition;
}
memmove(to, &fBuf[realFromPosition], numBytes);
}
u_int32_t OutPacketBuffer::extractWord(unsigned fromPosition) {
u_int32_t nWord;
extract((unsigned char*)&nWord, 4, fromPosition);
return ntohl(nWord);
}
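// Illustration only: "enqueueWord()" stores in network byte order and
// "extractWord()" converts back, so a word round-trips unchanged regardless
// of the host's endianness. (The buffer sizes here are arbitrary.)
static u_int32_t wordRoundTripExample() {
  OutPacketBuffer buf(1000/*preferred packet size*/, 1448/*max packet size*/);
  buf.enqueueWord(0x01020304); // appends the bytes 0x01,0x02,0x03,0x04
  return buf.extractWord(0); // == 0x01020304 on any host
}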
void OutPacketBuffer::skipBytes(unsigned numBytes) {
if (numBytes > totalBytesAvailable()) {
numBytes = totalBytesAvailable();
}
increment(numBytes);
}
void OutPacketBuffer
::setOverflowData(unsigned overflowDataOffset,
unsigned overflowDataSize,
struct timeval const& presentationTime,
unsigned durationInMicroseconds) {
fOverflowDataOffset = overflowDataOffset;
fOverflowDataSize = overflowDataSize;
fOverflowPresentationTime = presentationTime;
fOverflowDurationInMicroseconds = durationInMicroseconds;
}
void OutPacketBuffer::useOverflowData() {
enqueue(&fBuf[fPacketStart + fOverflowDataOffset], fOverflowDataSize);
fCurOffset -= fOverflowDataSize; // undoes increment performed by "enqueue"
resetOverflowData();
}
void OutPacketBuffer::adjustPacketStart(unsigned numBytes) {
fPacketStart += numBytes;
if (fOverflowDataOffset >= numBytes) {
fOverflowDataOffset -= numBytes;
} else {
fOverflowDataOffset = 0;
fOverflowDataSize = 0; // an error otherwise
}
}
void OutPacketBuffer::resetPacketStart() {
if (fOverflowDataSize > 0) {
fOverflowDataOffset += fPacketStart;
}
fPacketStart = 0;
}
live/liveMedia/Media.cpp 000444 001751 000000 00000011033 12265042432 015357 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Media
// Implementation
#include "Media.hh"
#include "HashTable.hh"
////////// Medium //////////
Medium::Medium(UsageEnvironment& env)
: fEnviron(env), fNextTask(NULL) {
// First generate a name for the new medium:
MediaLookupTable::ourMedia(env)->generateNewName(fMediumName, mediumNameMaxLen);
env.setResultMsg(fMediumName);
// Then add it to our table:
MediaLookupTable::ourMedia(env)->addNew(this, fMediumName);
}
Medium::~Medium() {
// Remove any tasks that might be pending for us:
fEnviron.taskScheduler().unscheduleDelayedTask(fNextTask);
}
Boolean Medium::lookupByName(UsageEnvironment& env, char const* mediumName,
Medium*& resultMedium) {
resultMedium = MediaLookupTable::ourMedia(env)->lookup(mediumName);
if (resultMedium == NULL) {
env.setResultMsg("Medium ", mediumName, " does not exist");
return False;
}
return True;
}
void Medium::close(UsageEnvironment& env, char const* name) {
MediaLookupTable::ourMedia(env)->remove(name);
}
void Medium::close(Medium* medium) {
if (medium == NULL) return;
close(medium->envir(), medium->name());
}
Boolean Medium::isSource() const {
return False; // default implementation
}
Boolean Medium::isSink() const {
return False; // default implementation
}
Boolean Medium::isRTCPInstance() const {
return False; // default implementation
}
Boolean Medium::isRTSPClient() const {
return False; // default implementation
}
Boolean Medium::isRTSPServer() const {
return False; // default implementation
}
Boolean Medium::isMediaSession() const {
return False; // default implementation
}
Boolean Medium::isServerMediaSession() const {
return False; // default implementation
}
Boolean Medium::isDarwinInjector() const {
return False; // default implementation
}
////////// _Tables implementation //////////
_Tables* _Tables::getOurTables(UsageEnvironment& env, Boolean createIfNotPresent) {
if (env.liveMediaPriv == NULL && createIfNotPresent) {
env.liveMediaPriv = new _Tables(env);
}
return (_Tables*)(env.liveMediaPriv);
}
void _Tables::reclaimIfPossible() {
if (mediaTable == NULL && socketTable == NULL) {
fEnv.liveMediaPriv = NULL;
delete this;
}
}
_Tables::_Tables(UsageEnvironment& env)
: mediaTable(NULL), socketTable(NULL), fEnv(env) {
}
_Tables::~_Tables() {
}
////////// MediaLookupTable implementation //////////
MediaLookupTable* MediaLookupTable::ourMedia(UsageEnvironment& env) {
_Tables* ourTables = _Tables::getOurTables(env);
if (ourTables->mediaTable == NULL) {
// Create a new table to record the media that are to be created in
// this environment:
ourTables->mediaTable = new MediaLookupTable(env);
}
return ourTables->mediaTable;
}
Medium* MediaLookupTable::lookup(char const* name) const {
return (Medium*)(fTable->Lookup(name));
}
void MediaLookupTable::addNew(Medium* medium, char* mediumName) {
fTable->Add(mediumName, (void*)medium);
}
void MediaLookupTable::remove(char const* name) {
Medium* medium = lookup(name);
if (medium != NULL) {
fTable->Remove(name);
if (fTable->IsEmpty()) {
// We can also delete ourselves (to reclaim space):
_Tables* ourTables = _Tables::getOurTables(fEnv);
delete this;
ourTables->mediaTable = NULL;
ourTables->reclaimIfPossible();
}
delete medium;
}
}
void MediaLookupTable::generateNewName(char* mediumName,
unsigned /*maxLen*/) {
// We should really use snprintf() here, but not all systems have it
sprintf(mediumName, "liveMedia%d", fNameGenerator++);
}
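// (On a platform known to have snprintf(), the bounds-checked equivalent of
// the call above would be
//   snprintf(mediumName, maxLen, "liveMedia%d", fNameGenerator++);
// using the "maxLen" parameter that this version deliberately ignores.)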
MediaLookupTable::MediaLookupTable(UsageEnvironment& env)
: fEnv(env), fTable(HashTable::create(STRING_HASH_KEYS)), fNameGenerator(0) {
}
MediaLookupTable::~MediaLookupTable() {
delete fTable;
}
live/liveMedia/ServerMediaSession.cpp 000444 001751 000000 00000035351 12265042432 020123 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A data structure that represents a session that consists of
// potentially multiple (audio and/or video) sub-sessions
// (This data structure is used for media *streamers* - i.e., servers.
// For media receivers, use "MediaSession" instead.)
// Implementation
#include "ServerMediaSession.hh"
#include <GroupsockHelper.hh>
#include <math.h>
////////// ServerMediaSession //////////
ServerMediaSession* ServerMediaSession
::createNew(UsageEnvironment& env,
char const* streamName, char const* info,
char const* description, Boolean isSSM, char const* miscSDPLines) {
return new ServerMediaSession(env, streamName, info, description,
isSSM, miscSDPLines);
}
Boolean ServerMediaSession
::lookupByName(UsageEnvironment& env, char const* mediumName,
ServerMediaSession*& resultSession) {
resultSession = NULL; // unless we succeed
Medium* medium;
if (!Medium::lookupByName(env, mediumName, medium)) return False;
if (!medium->isServerMediaSession()) {
env.setResultMsg(mediumName, " is not a 'ServerMediaSession' object");
return False;
}
resultSession = (ServerMediaSession*)medium;
return True;
}
static char const* const libNameStr = "LIVE555 Streaming Media v";
char const* const libVersionStr = LIVEMEDIA_LIBRARY_VERSION_STRING;
ServerMediaSession::ServerMediaSession(UsageEnvironment& env,
char const* streamName,
char const* info,
char const* description,
Boolean isSSM, char const* miscSDPLines)
: Medium(env), fIsSSM(isSSM), fSubsessionsHead(NULL),
fSubsessionsTail(NULL), fSubsessionCounter(0),
fReferenceCount(0), fDeleteWhenUnreferenced(False) {
fStreamName = strDup(streamName == NULL ? "" : streamName);
char* libNamePlusVersionStr = NULL; // by default
if (info == NULL || description == NULL) {
libNamePlusVersionStr = new char[strlen(libNameStr) + strlen(libVersionStr) + 1];
sprintf(libNamePlusVersionStr, "%s%s", libNameStr, libVersionStr);
}
fInfoSDPString = strDup(info == NULL ? libNamePlusVersionStr : info);
fDescriptionSDPString = strDup(description == NULL ? libNamePlusVersionStr : description);
delete[] libNamePlusVersionStr;
fMiscSDPLines = strDup(miscSDPLines == NULL ? "" : miscSDPLines);
gettimeofday(&fCreationTime, NULL);
}
ServerMediaSession::~ServerMediaSession() {
deleteAllSubsessions();
delete[] fStreamName;
delete[] fInfoSDPString;
delete[] fDescriptionSDPString;
delete[] fMiscSDPLines;
}
Boolean
ServerMediaSession::addSubsession(ServerMediaSubsession* subsession) {
if (subsession->fParentSession != NULL) return False; // it's already used
if (fSubsessionsTail == NULL) {
fSubsessionsHead = subsession;
} else {
fSubsessionsTail->fNext = subsession;
}
fSubsessionsTail = subsession;
subsession->fParentSession = this;
subsession->fTrackNumber = ++fSubsessionCounter;
return True;
}
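// (So the first subsession added to a session gets "fTrackNumber" 1, the
// second gets 2, and so on; "ServerMediaSubsession::trackId()" below turns
// these into the "track1", "track2", ... track ids.)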
void ServerMediaSession::testScaleFactor(float& scale) {
// First, try setting all subsessions to the desired scale.
// If the subsessions' actual scales differ from each other, choose the
// value that's closest to 1, and then try re-setting all subsessions to that
// value. If the subsessions' actual scales still differ, re-set them all to 1.
float minSSScale = 1.0;
float maxSSScale = 1.0;
float bestSSScale = 1.0;
float bestDistanceTo1 = 0.0;
ServerMediaSubsession* subsession;
for (subsession = fSubsessionsHead; subsession != NULL;
subsession = subsession->fNext) {
float ssscale = scale;
subsession->testScaleFactor(ssscale);
if (subsession == fSubsessionsHead) { // this is the first subsession
minSSScale = maxSSScale = bestSSScale = ssscale;
bestDistanceTo1 = (float)fabs(ssscale - 1.0f);
} else {
if (ssscale < minSSScale) {
minSSScale = ssscale;
} else if (ssscale > maxSSScale) {
maxSSScale = ssscale;
}
float distanceTo1 = (float)fabs(ssscale - 1.0f);
if (distanceTo1 < bestDistanceTo1) {
bestSSScale = ssscale;
bestDistanceTo1 = distanceTo1;
}
}
}
if (minSSScale == maxSSScale) {
// All subsessions are at the same scale: minSSScale == bestSSScale == maxSSScale
scale = minSSScale;
return;
}
// The scales for each subsession differ. Try to set each one to the value
// that's closest to 1:
for (subsession = fSubsessionsHead; subsession != NULL;
subsession = subsession->fNext) {
float ssscale = bestSSScale;
subsession->testScaleFactor(ssscale);
if (ssscale != bestSSScale) break; // no luck
}
if (subsession == NULL) {
// All subsessions are at the same scale: bestSSScale
scale = bestSSScale;
return;
}
// Still no luck. Set each subsession's scale to 1:
for (subsession = fSubsessionsHead; subsession != NULL;
subsession = subsession->fNext) {
float ssscale = 1;
subsession->testScaleFactor(ssscale);
}
scale = 1;
}
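// Worked example: if a requested scale of 4.0 comes back from two
// subsessions as actual scales 2.0 and 4.0, then 2.0 (the value closest
// to 1) is re-tried on both; if either subsession still reports something
// different, every subsession is reset to a scale of 1, and "scale"
// returns as 1.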
float ServerMediaSession::duration() const {
float minSubsessionDuration = 0.0;
float maxSubsessionDuration = 0.0;
for (ServerMediaSubsession* subsession = fSubsessionsHead; subsession != NULL;
subsession = subsession->fNext) {
// Hack: If any subsession supports seeking by 'absolute' time, then return a negative value, to indicate that only the subsessions
// (not the session as a whole) will have an "a=range:" attribute:
char* absStartTime = NULL; char* absEndTime = NULL;
subsession->getAbsoluteTimeRange(absStartTime, absEndTime);
if (absStartTime != NULL) return -1.0f;
float ssduration = subsession->duration();
if (subsession == fSubsessionsHead) { // this is the first subsession
minSubsessionDuration = maxSubsessionDuration = ssduration;
} else if (ssduration < minSubsessionDuration) {
minSubsessionDuration = ssduration;
} else if (ssduration > maxSubsessionDuration) {
maxSubsessionDuration = ssduration;
}
}
if (maxSubsessionDuration != minSubsessionDuration) {
return -maxSubsessionDuration; // because subsession durations differ
} else {
return maxSubsessionDuration; // all subsession durations are the same
}
}
void ServerMediaSession::deleteAllSubsessions() {
Medium::close(fSubsessionsHead);
fSubsessionsHead = fSubsessionsTail = NULL;
fSubsessionCounter = 0;
}
Boolean ServerMediaSession::isServerMediaSession() const {
return True;
}
char* ServerMediaSession::generateSDPDescription() {
AddressString ipAddressStr(ourIPAddress(envir()));
unsigned ipAddressStrSize = strlen(ipAddressStr.val());
// For an SSM session, we also need an "a=source-filter: incl ..." line:
char* sourceFilterLine;
if (fIsSSM) {
char const* const sourceFilterFmt =
"a=source-filter: incl IN IP4 * %s\r\n"
"a=rtcp-unicast: reflection\r\n";
unsigned const sourceFilterFmtSize = strlen(sourceFilterFmt) + ipAddressStrSize + 1;
sourceFilterLine = new char[sourceFilterFmtSize];
sprintf(sourceFilterLine, sourceFilterFmt, ipAddressStr.val());
} else {
sourceFilterLine = strDup("");
}
char* rangeLine = NULL; // for now
char* sdp = NULL; // for now
do {
// Count the lengths of each subsession's media-level SDP lines.
// (We do this first, because the call to "subsession->sdpLines()"
// causes correct subsession 'duration()'s to be calculated later.)
unsigned sdpLength = 0;
ServerMediaSubsession* subsession;
for (subsession = fSubsessionsHead; subsession != NULL;
subsession = subsession->fNext) {
char const* sdpLines = subsession->sdpLines();
if (sdpLines == NULL) continue; // the media's not available
sdpLength += strlen(sdpLines);
}
if (sdpLength == 0) break; // the session has no usable subsessions
// Unless subsessions have differing durations, we also have a "a=range:" line:
float dur = duration();
if (dur == 0.0) {
rangeLine = strDup("a=range:npt=0-\r\n");
} else if (dur > 0.0) {
char buf[100];
sprintf(buf, "a=range:npt=0-%.3f\r\n", dur);
rangeLine = strDup(buf);
} else { // subsessions have differing durations, so "a=range:" lines go there
rangeLine = strDup("");
}
char const* const sdpPrefixFmt =
"v=0\r\n"
"o=- %ld%06ld %d IN IP4 %s\r\n"
"s=%s\r\n"
"i=%s\r\n"
"t=0 0\r\n"
"a=tool:%s%s\r\n"
"a=type:broadcast\r\n"
"a=control:*\r\n"
"%s"
"%s"
"a=x-qt-text-nam:%s\r\n"
"a=x-qt-text-inf:%s\r\n"
"%s";
sdpLength += strlen(sdpPrefixFmt)
+ 20 + 6 + 20 + ipAddressStrSize
+ strlen(fDescriptionSDPString)
+ strlen(fInfoSDPString)
+ strlen(libNameStr) + strlen(libVersionStr)
+ strlen(sourceFilterLine)
+ strlen(rangeLine)
+ strlen(fDescriptionSDPString)
+ strlen(fInfoSDPString)
+ strlen(fMiscSDPLines);
sdp = new char[sdpLength];
if (sdp == NULL) break;
// Generate the SDP prefix (session-level lines):
sprintf(sdp, sdpPrefixFmt,
fCreationTime.tv_sec, fCreationTime.tv_usec, // o=
1, // o= // (needs to change if params are modified)
ipAddressStr.val(), // o=
fDescriptionSDPString, // s=
fInfoSDPString, // i=
libNameStr, libVersionStr, // a=tool:
sourceFilterLine, // a=source-filter: incl (if a SSM session)
rangeLine, // a=range: line
fDescriptionSDPString, // a=x-qt-text-nam: line
fInfoSDPString, // a=x-qt-text-inf: line
fMiscSDPLines); // miscellaneous session SDP lines (if any)
// Then, add the (media-level) lines for each subsession:
char* mediaSDP = sdp;
for (subsession = fSubsessionsHead; subsession != NULL;
subsession = subsession->fNext) {
mediaSDP += strlen(mediaSDP);
char const* sdpLines = subsession->sdpLines();
if (sdpLines != NULL) sprintf(mediaSDP, "%s", sdpLines);
}
} while (0);
delete[] rangeLine; delete[] sourceFilterLine;
return sdp;
}
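// Illustration only: for a non-SSM session with default "info"/"description"
// strings, one subsession, and a 30-second duration, the result looks like
// this (all values illustrative):
//
//   v=0
//   o=- 1389744000123456 1 IN IP4 192.168.0.10
//   s=LIVE555 Streaming Media v2014.01.13
//   i=LIVE555 Streaming Media v2014.01.13
//   t=0 0
//   a=tool:LIVE555 Streaming Media v2014.01.13
//   a=type:broadcast
//   a=control:*
//   a=range:npt=0-30.000
//   a=x-qt-text-nam:LIVE555 Streaming Media v2014.01.13
//   a=x-qt-text-inf:LIVE555 Streaming Media v2014.01.13
//   ...followed by each subsession's media-level ("m=...") lines.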
////////// ServerMediaSubsessionIterator //////////
ServerMediaSubsessionIterator
::ServerMediaSubsessionIterator(ServerMediaSession& session)
: fOurSession(session) {
reset();
}
ServerMediaSubsessionIterator::~ServerMediaSubsessionIterator() {
}
ServerMediaSubsession* ServerMediaSubsessionIterator::next() {
ServerMediaSubsession* result = fNextPtr;
if (fNextPtr != NULL) fNextPtr = fNextPtr->fNext;
return result;
}
void ServerMediaSubsessionIterator::reset() {
fNextPtr = fOurSession.fSubsessionsHead;
}
////////// ServerMediaSubsession //////////
ServerMediaSubsession::ServerMediaSubsession(UsageEnvironment& env)
: Medium(env),
fParentSession(NULL), fServerAddressForSDP(0), fPortNumForSDP(0),
fNext(NULL), fTrackNumber(0), fTrackId(NULL) {
}
ServerMediaSubsession::~ServerMediaSubsession() {
delete[] (char*)fTrackId;
Medium::close(fNext);
}
char const* ServerMediaSubsession::trackId() {
if (fTrackNumber == 0) return NULL; // not yet in a ServerMediaSession
if (fTrackId == NULL) {
char buf[100];
sprintf(buf, "track%d", fTrackNumber);
fTrackId = strDup(buf);
}
return fTrackId;
}
void ServerMediaSubsession::pauseStream(unsigned /*clientSessionId*/,
void* /*streamToken*/) {
// default implementation: do nothing
}
void ServerMediaSubsession::seekStream(unsigned /*clientSessionId*/,
void* /*streamToken*/, double& /*seekNPT*/, double /*streamDuration*/, u_int64_t& numBytes) {
// default implementation: do nothing
numBytes = 0;
}
void ServerMediaSubsession::seekStream(unsigned /*clientSessionId*/,
void* /*streamToken*/, char*& absStart, char*& absEnd) {
// default implementation: do nothing (but delete[] and assign "absStart" and "absEnd" to NULL, to show that we don't handle this)
delete[] absStart; absStart = NULL;
delete[] absEnd; absEnd = NULL;
}
void ServerMediaSubsession::nullSeekStream(unsigned /*clientSessionId*/, void* /*streamToken*/) {
// default implementation: do nothing
}
void ServerMediaSubsession::setStreamScale(unsigned /*clientSessionId*/,
void* /*streamToken*/, float /*scale*/) {
// default implementation: do nothing
}
float ServerMediaSubsession::getCurrentNPT(void* /*streamToken*/) {
// default implementation: return 0.0
return 0.0;
}
FramedSource* ServerMediaSubsession::getStreamSource(void* /*streamToken*/) {
// default implementation: return NULL
return NULL;
}
void ServerMediaSubsession::deleteStream(unsigned /*clientSessionId*/,
void*& /*streamToken*/) {
// default implementation: do nothing
}
void ServerMediaSubsession::testScaleFactor(float& scale) {
// default implementation: Support scale = 1 only
scale = 1;
}
float ServerMediaSubsession::duration() const {
// default implementation: assume an unbounded session:
return 0.0;
}
void ServerMediaSubsession::getAbsoluteTimeRange(char*& absStartTime, char*& absEndTime) const {
// default implementation: We don't support seeking by 'absolute' time, so indicate this by setting both parameters to NULL:
absStartTime = absEndTime = NULL;
}
void ServerMediaSubsession::setServerAddressAndPortForSDP(netAddressBits addressBits,
portNumBits portBits) {
fServerAddressForSDP = addressBits;
fPortNumForSDP = portBits;
}
char const*
ServerMediaSubsession::rangeSDPLine() const {
// First, check for the special case where we support seeking by 'absolute' time:
char* absStart = NULL; char* absEnd = NULL;
getAbsoluteTimeRange(absStart, absEnd);
if (absStart != NULL) {
char buf[100];
if (absEnd != NULL) {
sprintf(buf, "a=range:clock=%s-%s\r\n", absStart, absEnd);
} else {
sprintf(buf, "a=range:clock=%s-\r\n", absStart);
}
return strDup(buf);
}
if (fParentSession == NULL) return NULL;
// If all of our parent's subsessions have the same duration
// (as indicated by "fParentSession->duration() >= 0"), there's no "a=range:" line:
if (fParentSession->duration() >= 0.0) return strDup("");
// Use our own duration for a "a=range:" line:
float ourDuration = duration();
if (ourDuration == 0.0) {
return strDup("a=range:npt=0-\r\n");
} else {
char buf[100];
sprintf(buf, "a=range:npt=0-%.3f\r\n", ourDuration);
return strDup(buf);
}
}
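// For example: a subsession with a 30.5-second duration, in a session whose
// subsessions' durations differ, yields "a=range:npt=0-30.500\r\n"; a
// subsession that supports 'absolute' seeking might instead yield
// "a=range:clock=20140113T000000Z-\r\n" (illustrative timestamp).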
live/liveMedia/ByteStreamFileSource.cpp 000444 001751 000000 00000014233 12265042432 020405 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A file source that is a plain byte stream (rather than frames)
// Implementation
#include "ByteStreamFileSource.hh"
#include "InputFile.hh"
#include "GroupsockHelper.hh"
////////// ByteStreamFileSource //////////
ByteStreamFileSource*
ByteStreamFileSource::createNew(UsageEnvironment& env, char const* fileName,
unsigned preferredFrameSize,
unsigned playTimePerFrame) {
FILE* fid = OpenInputFile(env, fileName);
if (fid == NULL) return NULL;
ByteStreamFileSource* newSource
= new ByteStreamFileSource(env, fid, preferredFrameSize, playTimePerFrame);
newSource->fFileSize = GetFileSize(fileName, fid);
return newSource;
}
ByteStreamFileSource*
ByteStreamFileSource::createNew(UsageEnvironment& env, FILE* fid,
unsigned preferredFrameSize,
unsigned playTimePerFrame) {
if (fid == NULL) return NULL;
ByteStreamFileSource* newSource = new ByteStreamFileSource(env, fid, preferredFrameSize, playTimePerFrame);
newSource->fFileSize = GetFileSize(NULL, fid);
return newSource;
}
void ByteStreamFileSource::seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream) {
SeekFile64(fFid, (int64_t)byteNumber, SEEK_SET);
fNumBytesToStream = numBytesToStream;
fLimitNumBytesToStream = fNumBytesToStream > 0;
}
void ByteStreamFileSource::seekToByteRelative(int64_t offset) {
SeekFile64(fFid, offset, SEEK_CUR);
}
void ByteStreamFileSource::seekToEnd() {
SeekFile64(fFid, 0, SEEK_END);
}
ByteStreamFileSource::ByteStreamFileSource(UsageEnvironment& env, FILE* fid,
unsigned preferredFrameSize,
unsigned playTimePerFrame)
: FramedFileSource(env, fid), fFileSize(0), fPreferredFrameSize(preferredFrameSize),
fPlayTimePerFrame(playTimePerFrame), fLastPlayTime(0),
fHaveStartedReading(False), fLimitNumBytesToStream(False), fNumBytesToStream(0) {
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
makeSocketNonBlocking(fileno(fFid));
#endif
// Test whether the file is seekable
fFidIsSeekable = FileIsSeekable(fFid);
}
ByteStreamFileSource::~ByteStreamFileSource() {
if (fFid == NULL) return;
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid));
#endif
CloseInputFile(fFid);
}
void ByteStreamFileSource::doGetNextFrame() {
if (feof(fFid) || ferror(fFid) || (fLimitNumBytesToStream && fNumBytesToStream == 0)) {
handleClosure(this);
return;
}
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
doReadFromFile();
#else
if (!fHaveStartedReading) {
// Await readable data from the file:
envir().taskScheduler().turnOnBackgroundReadHandling(fileno(fFid),
(TaskScheduler::BackgroundHandlerProc*)&fileReadableHandler, this);
fHaveStartedReading = True;
}
#endif
}
void ByteStreamFileSource::doStopGettingFrames() {
envir().taskScheduler().unscheduleDelayedTask(nextTask());
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid));
fHaveStartedReading = False;
#endif
}
void ByteStreamFileSource::fileReadableHandler(ByteStreamFileSource* source, int /*mask*/) {
if (!source->isCurrentlyAwaitingData()) {
source->doStopGettingFrames(); // we're not ready for the data yet
return;
}
source->doReadFromFile();
}
void ByteStreamFileSource::doReadFromFile() {
// Try to read as many bytes as will fit in the buffer provided (or "fPreferredFrameSize" if less)
if (fLimitNumBytesToStream && fNumBytesToStream < (u_int64_t)fMaxSize) {
fMaxSize = (unsigned)fNumBytesToStream;
}
if (fPreferredFrameSize > 0 && fPreferredFrameSize < fMaxSize) {
fMaxSize = fPreferredFrameSize;
}
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
fFrameSize = fread(fTo, 1, fMaxSize, fFid);
#else
if (fFidIsSeekable) {
fFrameSize = fread(fTo, 1, fMaxSize, fFid);
} else {
// For non-seekable files (e.g., pipes), call "read()" rather than "fread()", to ensure that the read doesn't block:
fFrameSize = read(fileno(fFid), fTo, fMaxSize);
}
#endif
if (fFrameSize == 0) {
handleClosure(this);
return;
}
fNumBytesToStream -= fFrameSize;
// Set the 'presentation time':
if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) {
if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
// This is the first frame, so use the current time:
gettimeofday(&fPresentationTime, NULL);
} else {
// Increment by the play time of the previous data:
unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
fPresentationTime.tv_sec += uSeconds/1000000;
fPresentationTime.tv_usec = uSeconds%1000000;
}
// Remember the play time of this data:
fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
fDurationInMicroseconds = fLastPlayTime;
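// (E.g., with a "fPlayTimePerFrame" of 3600 us for a preferred frame of
// 1000 bytes, a 500-byte read yields a play time, and duration, of 1800 us.)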
} else {
// We don't know a specific play time duration for this data,
// so just record the current time as being the 'presentation time':
gettimeofday(&fPresentationTime, NULL);
}
// Inform the reader that he has data:
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
// To avoid possible infinite recursion, we need to return to the event loop to do this:
nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
(TaskFunc*)FramedSource::afterGetting, this);
#else
// Because the file read was done from the event loop, we can call the
// 'after getting' function directly, without risk of infinite recursion:
FramedSource::afterGetting(this);
#endif
}
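// Illustration only: typical creation and (optional) seeking. (The file
// name is a placeholder; "createNew()" returns NULL if the file can't be
// opened.)
static ByteStreamFileSource* openExample(UsageEnvironment& env) {
  ByteStreamFileSource* fileSource
    = ByteStreamFileSource::createNew(env, "test.ts");
  if (fileSource != NULL) {
    fileSource->seekToByteAbsolute(0, 0); // rewind; a 0 count means 'no byte limit'
  }
  return fileSource;
}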
live/liveMedia/QuickTimeFileSink.cpp 000444 001751 000000 00000237400 12265042432 017670 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A sink that generates a QuickTime file from a composite media session
// Implementation
#include "QuickTimeFileSink.hh"
#include "QuickTimeGenericRTPSource.hh"
#include "GroupsockHelper.hh"
#include "InputFile.hh"
#include "OutputFile.hh"
#include "H263plusVideoRTPSource.hh" // for the special header
#include "MPEG4GenericRTPSource.hh" //for "samplingFrequencyFromAudioSpecificConfig()"
#include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()"
#include "Base64.hh"
#include <ctype.h>
#define fourChar(x,y,z,w) ( ((x)<<24)|((y)<<16)|((z)<<8)|(w) )
#define H264_IDR_FRAME 0x65 //bit 8 == 0, bits 7-6 (ref) == 3, bits 5-0 (type) == 5
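// For example, fourChar('m','d','a','t') == 0x6d646174: the big-endian
// packing of a four-character QuickTime atom name into a single 32-bit word.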
////////// SubsessionIOState, ChunkDescriptor ///////////
// A structure used to represent the I/O state of each input 'subsession':
class ChunkDescriptor {
public:
ChunkDescriptor(int64_t offsetInFile, unsigned size,
unsigned frameSize, unsigned frameDuration,
struct timeval presentationTime);
ChunkDescriptor* extendChunk(int64_t newOffsetInFile, unsigned newSize,
unsigned newFrameSize,
unsigned newFrameDuration,
struct timeval newPresentationTime);
// this may end up allocating a new chunk instead
public:
ChunkDescriptor* fNextChunk;
int64_t fOffsetInFile;
unsigned fNumFrames;
unsigned fFrameSize;
unsigned fFrameDuration;
struct timeval fPresentationTime; // of the start of the data
};
class SubsessionBuffer {
public:
SubsessionBuffer(unsigned bufferSize)
: fBufferSize(bufferSize) {
reset();
fData = new unsigned char[bufferSize];
}
virtual ~SubsessionBuffer() { delete[] fData; }
void reset() { fBytesInUse = 0; }
void addBytes(unsigned numBytes) { fBytesInUse += numBytes; }
unsigned char* dataStart() { return &fData[0]; }
unsigned char* dataEnd() { return &fData[fBytesInUse]; }
unsigned bytesInUse() const { return fBytesInUse; }
unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; }
void setPresentationTime(struct timeval const& presentationTime) {
fPresentationTime = presentationTime;
}
struct timeval const& presentationTime() const {return fPresentationTime;}
private:
unsigned fBufferSize;
struct timeval fPresentationTime;
unsigned char* fData;
unsigned fBytesInUse;
};
class SyncFrame {
public:
SyncFrame(unsigned frameNum);
public:
class SyncFrame *nextSyncFrame;
unsigned sfFrameNum;
};
// A 64-bit counter, used below:
class Count64 {
public:
Count64()
: hi(0), lo(0) {
}
void operator+=(unsigned arg);
u_int32_t hi, lo;
};
class SubsessionIOState {
public:
SubsessionIOState(QuickTimeFileSink& sink, MediaSubsession& subsession);
virtual ~SubsessionIOState();
Boolean setQTstate();
void setFinalQTstate();
void afterGettingFrame(unsigned packetDataSize,
struct timeval presentationTime);
void onSourceClosure();
Boolean syncOK(struct timeval presentationTime);
// returns true iff data is usable despite a sync check
static void setHintTrack(SubsessionIOState* hintedTrack,
SubsessionIOState* hintTrack);
Boolean isHintTrack() const { return fTrackHintedByUs != NULL; }
Boolean hasHintTrack() const { return fHintTrackForUs != NULL; }
UsageEnvironment& envir() const { return fOurSink.envir(); }
public:
static unsigned fCurrentTrackNumber;
unsigned fTrackID;
SubsessionIOState* fHintTrackForUs; SubsessionIOState* fTrackHintedByUs;
SubsessionBuffer *fBuffer, *fPrevBuffer;
QuickTimeFileSink& fOurSink;
MediaSubsession& fOurSubsession;
unsigned short fLastPacketRTPSeqNum;
Boolean fOurSourceIsActive;
Boolean fHaveBeenSynced; // used in synchronizing with other streams
struct timeval fSyncTime;
Boolean fQTEnableTrack;
unsigned fQTcomponentSubtype;
char const* fQTcomponentName;
typedef unsigned (QuickTimeFileSink::*atomCreationFunc)();
atomCreationFunc fQTMediaInformationAtomCreator;
atomCreationFunc fQTMediaDataAtomCreator;
char const* fQTAudioDataType;
unsigned short fQTSoundSampleVersion;
unsigned fQTTimeScale;
unsigned fQTTimeUnitsPerSample;
unsigned fQTBytesPerFrame;
unsigned fQTSamplesPerFrame;
// These next fields are derived from the ones above,
// plus the information from each chunk:
unsigned fQTTotNumSamples;
unsigned fQTDurationM; // in media time units
unsigned fQTDurationT; // in track time units
int64_t fTKHD_durationPosn;
// position of the duration in the output 'tkhd' atom
unsigned fQTInitialOffsetDuration;
// if there's a pause at the beginning
ChunkDescriptor *fHeadChunk, *fTailChunk;
unsigned fNumChunks;
SyncFrame *fHeadSyncFrame, *fTailSyncFrame;
// Counters to be used in the hint track's 'udta'/'hinf' atom:
struct hinf {
Count64 trpy;
Count64 nump;
Count64 tpyl;
// Is 'maxr' needed? Computing this would be a PITA. #####
Count64 dmed;
Count64 dimm;
// 'drep' is always 0
// 'tmin' and 'tmax' are always 0
unsigned pmax;
unsigned dmax;
} fHINF;
private:
void useFrame(SubsessionBuffer& buffer);
void useFrameForHinting(unsigned frameSize,
struct timeval presentationTime,
unsigned startSampleNumber);
// used by the above two routines:
unsigned useFrame1(unsigned sourceDataSize,
struct timeval presentationTime,
unsigned frameDuration, int64_t destFileOffset);
// returns the number of samples in this data
private:
// A structure used for temporarily storing frame state:
struct {
unsigned frameSize;
struct timeval presentationTime;
int64_t destFileOffset; // used for non-hint tracks only
// The remaining fields are used for hint tracks only:
unsigned startSampleNumber;
unsigned short seqNum;
unsigned rtpHeader;
unsigned char numSpecialHeaders; // used when our RTP source has special headers
unsigned specialHeaderBytesLength; // ditto
unsigned char specialHeaderBytes[SPECIAL_HEADER_BUFFER_SIZE]; // ditto
unsigned packetSizes[256];
} fPrevFrameState;
};
////////// QuickTimeFileSink implementation //////////
QuickTimeFileSink::QuickTimeFileSink(UsageEnvironment& env,
MediaSession& inputSession,
char const* outputFileName,
unsigned bufferSize,
unsigned short movieWidth,
unsigned short movieHeight,
unsigned movieFPS,
Boolean packetLossCompensate,
Boolean syncStreams,
Boolean generateHintTracks,
Boolean generateMP4Format)
: Medium(env), fInputSession(inputSession),
fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate),
fSyncStreams(syncStreams), fGenerateMP4Format(generateMP4Format),
fAreCurrentlyBeingPlayed(False),
fLargestRTPtimestampFrequency(0),
fNumSubsessions(0), fNumSyncedSubsessions(0),
fHaveCompletedOutputFile(False),
fMovieWidth(movieWidth), fMovieHeight(movieHeight),
fMovieFPS(movieFPS), fMaxTrackDurationM(0) {
fOutFid = OpenOutputFile(env, outputFileName);
if (fOutFid == NULL) return;
fNewestSyncTime.tv_sec = fNewestSyncTime.tv_usec = 0;
fFirstDataTime.tv_sec = fFirstDataTime.tv_usec = (unsigned)(~0);
// Set up I/O state for each input subsession:
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
// Ignore subsessions without a data source:
FramedSource* subsessionSource = subsession->readSource();
if (subsessionSource == NULL) continue;
// If "subsession's" SDP description specified screen dimension
// or frame rate parameters, then use these. (Note that this must
// be done before the call to "setQTstate()" below.)
if (subsession->videoWidth() != 0) {
fMovieWidth = subsession->videoWidth();
}
if (subsession->videoHeight() != 0) {
fMovieHeight = subsession->videoHeight();
}
if (subsession->videoFPS() != 0) {
fMovieFPS = subsession->videoFPS();
}
SubsessionIOState* ioState
= new SubsessionIOState(*this, *subsession);
if (ioState == NULL || !ioState->setQTstate()) {
// We're not able to output a QuickTime track for this subsession
delete ioState; ioState = NULL;
continue;
}
subsession->miscPtr = (void*)ioState;
if (generateHintTracks) {
// Also create a hint track for this track:
SubsessionIOState* hintTrack
= new SubsessionIOState(*this, *subsession);
SubsessionIOState::setHintTrack(ioState, hintTrack);
if (!hintTrack->setQTstate()) {
delete hintTrack;
SubsessionIOState::setHintTrack(ioState, NULL);
}
}
// Also set a 'BYE' handler for this subsession's RTCP instance:
if (subsession->rtcpInstance() != NULL) {
subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState);
}
unsigned rtpTimestampFrequency = subsession->rtpTimestampFrequency();
if (rtpTimestampFrequency > fLargestRTPtimestampFrequency) {
fLargestRTPtimestampFrequency = rtpTimestampFrequency;
}
++fNumSubsessions;
}
// Use the current time as the file's creation and modification
// time. Use Apple's time format: seconds since January 1, 1904
gettimeofday(&fStartTime, NULL);
fAppleCreationTime = fStartTime.tv_sec - 0x83dac000;
// Begin by writing a "mdat" atom at the start of the file.
// (Later, when we've finished copying data to the file, we'll come
// back and fill in its size.)
fMDATposition = TellFile64(fOutFid);
addAtomHeader64("mdat");
// add 64Bit offset
fMDATposition += 8;
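// ("fMDATposition" now points at the atom's 64-bit size field; once all
// data has been written, "completeOutputFile()" overwrites that field,
// via "setWord64()", with the then-current file size.)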
}
QuickTimeFileSink::~QuickTimeFileSink() {
completeOutputFile();
// Then, stop streaming and delete each active "SubsessionIOState":
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
subsession->readSource()->stopGettingFrames();
SubsessionIOState* ioState
= (SubsessionIOState*)(subsession->miscPtr);
if (ioState == NULL) continue;
delete ioState->fHintTrackForUs; // if any
delete ioState;
}
// Finally, close our output file:
CloseOutputFile(fOutFid);
}
QuickTimeFileSink*
QuickTimeFileSink::createNew(UsageEnvironment& env,
MediaSession& inputSession,
char const* outputFileName,
unsigned bufferSize,
unsigned short movieWidth,
unsigned short movieHeight,
unsigned movieFPS,
Boolean packetLossCompensate,
Boolean syncStreams,
Boolean generateHintTracks,
Boolean generateMP4Format) {
QuickTimeFileSink* newSink =
new QuickTimeFileSink(env, inputSession, outputFileName, bufferSize, movieWidth, movieHeight, movieFPS,
packetLossCompensate, syncStreams, generateHintTracks, generateMP4Format);
if (newSink == NULL || newSink->fOutFid == NULL) {
Medium::close(newSink);
return NULL;
}
return newSink;
}
Boolean QuickTimeFileSink::startPlaying(afterPlayingFunc* afterFunc,
void* afterClientData) {
// Make sure we're not already being played:
if (fAreCurrentlyBeingPlayed) {
envir().setResultMsg("This sink has already been played");
return False;
}
fAreCurrentlyBeingPlayed = True;
fAfterFunc = afterFunc;
fAfterClientData = afterClientData;
return continuePlaying();
}
Boolean QuickTimeFileSink::continuePlaying() {
// Run through each of our input session's 'subsessions',
// asking for a frame from each one:
Boolean haveActiveSubsessions = False;
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
FramedSource* subsessionSource = subsession->readSource();
if (subsessionSource == NULL) continue;
if (subsessionSource->isCurrentlyAwaitingData()) continue;
SubsessionIOState* ioState
= (SubsessionIOState*)(subsession->miscPtr);
if (ioState == NULL) continue;
haveActiveSubsessions = True;
unsigned char* toPtr = ioState->fBuffer->dataEnd();
unsigned toSize = ioState->fBuffer->bytesAvailable();
subsessionSource->getNextFrame(toPtr, toSize,
afterGettingFrame, ioState,
onSourceClosure, ioState);
}
if (!haveActiveSubsessions) {
envir().setResultMsg("No subsessions are currently active");
return False;
}
return True;
}
void QuickTimeFileSink
::afterGettingFrame(void* clientData, unsigned packetDataSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned /*durationInMicroseconds*/) {
SubsessionIOState* ioState = (SubsessionIOState*)clientData;
if (!ioState->syncOK(presentationTime)) {
// Ignore this data:
ioState->fOurSink.continuePlaying();
return;
}
if (numTruncatedBytes > 0) {
ioState->envir() << "QuickTimeFileSink::afterGettingFrame(): The input frame data was too large for our buffer. "
<< numTruncatedBytes
<< " bytes of trailing data was dropped! Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call.\n";
}
ioState->afterGettingFrame(packetDataSize, presentationTime);
}
void QuickTimeFileSink::onSourceClosure(void* clientData) {
SubsessionIOState* ioState = (SubsessionIOState*)clientData;
ioState->onSourceClosure();
}
void QuickTimeFileSink::onSourceClosure1() {
// Check whether *all* of the subsession sources have closed.
// If not, do nothing for now:
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
SubsessionIOState* ioState
= (SubsessionIOState*)(subsession->miscPtr);
if (ioState == NULL) continue;
if (ioState->fOurSourceIsActive) return; // this source hasn't closed
}
completeOutputFile();
// Call our specified 'after' function:
if (fAfterFunc != NULL) {
(*fAfterFunc)(fAfterClientData);
}
}
void QuickTimeFileSink::onRTCPBye(void* clientData) {
SubsessionIOState* ioState = (SubsessionIOState*)clientData;
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
unsigned secsDiff
= timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec;
MediaSubsession& subsession = ioState->fOurSubsession;
ioState->envir() << "Received RTCP \"BYE\" on \""
<< subsession.mediumName()
<< "/" << subsession.codecName()
<< "\" subsession (after "
<< secsDiff << " seconds)\n";
// Handle the reception of an RTCP "BYE" as if the source had closed:
ioState->onSourceClosure();
}
static Boolean timevalGE(struct timeval const& tv1,
struct timeval const& tv2) {
return (unsigned)tv1.tv_sec > (unsigned)tv2.tv_sec
|| (tv1.tv_sec == tv2.tv_sec
&& (unsigned)tv1.tv_usec >= (unsigned)tv2.tv_usec);
}
void QuickTimeFileSink::completeOutputFile() {
if (fHaveCompletedOutputFile || fOutFid == NULL) return;
// Begin by filling in the initial "mdat" atom with the current
// file size:
int64_t curFileSize = TellFile64(fOutFid);
setWord64(fMDATposition, (u_int64_t)curFileSize);
// Then, note the time of the first received data:
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
SubsessionIOState* ioState
= (SubsessionIOState*)(subsession->miscPtr);
if (ioState == NULL) continue;
ChunkDescriptor* const headChunk = ioState->fHeadChunk;
if (headChunk != NULL
&& timevalGE(fFirstDataTime, headChunk->fPresentationTime)) {
fFirstDataTime = headChunk->fPresentationTime;
}
}
// Then, update the QuickTime-specific state for each active track:
iter.reset();
while ((subsession = iter.next()) != NULL) {
SubsessionIOState* ioState
= (SubsessionIOState*)(subsession->miscPtr);
if (ioState == NULL) continue;
ioState->setFinalQTstate();
// Do the same for a hint track (if any):
if (ioState->hasHintTrack()) {
ioState->fHintTrackForUs->setFinalQTstate();
}
}
if (fGenerateMP4Format) {
// Begin with a "ftyp" atom:
addAtom_ftyp();
}
// Then, add a "moov" atom for the file metadata:
addAtom_moov();
// We're done:
fHaveCompletedOutputFile = True;
}
////////// SubsessionIOState, ChunkDescriptor implementation ///////////
unsigned SubsessionIOState::fCurrentTrackNumber = 0;
SubsessionIOState::SubsessionIOState(QuickTimeFileSink& sink,
MediaSubsession& subsession)
: fHintTrackForUs(NULL), fTrackHintedByUs(NULL),
fOurSink(sink), fOurSubsession(subsession),
fLastPacketRTPSeqNum(0), fHaveBeenSynced(False), fQTTotNumSamples(0),
fHeadChunk(NULL), fTailChunk(NULL), fNumChunks(0),
fHeadSyncFrame(NULL), fTailSyncFrame(NULL) {
fTrackID = ++fCurrentTrackNumber;
fBuffer = new SubsessionBuffer(fOurSink.fBufferSize);
fPrevBuffer = sink.fPacketLossCompensate
? new SubsessionBuffer(fOurSink.fBufferSize) : NULL;
FramedSource* subsessionSource = subsession.readSource();
fOurSourceIsActive = subsessionSource != NULL;
fPrevFrameState.presentationTime.tv_sec = 0;
fPrevFrameState.presentationTime.tv_usec = 0;
fPrevFrameState.seqNum = 0;
}
SubsessionIOState::~SubsessionIOState() {
delete fBuffer; delete fPrevBuffer;
// Delete the list of chunk descriptors:
ChunkDescriptor* chunk = fHeadChunk;
while (chunk != NULL) {
ChunkDescriptor* next = chunk->fNextChunk;
delete chunk;
chunk = next;
}
// Delete the list of sync frames:
SyncFrame* syncFrame = fHeadSyncFrame;
while (syncFrame != NULL) {
SyncFrame* next = syncFrame->nextSyncFrame;
delete syncFrame;
syncFrame = next;
}
}
Boolean SubsessionIOState::setQTstate() {
char const* noCodecWarning1 = "Warning: We don't implement a QuickTime ";
char const* noCodecWarning2 = " Media Data Type for the \"";
char const* noCodecWarning3 = "\" track, so we'll insert a dummy \"????\" Media Data Atom instead. A separate, codec-specific editing pass will be needed before this track can be played.\n";
do {
fQTEnableTrack = True; // enable this track in the movie by default
fQTTimeScale = fOurSubsession.rtpTimestampFrequency(); // by default
fQTTimeUnitsPerSample = 1; // by default
fQTBytesPerFrame = 0;
// by default - indicates that the whole packet data is a frame
fQTSamplesPerFrame = 1; // by default
// Make sure our subsession's medium is one that we know how to
// represent in a QuickTime file:
if (isHintTrack()) {
// Hint tracks are treated specially
fQTEnableTrack = False; // hint tracks are marked as inactive
fQTcomponentSubtype = fourChar('h','i','n','t');
fQTcomponentName = "hint media handler";
fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_gmhd;
fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_rtp;
} else if (strcmp(fOurSubsession.mediumName(), "audio") == 0) {
fQTcomponentSubtype = fourChar('s','o','u','n');
fQTcomponentName = "Apple Sound Media Handler";
fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_smhd;
fQTMediaDataAtomCreator
= &QuickTimeFileSink::addAtom_soundMediaGeneral; // by default
fQTSoundSampleVersion = 0; // by default
// Make sure that our subsession's codec is one that we can handle:
if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 ||
strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) {
fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia;
} else if (strcmp(fOurSubsession.codecName(), "PCMU") == 0) {
fQTAudioDataType = "ulaw";
fQTBytesPerFrame = 1;
} else if (strcmp(fOurSubsession.codecName(), "GSM") == 0) {
fQTAudioDataType = "agsm";
fQTBytesPerFrame = 33;
fQTSamplesPerFrame = 160;
} else if (strcmp(fOurSubsession.codecName(), "PCMA") == 0) {
fQTAudioDataType = "alaw";
fQTBytesPerFrame = 1;
} else if (strcmp(fOurSubsession.codecName(), "QCELP") == 0) {
fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_Qclp;
fQTSamplesPerFrame = 160;
} else if (strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0 ||
strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0) {
fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4a;
fQTTimeUnitsPerSample = 1024; // QT considers each frame to be a 'sample'
// The time scale (frequency) comes from the 'config' information.
// It might be different from the RTP timestamp frequency (e.g., aacPlus).
unsigned frequencyFromConfig
= samplingFrequencyFromAudioSpecificConfig(fOurSubsession.fmtp_config());
if (frequencyFromConfig != 0) fQTTimeScale = frequencyFromConfig;
} else {
envir() << noCodecWarning1 << "Audio" << noCodecWarning2
<< fOurSubsession.codecName() << noCodecWarning3;
fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy;
fQTEnableTrack = False; // disable this track in the movie
}
} else if (strcmp(fOurSubsession.mediumName(), "video") == 0) {
fQTcomponentSubtype = fourChar('v','i','d','e');
fQTcomponentName = "Apple Video Media Handler";
fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_vmhd;
// Make sure that our subsession's codec is one that we can handle:
if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 ||
strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) {
fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia;
} else if (strcmp(fOurSubsession.codecName(), "H263-1998") == 0 ||
strcmp(fOurSubsession.codecName(), "H263-2000") == 0) {
fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_h263;
fQTTimeScale = 600;
fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
} else if (strcmp(fOurSubsession.codecName(), "H264") == 0) {
fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_avc1;
fQTTimeScale = 600;
fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
} else if (strcmp(fOurSubsession.codecName(), "MP4V-ES") == 0) {
fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4v;
fQTTimeScale = 600;
fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
} else {
envir() << noCodecWarning1 << "Video" << noCodecWarning2
<< fOurSubsession.codecName() << noCodecWarning3;
fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy;
fQTEnableTrack = False; // disable this track in the movie
}
} else {
envir() << "Warning: We don't implement a QuickTime Media Handler for media type \""
<< fOurSubsession.mediumName() << "\"";
break;
}
#ifdef QT_SUPPORT_PARTIALLY_ONLY
envir() << "Warning: We don't have sufficient codec-specific information (e.g., sample sizes) to fully generate the \""
<< fOurSubsession.mediumName() << "/" << fOurSubsession.codecName()
<< "\" track, so we'll disable this track in the movie. A separate, codec-specific editing pass will be needed before this track can be played\n";
fQTEnableTrack = False; // disable this track in the movie
#endif
return True;
} while (0);
envir() << ", so a track for the \"" << fOurSubsession.mediumName()
<< "/" << fOurSubsession.codecName()
<< "\" subsession will not be included in the output QuickTime file\n";
return False;
}
void SubsessionIOState::setFinalQTstate() {
// Compute derived parameters, by running through the list of chunks:
fQTDurationT = 0;
ChunkDescriptor* chunk = fHeadChunk;
while (chunk != NULL) {
unsigned const numFrames = chunk->fNumFrames;
unsigned const dur = numFrames*chunk->fFrameDuration;
fQTDurationT += dur;
chunk = chunk->fNextChunk;
}
// Convert this duration from track to movie time scale:
double scaleFactor = fOurSink.movieTimeScale()/(double)fQTTimeScale;
fQTDurationM = (unsigned)(fQTDurationT*scaleFactor);
if (fQTDurationM > fOurSink.fMaxTrackDurationM) {
fOurSink.fMaxTrackDurationM = fQTDurationM;
}
}
void SubsessionIOState::afterGettingFrame(unsigned packetDataSize,
struct timeval presentationTime) {
// Begin by checking whether there was a gap in the RTP stream.
// If so, try to compensate for this (if desired):
unsigned short rtpSeqNum
= fOurSubsession.rtpSource()->curPacketRTPSeqNum();
if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) {
short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum;
for (short i = 1; i < seqNumGap; ++i) {
// Insert a copy of the previous frame, to compensate for the loss:
useFrame(*fPrevBuffer);
}
}
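// (E.g., if the previous packet's RTP sequence number was 1000 and this
// one's is 1003, "seqNumGap" is 3, and two duplicates of the previous
// frame are inserted.)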
fLastPacketRTPSeqNum = rtpSeqNum;
// Now, continue working with the frame that we just got
if (fBuffer->bytesInUse() == 0) {
fBuffer->setPresentationTime(presentationTime);
}
fBuffer->addBytes(packetDataSize);
// If our RTP source is a "QuickTimeGenericRTPSource", then
// use its 'qtState' to set some parameters that we need:
if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_genericMedia){
QuickTimeGenericRTPSource* rtpSource
= (QuickTimeGenericRTPSource*)fOurSubsession.rtpSource();
QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState;
fQTTimeScale = qtState.timescale;
if (qtState.width != 0) {
fOurSink.fMovieWidth = qtState.width;
}
if (qtState.height != 0) {
fOurSink.fMovieHeight = qtState.height;
}
// Also, if the media type in the "sdAtom" is one that we recognize
// as having special parameters, then set them here:
if (qtState.sdAtomSize >= 8) {
char const* atom = qtState.sdAtom;
unsigned mediaType = fourChar(atom[4],atom[5],atom[6],atom[7]);
switch (mediaType) {
case fourChar('a','g','s','m'): {
fQTBytesPerFrame = 33;
fQTSamplesPerFrame = 160;
break;
}
case fourChar('Q','c','l','p'): {
fQTBytesPerFrame = 35;
fQTSamplesPerFrame = 160;
break;
}
case fourChar('H','c','l','p'): {
fQTBytesPerFrame = 17;
fQTSamplesPerFrame = 160;
break;
}
case fourChar('h','2','6','3'): {
fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
break;
}
}
}
} else if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_Qclp) {
// For QCELP data, make a note of the frame size (even though it's the
// same as the packet data size), because it varies depending on the
// 'rate' of the stream, and this size gets used later when setting up
// the 'Qclp' QuickTime atom:
fQTBytesPerFrame = packetDataSize;
}
useFrame(*fBuffer);
if (fOurSink.fPacketLossCompensate) {
// Save this frame, in case we need it for recovery:
SubsessionBuffer* tmp = fPrevBuffer; // assert: != NULL
fPrevBuffer = fBuffer;
fBuffer = tmp;
}
fBuffer->reset(); // for the next input
// Now, try getting more frames:
fOurSink.continuePlaying();
}
void SubsessionIOState::useFrame(SubsessionBuffer& buffer) {
unsigned char* const frameSource = buffer.dataStart();
unsigned const frameSize = buffer.bytesInUse();
struct timeval const& presentationTime = buffer.presentationTime();
int64_t const destFileOffset = TellFile64(fOurSink.fOutFid);
unsigned sampleNumberOfFrameStart = fQTTotNumSamples + 1;
Boolean avcHack = fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_avc1;
// If we're not syncing streams, or this subsession is not video, then
// just give this frame a fixed duration:
if (!fOurSink.fSyncStreams
|| fQTcomponentSubtype != fourChar('v','i','d','e')) {
unsigned const frameDuration = fQTTimeUnitsPerSample*fQTSamplesPerFrame;
unsigned frameSizeToUse = frameSize;
if (avcHack) frameSizeToUse += 4; // H.264/AVC gets the frame size prefix
fQTTotNumSamples += useFrame1(frameSizeToUse, presentationTime, frameDuration, destFileOffset);
} else {
// For synced video streams, we use the difference between successive
// frames' presentation times as the 'frame duration'. So, record
// information about the *previous* frame:
struct timeval const& ppt = fPrevFrameState.presentationTime; //abbrev
if (ppt.tv_sec != 0 || ppt.tv_usec != 0) {
// There has been a previous frame.
double duration = (presentationTime.tv_sec - ppt.tv_sec)
+ (presentationTime.tv_usec - ppt.tv_usec)/1000000.0;
if (duration < 0.0) duration = 0.0;
unsigned frameDuration
= (unsigned)((2*duration*fQTTimeScale+1)/2); // round
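// (For nonnegative x, (unsigned)((2*x+1)/2) equals x rounded to the nearest
// integer: e.g., a 0.0417s gap at a time scale of 600 gives 25.02 -> 25.)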
unsigned frameSizeToUse = fPrevFrameState.frameSize;
if (avcHack) frameSizeToUse += 4; // H.264/AVC gets the frame size prefix
unsigned numSamples
= useFrame1(frameSizeToUse, ppt, frameDuration, fPrevFrameState.destFileOffset);
fQTTotNumSamples += numSamples;
sampleNumberOfFrameStart = fQTTotNumSamples + 1;
}
if (avcHack && (*frameSource == H264_IDR_FRAME)) {
SyncFrame* newSyncFrame = new SyncFrame(fQTTotNumSamples + 1);
if (fTailSyncFrame == NULL) {
fHeadSyncFrame = newSyncFrame;
} else {
fTailSyncFrame->nextSyncFrame = newSyncFrame;
}
fTailSyncFrame = newSyncFrame;
}
// Remember the current frame for next time:
fPrevFrameState.frameSize = frameSize;
fPrevFrameState.presentationTime = presentationTime;
fPrevFrameState.destFileOffset = destFileOffset;
}
if (avcHack) fOurSink.addWord(frameSize);
// Write the data into the file:
fwrite(frameSource, 1, frameSize, fOurSink.fOutFid);
// If we have a hint track, then write to it also:
if (hasHintTrack()) {
// Because presentation times are used for RTP packet timestamps,
// we don't start writing to the hint track until we've been synced:
if (!fHaveBeenSynced) {
fHaveBeenSynced
= fOurSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP();
}
if (fHaveBeenSynced) {
fHintTrackForUs->useFrameForHinting(frameSize, presentationTime,
sampleNumberOfFrameStart);
}
}
}
void SubsessionIOState::useFrameForHinting(unsigned frameSize,
struct timeval presentationTime,
unsigned startSampleNumber) {
// At this point, we have a single, combined frame - not individual packets.
// For the hint track, we need to split the frame back up into separate packets.
// However, for some RTP sources, we also need to reuse the special
// header bytes that were at the start of each of the RTP packets.
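// As a roadmap for the code below: each 'hint sample' that we write consists
// of a 16-bit packet entry count and a 16-bit reserved field, followed, for
// each RTP packet, by a packet table entry (relative transmission time, RTP
// header info plus sequence number, flags, data table entry count) and a data
// table whose 16-byte entries are either mode 1 ('immediate' header bytes) or
// mode 2 (a reference into the media track's sample data).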
Boolean hack263 = strcmp(fOurSubsession.codecName(), "H263-1998") == 0;
Boolean hackm4a_generic = strcmp(fOurSubsession.mediumName(), "audio") == 0
&& strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0;
Boolean hackm4a_latm = strcmp(fOurSubsession.mediumName(), "audio") == 0
&& strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0;
Boolean hackm4a = hackm4a_generic || hackm4a_latm;
Boolean haveSpecialHeaders = (hack263 || hackm4a_generic);
// If there has been a previous frame, then output a 'hint sample' for it.
// (We use the current frame's presentation time to compute the previous
// hint sample's duration.)
RTPSource* const rs = fOurSubsession.rtpSource(); // abbrev
struct timeval const& ppt = fPrevFrameState.presentationTime; //abbrev
if (ppt.tv_sec != 0 || ppt.tv_usec != 0) {
double duration = (presentationTime.tv_sec - ppt.tv_sec)
+ (presentationTime.tv_usec - ppt.tv_usec)/1000000.0;
if (duration < 0.0) duration = 0.0;
unsigned msDuration = (unsigned)(duration*1000); // milliseconds
if (msDuration > fHINF.dmax) fHINF.dmax = msDuration;
unsigned hintSampleDuration
= (unsigned)((2*duration*fQTTimeScale+1)/2); // round
if (hackm4a) {
// Because multiple AAC frames can appear in an RTP packet, the presentation
// times of the second and subsequent frames will not be accurate.
// So, use the known "hintSampleDuration" instead:
hintSampleDuration = fTrackHintedByUs->fQTTimeUnitsPerSample;
// Also, if the 'time scale' was different from the RTP timestamp frequency,
// (as can happen with aacPlus), then we need to scale "hintSampleDuration"
// accordingly:
if (fTrackHintedByUs->fQTTimeScale != fOurSubsession.rtpTimestampFrequency()) {
unsigned const scalingFactor
= fOurSubsession.rtpTimestampFrequency()/fTrackHintedByUs->fQTTimeScale ;
hintSampleDuration *= scalingFactor;
}
}
int64_t const hintSampleDestFileOffset = TellFile64(fOurSink.fOutFid);
unsigned const maxPacketSize = 1450;
unsigned short numPTEntries
= (fPrevFrameState.frameSize + (maxPacketSize-1))/maxPacketSize; // normal case
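// ("(x + (max-1))/max" is ceiling division: e.g., a 3000-byte frame with a
// 1450-byte packet limit yields (3000+1449)/1450 == 3 packets.)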
unsigned char* immediateDataPtr = NULL;
unsigned immediateDataBytesRemaining = 0;
if (haveSpecialHeaders) { // special case
numPTEntries = fPrevFrameState.numSpecialHeaders;
immediateDataPtr = fPrevFrameState.specialHeaderBytes;
immediateDataBytesRemaining
= fPrevFrameState.specialHeaderBytesLength;
}
unsigned hintSampleSize
= fOurSink.addHalfWord(numPTEntries);// Entry count
hintSampleSize += fOurSink.addHalfWord(0x0000); // Reserved
unsigned offsetWithinSample = 0;
for (unsigned i = 0; i < numPTEntries; ++i) {
// Output a Packet Table entry (representing a single RTP packet):
unsigned short numDTEntries = 1;
unsigned short seqNum = fPrevFrameState.seqNum++;
// Note: This assumes that the input stream had no lost packets #####
unsigned rtpHeader = fPrevFrameState.rtpHeader;
if (i+1 < numPTEntries) {
// This is not the last RTP packet, so clear the marker bit:
rtpHeader &=~ (1<<23);
}
unsigned dataFrameSize = (i+1 < numPTEntries)
? maxPacketSize : fPrevFrameState.frameSize - i*maxPacketSize; // normal case
unsigned sampleNumber = fPrevFrameState.startSampleNumber;
unsigned char immediateDataLen = 0;
if (haveSpecialHeaders) { // special case
++numDTEntries; // to include a Data Table entry for the special hdr
if (immediateDataBytesRemaining > 0) {
if (hack263) {
immediateDataLen = *immediateDataPtr++;
--immediateDataBytesRemaining;
if (immediateDataLen > immediateDataBytesRemaining) {
// shouldn't happen (length byte was bad)
immediateDataLen = immediateDataBytesRemaining;
}
} else {
immediateDataLen = fPrevFrameState.specialHeaderBytesLength;
}
}
dataFrameSize = fPrevFrameState.packetSizes[i] - immediateDataLen;
if (hack263) {
Boolean PbitSet
= immediateDataLen >= 1 && (immediateDataPtr[0]&0x4) != 0;
if (PbitSet) {
offsetWithinSample += 2; // to omit the two leading 0 bytes
}
}
}
// Output the Packet Table:
hintSampleSize += fOurSink.addWord(0); // Relative transmission time
hintSampleSize += fOurSink.addWord(rtpHeader|seqNum);
// RTP header info + RTP sequence number
hintSampleSize += fOurSink.addHalfWord(0x0000); // Flags
hintSampleSize += fOurSink.addHalfWord(numDTEntries); // Entry count
unsigned totalPacketSize = 0;
// Output the Data Table:
if (haveSpecialHeaders) {
// use the "Immediate Data" format (1):
hintSampleSize += fOurSink.addByte(1); // Source
unsigned char len = immediateDataLen > 14 ? 14 : immediateDataLen;
hintSampleSize += fOurSink.addByte(len); // Length
totalPacketSize += len; fHINF.dimm += len;
unsigned char j;
for (j = 0; j < len; ++j) {
hintSampleSize += fOurSink.addByte(immediateDataPtr[j]); // Data
}
for (j = len; j < 14; ++j) {
hintSampleSize += fOurSink.addByte(0); // Data (padding)
}
immediateDataPtr += immediateDataLen;
immediateDataBytesRemaining -= immediateDataLen;
}
// use the "Sample Data" format (2):
hintSampleSize += fOurSink.addByte(2); // Source
hintSampleSize += fOurSink.addByte(0); // Track ref index
hintSampleSize += fOurSink.addHalfWord(dataFrameSize); // Length
totalPacketSize += dataFrameSize; fHINF.dmed += dataFrameSize;
hintSampleSize += fOurSink.addWord(sampleNumber); // Sample number
hintSampleSize += fOurSink.addWord(offsetWithinSample); // Offset
// Get "bytes|samples per compression block" from the hinted track:
unsigned short const bytesPerCompressionBlock
= fTrackHintedByUs->fQTBytesPerFrame;
unsigned short const samplesPerCompressionBlock
= fTrackHintedByUs->fQTSamplesPerFrame;
hintSampleSize += fOurSink.addHalfWord(bytesPerCompressionBlock);
hintSampleSize += fOurSink.addHalfWord(samplesPerCompressionBlock);
offsetWithinSample += dataFrameSize;// for the next iteration (if any)
// Tally statistics for this packet:
fHINF.nump += 1;
fHINF.tpyl += totalPacketSize;
totalPacketSize += 12; // add in the size of the RTP header
fHINF.trpy += totalPacketSize;
if (totalPacketSize > fHINF.pmax) fHINF.pmax = totalPacketSize;
}
// Make note of this completed hint sample frame:
fQTTotNumSamples += useFrame1(hintSampleSize, ppt, hintSampleDuration,
hintSampleDestFileOffset);
}
// Remember this frame for next time:
fPrevFrameState.frameSize = frameSize;
fPrevFrameState.presentationTime = presentationTime;
fPrevFrameState.startSampleNumber = startSampleNumber;
fPrevFrameState.rtpHeader
= rs->curPacketMarkerBit()<<23
| (rs->rtpPayloadFormat()&0x7F)<<16;
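// (Bit layout of the word built above, once the 16-bit sequence number is
// OR'd in when it's written out: bit 23 = RTP marker bit, bits 22-16 = RTP
// payload type, bits 15-0 = RTP sequence number.)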
if (hack263) {
H263plusVideoRTPSource* rs_263 = (H263plusVideoRTPSource*)rs;
fPrevFrameState.numSpecialHeaders = rs_263->fNumSpecialHeaders;
fPrevFrameState.specialHeaderBytesLength = rs_263->fSpecialHeaderBytesLength;
unsigned i;
for (i = 0; i < rs_263->fSpecialHeaderBytesLength; ++i) {
fPrevFrameState.specialHeaderBytes[i] = rs_263->fSpecialHeaderBytes[i];
}
for (i = 0; i < rs_263->fNumSpecialHeaders; ++i) {
fPrevFrameState.packetSizes[i] = rs_263->fPacketSizes[i];
}
} else if (hackm4a_generic) {
// Synthesize a special header, so that this frame can be in its own RTP packet.
unsigned const sizeLength = fOurSubsession.fmtp_sizelength();
unsigned const indexLength = fOurSubsession.fmtp_indexlength();
if (sizeLength + indexLength != 16) {
envir() << "Warning: unexpected 'sizeLength' " << sizeLength
<< " and 'indexLength' " << indexLength
<< "seen when creating hint track\n";
}
fPrevFrameState.numSpecialHeaders = 1;
fPrevFrameState.specialHeaderBytesLength = 4;
fPrevFrameState.specialHeaderBytes[0] = 0; // AU_headers_length (high byte)
fPrevFrameState.specialHeaderBytes[1] = 16; // AU_headers_length (low byte)
fPrevFrameState.specialHeaderBytes[2] = ((frameSize<<indexLength)&0xFF00)>>8; // AU_size (high bits)
fPrevFrameState.specialHeaderBytes[3] = (frameSize<<indexLength)&0xFF; // AU_size (low bits) + AU_index (0)
fPrevFrameState.packetSizes[0] = frameSize;
}
}
unsigned SubsessionIOState::useFrame1(unsigned sourceDataSize,
struct timeval presentationTime,
unsigned frameDuration,
int64_t destFileOffset) {
// Figure out the actual frame size for this data:
unsigned frameSize = fQTBytesPerFrame == 0
? sourceDataSize : fQTBytesPerFrame;
unsigned numFrames = sourceDataSize/frameSize;
unsigned numSamples = numFrames*fQTSamplesPerFrame;
// Record the information about which 'chunk' this data belongs to:
ChunkDescriptor* newTailChunk;
if (fTailChunk == NULL) {
newTailChunk = fHeadChunk
= new ChunkDescriptor(destFileOffset, sourceDataSize,
frameSize, frameDuration, presentationTime);
} else {
newTailChunk = fTailChunk->extendChunk(destFileOffset, sourceDataSize,
frameSize, frameDuration,
presentationTime);
}
if (newTailChunk != fTailChunk) {
// This data created a new chunk, rather than extending the old one
++fNumChunks;
fTailChunk = newTailChunk;
}
return numSamples;
}
void SubsessionIOState::onSourceClosure() {
fOurSourceIsActive = False;
fOurSink.onSourceClosure1();
}
Boolean SubsessionIOState::syncOK(struct timeval presentationTime) {
QuickTimeFileSink& s = fOurSink; // abbreviation
if (!s.fSyncStreams) return True; // we don't care
if (s.fNumSyncedSubsessions < s.fNumSubsessions) {
// Not all subsessions have yet been synced. Check whether ours was
// one of the unsynced ones, and, if so, whether it is now synced:
if (!fHaveBeenSynced) {
// We weren't synchronized before
if (fOurSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
// Is this an H.264 video track?
if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_avc1) {
// special case: audio + H264 video: wait until audio is in sync
if ((s.fNumSubsessions == 2) && (s.fNumSyncedSubsessions < (s.fNumSubsessions - 1))) return False;
// if audio is in sync, wait for the next IDR frame to start
unsigned char* const frameSource = fBuffer->dataStart();
if (*frameSource != H264_IDR_FRAME) return False;
}
// But now we are
fHaveBeenSynced = True;
fSyncTime = presentationTime;
++s.fNumSyncedSubsessions;
if (timevalGE(fSyncTime, s.fNewestSyncTime)) {
s.fNewestSyncTime = fSyncTime;
}
}
}
}
// Check again whether all subsessions have been synced:
if (s.fNumSyncedSubsessions < s.fNumSubsessions) return False;
// Allow this data if it is more recent than the newest sync time:
return timevalGE(presentationTime, s.fNewestSyncTime);
}
void SubsessionIOState::setHintTrack(SubsessionIOState* hintedTrack,
SubsessionIOState* hintTrack) {
if (hintedTrack != NULL) hintedTrack->fHintTrackForUs = hintTrack;
if (hintTrack != NULL) hintTrack->fTrackHintedByUs = hintedTrack;
}
SyncFrame::SyncFrame(unsigned frameNum)
: nextSyncFrame(NULL), sfFrameNum(frameNum) {
}
void Count64::operator+=(unsigned arg) {
unsigned newLo = lo + arg;
if (newLo < lo) { // lo has overflowed
++hi;
}
lo = newLo;
}
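// (The overflow test above works because unsigned addition wraps around:
// if "lo + arg" exceeded the type's range, "newLo" is necessarily smaller
// than "lo". E.g., with 32-bit "unsigned", lo == 0xFFFFFFFF and arg == 2
// give newLo == 1, so "hi" gets the carry.)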
ChunkDescriptor
::ChunkDescriptor(int64_t offsetInFile, unsigned size,
unsigned frameSize, unsigned frameDuration,
struct timeval presentationTime)
: fNextChunk(NULL), fOffsetInFile(offsetInFile),
fNumFrames(size/frameSize),
fFrameSize(frameSize), fFrameDuration(frameDuration),
fPresentationTime(presentationTime) {
}
ChunkDescriptor* ChunkDescriptor
::extendChunk(int64_t newOffsetInFile, unsigned newSize,
unsigned newFrameSize, unsigned newFrameDuration,
struct timeval newPresentationTime) {
// First, check whether the new space is just at the end of this
// existing chunk:
if (newOffsetInFile == fOffsetInFile + fNumFrames*fFrameSize) {
// We can extend this existing chunk, provided that the frame size
// and frame duration have not changed:
if (newFrameSize == fFrameSize && newFrameDuration == fFrameDuration) {
fNumFrames += newSize/fFrameSize;
return this;
}
}
// We'll allocate a new ChunkDescriptor, and link it to the end of us:
ChunkDescriptor* newDescriptor
= new ChunkDescriptor(newOffsetInFile, newSize,
newFrameSize, newFrameDuration,
newPresentationTime);
fNextChunk = newDescriptor;
return newDescriptor;
}
////////// QuickTime-specific implementation //////////
unsigned QuickTimeFileSink::addWord64(u_int64_t word) {
addByte((unsigned char)(word>>56)); addByte((unsigned char)(word>>48));
addByte((unsigned char)(word>>40)); addByte((unsigned char)(word>>32));
addByte((unsigned char)(word>>24)); addByte((unsigned char)(word>>16));
addByte((unsigned char)(word>>8)); addByte((unsigned char)(word));
return 8;
}
unsigned QuickTimeFileSink::addWord(unsigned word) {
addByte(word>>24); addByte(word>>16);
addByte(word>>8); addByte(word);
return 4;
}
unsigned QuickTimeFileSink::addHalfWord(unsigned short halfWord) {
addByte((unsigned char)(halfWord>>8)); addByte((unsigned char)halfWord);
return 2;
}
unsigned QuickTimeFileSink::addZeroWords(unsigned numWords) {
for (unsigned i = 0; i < numWords; ++i) {
addWord(0);
}
return numWords*4;
}
unsigned QuickTimeFileSink::add4ByteString(char const* str) {
addByte(str[0]); addByte(str[1]); addByte(str[2]); addByte(str[3]);
return 4;
}
unsigned QuickTimeFileSink::addArbitraryString(char const* str,
Boolean oneByteLength) {
unsigned size = 0;
if (oneByteLength) {
// Begin with a byte containing the string length:
unsigned strLength = strlen(str);
if (strLength >= 256) {
envir() << "QuickTimeFileSink::addArbitraryString(\""
<< str << "\") saw string longer than we know how to handle ("
<< strLength << ")\n";
}
size += addByte((unsigned char)strLength);
}
while (*str != '\0') {
size += addByte(*str++);
}
return size;
}
unsigned QuickTimeFileSink::addAtomHeader(char const* atomName) {
// Output a placeholder for the 4-byte size:
addWord(0);
// Output the 4-byte atom name:
add4ByteString(atomName);
return 8;
}
unsigned QuickTimeFileSink::addAtomHeader64(char const* atomName) {
// Output 64Bit size marker
addWord(1);
// Output the 4-byte atom name:
add4ByteString(atomName);
addWord64(0);
return 16;
}
void QuickTimeFileSink::setWord(int64_t filePosn, unsigned size) {
do {
if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
addWord(size);
if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were
return;
} while (0);
// One of the SeekFile64()s failed, probably because the output is not a seekable file
envir() << "QuickTimeFileSink::setWord(): SeekFile64 failed (err "
<< envir().getErrno() << ")\n";
}
void QuickTimeFileSink::setWord64(int64_t filePosn, u_int64_t size) {
do {
if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
addWord64(size);
if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were
return;
} while (0);
// One of the SeekFile64()s failed, probably because the output is not a seekable file
envir() << "QuickTimeFileSink::setWord64(): SeekFile64 failed (err "
<< envir().getErrno() << ")\n";
}
// Methods for writing particular atoms. Note the following macros:
#define addAtom(name) \
unsigned QuickTimeFileSink::addAtom_##name() { \
int64_t initFilePosn = TellFile64(fOutFid); \
unsigned size = addAtomHeader("" #name "")
#define addAtomEnd \
setWord(initFilePosn, size); \
return size; \
}
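// For example, "addAtom(ftyp); ...body...; addAtomEnd;" below expands to:
//   unsigned QuickTimeFileSink::addAtom_ftyp() {
//     int64_t initFilePosn = TellFile64(fOutFid);
//     unsigned size = addAtomHeader("ftyp");
//     ...body, accumulating into "size"...
//     setWord(initFilePosn, size); // backpatch the atom's 4-byte size field
//     return size;
//   }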
addAtom(ftyp);
size += add4ByteString("mp42");
size += addWord(0x00000000);
size += add4ByteString("mp42");
size += add4ByteString("isom");
addAtomEnd;
addAtom(moov);
size += addAtom_mvhd();
if (fGenerateMP4Format) {
size += addAtom_iods();
}
// Add a 'trak' atom for each subsession:
// (For some unknown reason, QuickTime Player (5.0 at least)
// doesn't display the movie correctly unless the audio track
// (if present) appears before the video track. So ensure this here.)
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
fCurrentIOState = (SubsessionIOState*)(subsession->miscPtr);
if (fCurrentIOState == NULL) continue;
if (strcmp(subsession->mediumName(), "audio") != 0) continue;
size += addAtom_trak();
if (fCurrentIOState->hasHintTrack()) {
// This track has a hint track; output it also:
fCurrentIOState = fCurrentIOState->fHintTrackForUs;
size += addAtom_trak();
}
}
iter.reset();
while ((subsession = iter.next()) != NULL) {
fCurrentIOState = (SubsessionIOState*)(subsession->miscPtr);
if (fCurrentIOState == NULL) continue;
if (strcmp(subsession->mediumName(), "audio") == 0) continue;
size += addAtom_trak();
if (fCurrentIOState->hasHintTrack()) {
// This track has a hint track; output it also:
fCurrentIOState = fCurrentIOState->fHintTrackForUs;
size += addAtom_trak();
}
}
addAtomEnd;
addAtom(mvhd);
size += addWord(0x00000000); // Version + Flags
size += addWord(fAppleCreationTime); // Creation time
size += addWord(fAppleCreationTime); // Modification time
// For the "Time scale" field, use the largest RTP timestamp frequency
// that we saw in any of the subsessions.
size += addWord(movieTimeScale()); // Time scale
unsigned const duration = fMaxTrackDurationM;
fMVHD_durationPosn = TellFile64(fOutFid);
size += addWord(duration); // Duration
size += addWord(0x00010000); // Preferred rate
size += addWord(0x01000000); // Preferred volume + Reserved[0]
size += addZeroWords(2); // Reserved[1-2]
size += addWord(0x00010000); // matrix top left corner
size += addZeroWords(3); // matrix
size += addWord(0x00010000); // matrix center
size += addZeroWords(3); // matrix
size += addWord(0x40000000); // matrix bottom right corner
size += addZeroWords(6); // various time fields
size += addWord(SubsessionIOState::fCurrentTrackNumber+1);// Next track ID
addAtomEnd;
addAtom(iods);
size += addWord(0x00000000); // Version + Flags
size += addWord(0x10808080);
size += addWord(0x07004FFF);
size += addWord(0xFF0FFFFF);
addAtomEnd;
addAtom(trak);
size += addAtom_tkhd();
// If we're synchronizing the media streams (or are a hint track),
// add an edit list that helps do this:
if (fCurrentIOState->fHeadChunk != NULL
&& (fSyncStreams || fCurrentIOState->isHintTrack())) {
size += addAtom_edts();
}
// If we're generating a hint track, add a 'tref' atom:
if (fCurrentIOState->isHintTrack()) size += addAtom_tref();
size += addAtom_mdia();
// If we're generating a hint track, add a 'udta' atom:
if (fCurrentIOState->isHintTrack()) size += addAtom_udta();
addAtomEnd;
addAtom(tkhd);
if (fCurrentIOState->fQTEnableTrack) {
size += addWord(0x0000000F); // Version + Flags
} else {
// Disable this track in the movie:
size += addWord(0x00000000); // Version + Flags
}
size += addWord(fAppleCreationTime); // Creation time
size += addWord(fAppleCreationTime); // Modification time
size += addWord(fCurrentIOState->fTrackID); // Track ID
size += addWord(0x00000000); // Reserved
unsigned const duration = fCurrentIOState->fQTDurationM; // movie units
fCurrentIOState->fTKHD_durationPosn = TellFile64(fOutFid);
size += addWord(duration); // Duration
size += addZeroWords(3); // Reserved+Layer+Alternate grp
size += addWord(0x01000000); // Volume + Reserved
size += addWord(0x00010000); // matrix top left corner
size += addZeroWords(3); // matrix
size += addWord(0x00010000); // matrix center
size += addZeroWords(3); // matrix
size += addWord(0x40000000); // matrix bottom right corner
if (strcmp(fCurrentIOState->fOurSubsession.mediumName(), "video") == 0) {
size += addWord(fMovieWidth<<16); // Track width
size += addWord(fMovieHeight<<16); // Track height
} else {
size += addZeroWords(2); // not video: leave width and height fields zero
}
addAtomEnd;
addAtom(edts);
size += addAtom_elst();
addAtomEnd;
#define addEdit1(duration,trackPosition) do { \
unsigned trackDuration \
= (unsigned) ((2*(duration)*movieTimeScale()+1)/2); \
/* in movie time units */ \
size += addWord(trackDuration); /* Track duration */ \
totalDurationOfEdits += trackDuration; \
size += addWord(trackPosition); /* Media time */ \
size += addWord(0x00010000); /* Media rate (1x) */ \
++numEdits; \
} while (0)
#define addEdit(duration) addEdit1((duration),editTrackPosition)
#define addEmptyEdit(duration) addEdit1((duration),(~0))
addAtom(elst);
size += addWord(0x00000000); // Version + Flags
// Add a dummy "Number of entries" field
// (and remember its position). We'll fill this field in later:
int64_t numEntriesPosition = TellFile64(fOutFid);
size += addWord(0); // dummy for "Number of entries"
unsigned numEdits = 0;
unsigned totalDurationOfEdits = 0; // in movie time units
// Run through our chunks, looking at their presentation times.
// From these, figure out the edits that need to be made to keep
// the track media data in sync with the presentation times.
double const syncThreshold = 0.1; // 100 ms
// don't allow the track to get out of sync by more than this
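// For example, if a chunk's presentation time is 0.5s later than the track
// data written so far accounts for, we close the current edit, emit a 0.5s
// 'empty' edit (media time ~0), and start a new edit at the current track
// position; if instead the track data runs 0.5s too long, we close the edit
// at the movie duration and restart at the current track position.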
struct timeval editStartTime = fFirstDataTime;
unsigned editTrackPosition = 0;
unsigned currentTrackPosition = 0;
double trackDurationOfEdit = 0.0;
unsigned chunkDuration = 0;
ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
while (chunk != NULL) {
struct timeval const& chunkStartTime = chunk->fPresentationTime;
double movieDurationOfEdit
= (chunkStartTime.tv_sec - editStartTime.tv_sec)
+ (chunkStartTime.tv_usec - editStartTime.tv_usec)/1000000.0;
trackDurationOfEdit = (currentTrackPosition-editTrackPosition)
/ (double)(fCurrentIOState->fQTTimeScale);
double outOfSync = movieDurationOfEdit - trackDurationOfEdit;
if (outOfSync > syncThreshold) {
// The track's data is too short, so end this edit, add a new
// 'empty' edit after it, and start a new edit
// (at the current track posn.):
if (trackDurationOfEdit > 0.0) addEdit(trackDurationOfEdit);
addEmptyEdit(outOfSync);
editStartTime = chunkStartTime;
editTrackPosition = currentTrackPosition;
} else if (outOfSync < -syncThreshold) {
// The track's data is too long, so end this edit, and start
// a new edit (pointing at the current track posn.):
if (movieDurationOfEdit > 0.0) addEdit(movieDurationOfEdit);
editStartTime = chunkStartTime;
editTrackPosition = currentTrackPosition;
}
// Note the duration of this chunk:
unsigned numChannels = fCurrentIOState->fOurSubsession.numChannels();
chunkDuration = chunk->fNumFrames*chunk->fFrameDuration/numChannels;
currentTrackPosition += chunkDuration;
chunk = chunk->fNextChunk;
}
// Write out the final edit
trackDurationOfEdit
+= (double)chunkDuration/fCurrentIOState->fQTTimeScale;
if (trackDurationOfEdit > 0.0) addEdit(trackDurationOfEdit);
// Now go back and fill in the "Number of entries" field:
setWord(numEntriesPosition, numEdits);
// Also, if the sum of all of the edit durations exceeds the
// track duration that we already computed (from sample durations),
// then reset the track duration to this new value:
if (totalDurationOfEdits > fCurrentIOState->fQTDurationM) {
fCurrentIOState->fQTDurationM = totalDurationOfEdits;
setWord(fCurrentIOState->fTKHD_durationPosn, totalDurationOfEdits);
// Also, check whether the overall movie duration needs to change:
if (totalDurationOfEdits > fMaxTrackDurationM) {
fMaxTrackDurationM = totalDurationOfEdits;
setWord(fMVHD_durationPosn, totalDurationOfEdits);
}
// Also, convert to track time scale:
double scaleFactor
= fCurrentIOState->fQTTimeScale/(double)movieTimeScale();
fCurrentIOState->fQTDurationT
= (unsigned)(totalDurationOfEdits*scaleFactor);
}
addAtomEnd;
addAtom(tref);
size += addAtom_hint();
addAtomEnd;
addAtom(hint);
SubsessionIOState* hintedTrack = fCurrentIOState->fTrackHintedByUs;
// Assert: hintedTrack != NULL
size += addWord(hintedTrack->fTrackID);
addAtomEnd;
addAtom(mdia);
size += addAtom_mdhd();
size += addAtom_hdlr();
size += addAtom_minf();
addAtomEnd;
addAtom(mdhd);
size += addWord(0x00000000); // Version + Flags
size += addWord(fAppleCreationTime); // Creation time
size += addWord(fAppleCreationTime); // Modification time
unsigned const timeScale = fCurrentIOState->fQTTimeScale;
size += addWord(timeScale); // Time scale
unsigned const duration = fCurrentIOState->fQTDurationT; // track units
size += addWord(duration); // Duration
size += addWord(0x00000000); // Language+Quality
addAtomEnd;
addAtom(hdlr);
size += addWord(0x00000000); // Version + Flags
size += add4ByteString("mhlr"); // Component type
size += addWord(fCurrentIOState->fQTcomponentSubtype);
// Component subtype
size += add4ByteString("appl"); // Component manufacturer
size += addWord(0x00000000); // Component flags
size += addWord(0x00000000); // Component flags mask
size += addArbitraryString(fCurrentIOState->fQTcomponentName);
// Component name
addAtomEnd;
addAtom(minf);
SubsessionIOState::atomCreationFunc mediaInformationAtomCreator
= fCurrentIOState->fQTMediaInformationAtomCreator;
size += (this->*mediaInformationAtomCreator)();
size += addAtom_hdlr2();
size += addAtom_dinf();
size += addAtom_stbl();
addAtomEnd;
addAtom(smhd);
size += addZeroWords(2); // Version+Flags+Balance+Reserved
addAtomEnd;
addAtom(vmhd);
size += addWord(0x00000001); // Version + Flags
size += addWord(0x00408000); // Graphics mode + Opcolor[red]
size += addWord(0x80008000); // Opcolor[green] + Opcolor[blue]
addAtomEnd;
addAtom(gmhd);
size += addAtom_gmin();
addAtomEnd;
addAtom(gmin);
size += addWord(0x00000000); // Version + Flags
// The following fields probably aren't used for hint tracks, so just
// use values that I've seen in other files:
size += addWord(0x00408000); // Graphics mode + Opcolor (1st 2 bytes)
size += addWord(0x80008000); // Opcolor (last 4 bytes)
size += addWord(0x00000000); // Balance + Reserved
addAtomEnd;
unsigned QuickTimeFileSink::addAtom_hdlr2() {
int64_t initFilePosn = TellFile64(fOutFid);
unsigned size = addAtomHeader("hdlr");
size += addWord(0x00000000); // Version + Flags
size += add4ByteString("dhlr"); // Component type
size += add4ByteString("alis"); // Component subtype
size += add4ByteString("appl"); // Component manufacturer
size += addZeroWords(2); // Component flags+Component flags mask
size += addArbitraryString("Apple Alias Data Handler"); // Component name
addAtomEnd;
addAtom(dinf);
size += addAtom_dref();
addAtomEnd;
addAtom(dref);
size += addWord(0x00000000); // Version + Flags
size += addWord(0x00000001); // Number of entries
size += addAtom_alis();
addAtomEnd;
addAtom(alis);
size += addWord(0x00000001); // Version + Flags
addAtomEnd;
addAtom(stbl);
size += addAtom_stsd();
size += addAtom_stts();
if (fCurrentIOState->fQTcomponentSubtype == fourChar('v','i','d','e')) {
size += addAtom_stss(); // only for video streams
}
size += addAtom_stsc();
size += addAtom_stsz();
size += addAtom_co64();
addAtomEnd;
addAtom(stsd);
size += addWord(0x00000000); // Version+Flags
size += addWord(0x00000001); // Number of entries
SubsessionIOState::atomCreationFunc mediaDataAtomCreator
= fCurrentIOState->fQTMediaDataAtomCreator;
size += (this->*mediaDataAtomCreator)();
addAtomEnd;
unsigned QuickTimeFileSink::addAtom_genericMedia() {
int64_t initFilePosn = TellFile64(fOutFid);
// Our source is assumed to be a "QuickTimeGenericRTPSource"
// Use its "sdAtom" state for our contents:
QuickTimeGenericRTPSource* rtpSource = (QuickTimeGenericRTPSource*)
fCurrentIOState->fOurSubsession.rtpSource();
QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState;
char const* from = qtState.sdAtom;
unsigned size = qtState.sdAtomSize;
for (unsigned i = 0; i < size; ++i) addByte(from[i]);
addAtomEnd;
unsigned QuickTimeFileSink::addAtom_soundMediaGeneral() {
int64_t initFilePosn = TellFile64(fOutFid);
unsigned size = addAtomHeader(fCurrentIOState->fQTAudioDataType);
// General sample description fields:
size += addWord(0x00000000); // Reserved
size += addWord(0x00000001); // Reserved+Data reference index
// Sound sample description fields:
unsigned short const version = fCurrentIOState->fQTSoundSampleVersion;
size += addWord(version<<16); // Version+Revision level
size += addWord(0x00000000); // Vendor
unsigned short numChannels
= (unsigned short)(fCurrentIOState->fOurSubsession.numChannels());
size += addHalfWord(numChannels); // Number of channels
size += addHalfWord(0x0010); // Sample size
// size += addWord(0x00000000); // Compression ID+Packet size
size += addWord(0xfffe0000); // Compression ID+Packet size #####
unsigned const sampleRateFixedPoint = fCurrentIOState->fQTTimeScale << 16;
size += addWord(sampleRateFixedPoint); // Sample rate
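// (The sample rate field is 16.16 fixed-point, hence the <<16 above:
// e.g., 8000 Hz is written as 0x1F400000, and 44100 Hz as 0xAC440000.)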
addAtomEnd;
unsigned QuickTimeFileSink::addAtom_Qclp() {
// The beginning of this atom looks just like a general Sound Media atom,
// except with a version field of 1:
int64_t initFilePosn = TellFile64(fOutFid);
fCurrentIOState->fQTAudioDataType = "Qclp";
fCurrentIOState->fQTSoundSampleVersion = 1;
unsigned size = addAtom_soundMediaGeneral();
// Next, add the four fields that are particular to version 1:
// (Later, parameterize these #####)
size += addWord(0x000000a0); // samples per packet
size += addWord(0x00000000); // ???
size += addWord(0x00000000); // ???
size += addWord(0x00000002); // bytes per sample (uncompressed)
// Other special fields are in a 'wave' atom that follows:
size += addAtom_wave();
addAtomEnd;
addAtom(wave);
size += addAtom_frma();
if (strcmp(fCurrentIOState->fQTAudioDataType, "Qclp") == 0) {
size += addWord(0x00000014); // ???
size += add4ByteString("Qclp"); // ???
if (fCurrentIOState->fQTBytesPerFrame == 35) {
size += addAtom_Fclp(); // full-rate QCELP
} else {
size += addAtom_Hclp(); // half-rate QCELP
} // what about other QCELP 'rates'??? #####
size += addWord(0x00000008); // ???
size += addWord(0x00000000); // ???
size += addWord(0x00000000); // ???
size += addWord(0x00000008); // ???
} else if (strcmp(fCurrentIOState->fQTAudioDataType, "mp4a") == 0) {
size += addWord(0x0000000c); // ???
size += add4ByteString("mp4a"); // ???
size += addWord(0x00000000); // ???
size += addAtom_esds(); // ESDescriptor
size += addWord(0x00000008); // ???
size += addWord(0x00000000); // ???
}
addAtomEnd;
addAtom(frma);
size += add4ByteString(fCurrentIOState->fQTAudioDataType); // ???
addAtomEnd;
addAtom(Fclp);
size += addWord(0x00000000); // ???
addAtomEnd;
addAtom(Hclp);
size += addWord(0x00000000); // ???
addAtomEnd;
unsigned QuickTimeFileSink::addAtom_mp4a() {
unsigned size = 0;
// The beginning of this atom looks just like a general Sound Media atom,
// except with a version field of 1:
int64_t initFilePosn = TellFile64(fOutFid);
fCurrentIOState->fQTAudioDataType = "mp4a";
if (fGenerateMP4Format) {
fCurrentIOState->fQTSoundSampleVersion = 0;
size = addAtom_soundMediaGeneral();
size += addAtom_esds();
} else {
fCurrentIOState->fQTSoundSampleVersion = 1;
size = addAtom_soundMediaGeneral();
// Next, add the four fields that are particular to version 1:
// (Later, parameterize these #####)
size += addWord(fCurrentIOState->fQTTimeUnitsPerSample);
size += addWord(0x00000001); // ???
size += addWord(0x00000001); // ???
size += addWord(0x00000002); // bytes per sample (uncompressed)
// Other special fields are in a 'wave' atom that follows:
size += addAtom_wave();
}
addAtomEnd;
addAtom(esds);
//#####
MediaSubsession& subsession = fCurrentIOState->fOurSubsession;
if (strcmp(subsession.mediumName(), "audio") == 0) {
// MPEG-4 audio
size += addWord(0x00000000); // ???
size += addWord(0x03808080); // ???
size += addWord(0x2a000000); // ???
size += addWord(0x04808080); // ???
size += addWord(0x1c401500); // ???
size += addWord(0x18000000); // ???
size += addWord(0x6d600000); // ???
size += addWord(0x6d600580); // ???
size += addByte(0x80); size += addByte(0x80); // ???
} else if (strcmp(subsession.mediumName(), "video") == 0) {
// MPEG-4 video
size += addWord(0x00000000); // ???
size += addWord(0x03330000); // ???
size += addWord(0x1f042b20); // ???
size += addWord(0x1104fd46); // ???
size += addWord(0x000d4e10); // ???
size += addWord(0x000d4e10); // ???
size += addByte(0x05); // ???
}
// Add the source's 'config' information:
unsigned configSize;
unsigned char* config
= parseGeneralConfigStr(subsession.fmtp_config(), configSize);
size += addByte(configSize);
for (unsigned i = 0; i < configSize; ++i) {
size += addByte(config[i]);
}
delete[] config;
if (strcmp(subsession.mediumName(), "audio") == 0) {
// MPEG-4 audio
size += addWord(0x06808080); // ???
size += addHalfWord(0x0102); // ???
} else {
// MPEG-4 video
size += addHalfWord(0x0601); // ???
size += addByte(0x02); // ???
}
//#####
addAtomEnd;
addAtom(srcq);
//#####
size += addWord(0x00000040); // ???
//#####
addAtomEnd;
addAtom(h263);
// General sample description fields:
size += addWord(0x00000000); // Reserved
size += addWord(0x00000001); // Reserved+Data reference index
// Video sample description fields:
size += addWord(0x00020001); // Version+Revision level
size += add4ByteString("appl"); // Vendor
size += addWord(0x00000000); // Temporal quality
size += addWord(0x000002fc); // Spatial quality
unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
size += addWord(widthAndHeight); // Width+height
size += addWord(0x00480000); // Horizontal resolution
size += addWord(0x00480000); // Vertical resolution
size += addWord(0x00000000); // Data size
size += addWord(0x00010548); // Frame count+Compressor name (start)
// "H.263"
size += addWord(0x2e323633); // Compressor name (continued)
size += addZeroWords(6); // Compressor name (continued - zero)
size += addWord(0x00000018); // Compressor name (final)+Depth
size += addHalfWord(0xffff); // Color table id
addAtomEnd;
addAtom(avc1);
// General sample description fields:
size += addWord(0x00000000); // Reserved
size += addWord(0x00000001); // Reserved+Data reference index
// Video sample description fields:
size += addWord(0x00000000); // Version+Revision level
size += add4ByteString("appl"); // Vendor
size += addWord(0x00000000); // Temporal quality
size += addWord(0x00000000); // Spatial quality
unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
size += addWord(widthAndHeight); // Width+height
size += addWord(0x00480000); // Horizontal resolution
size += addWord(0x00480000); // Vertical resolution
size += addWord(0x00000000); // Data size
size += addWord(0x00010548); // Frame count+Compressor name (start)
// "H.264"
size += addWord(0x2e323634); // Compressor name (continued)
size += addZeroWords(6); // Compressor name (continued - zero)
size += addWord(0x00000018); // Compressor name (final)+Depth
size += addHalfWord(0xffff); // Color table id
size += addAtom_avcC();
addAtomEnd;
addAtom(avcC);
// Begin by Base-64 decoding the "sprop" parameter sets strings:
char* psets = strDup(fCurrentIOState->fOurSubsession.fmtp_spropparametersets());
if (psets == NULL) return 0;
size_t comma_pos = strcspn(psets, ",");
psets[comma_pos] = '\0';
char const* sps_b64 = psets;
char const* pps_b64 = &psets[comma_pos+1];
unsigned sps_count;
unsigned char* sps_data = base64Decode(sps_b64, sps_count, false);
unsigned pps_count;
unsigned char* pps_data = base64Decode(pps_b64, pps_count, false);
// Then add the decoded data:
size += addByte(0x01); // configuration version
size += addByte(sps_data[1]); // profile
size += addByte(sps_data[2]); // profile compat
size += addByte(sps_data[3]); // level
size += addByte(0xff); // 0b11111100 | lengthSizeMinusOne (binary 11 => 4-byte NALU length fields)
size += addByte(0xe0 | (sps_count > 0 ? 1 : 0) ); // 0b11100000 | number of SPSs (0 or 1 here)
if (sps_count > 0) {
size += addHalfWord(sps_count);
for (unsigned i = 0; i < sps_count; i++) {
size += addByte(sps_data[i]);
}
}
size += addByte(pps_count > 0 ? 1 : 0);
if (pps_count > 0) {
size += addHalfWord(pps_count);
for (unsigned i = 0; i < pps_count; i++) {
size += addByte(pps_data[i]);
}
}
// Finally, delete the data that we allocated:
delete[] pps_data; delete[] sps_data;
delete[] psets;
addAtomEnd;
addAtom(mp4v);
// General sample description fields:
size += addWord(0x00000000); // Reserved
size += addWord(0x00000001); // Reserved+Data reference index
// Video sample description fields:
size += addWord(0x00020001); // Version+Revision level
size += add4ByteString("appl"); // Vendor
size += addWord(0x00000200); // Temporal quality
size += addWord(0x00000400); // Spatial quality
unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
size += addWord(widthAndHeight); // Width+height
size += addWord(0x00480000); // Horizontal resolution
size += addWord(0x00480000); // Vertical resolution
size += addWord(0x00000000); // Data size
size += addWord(0x00010c4d); // Frame count+Compressor name (start)
// "MPEG-4 Video"
size += addWord(0x5045472d); // Compressor name (continued)
size += addWord(0x34205669); // Compressor name (continued)
size += addWord(0x64656f00); // Compressor name (continued)
size += addZeroWords(4); // Compressor name (continued - zero)
size += addWord(0x00000018); // Compressor name (final)+Depth
size += addHalfWord(0xffff); // Color table id
size += addAtom_esds(); // ESDescriptor
size += addWord(0x00000000); // ???
addAtomEnd;
unsigned QuickTimeFileSink::addAtom_rtp() {
int64_t initFilePosn = TellFile64(fOutFid);
unsigned size = addAtomHeader("rtp ");
size += addWord(0x00000000); // Reserved (1st 4 bytes)
size += addWord(0x00000001); // Reserved (last 2 bytes) + Data ref index
size += addWord(0x00010001); // Hint track version + Last compat htv
size += addWord(1450); // Max packet size
size += addAtom_tims();
addAtomEnd;
addAtom(tims);
size += addWord(fCurrentIOState->fOurSubsession.rtpTimestampFrequency());
addAtomEnd;
addAtom(stts); // Time-to-Sample
size += addWord(0x00000000); // Version+flags
// First, add a dummy "Number of entries" field
// (and remember its position). We'll fill this field in later:
int64_t numEntriesPosition = TellFile64(fOutFid);
size += addWord(0); // dummy for "Number of entries"
// Then, run through the chunk descriptors, and enter the entries
// in this (compressed) Time-to-Sample table:
unsigned numEntries = 0, numSamplesSoFar = 0;
unsigned prevSampleDuration = 0;
unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
while (chunk != NULL) {
unsigned const sampleDuration = chunk->fFrameDuration/samplesPerFrame;
if (sampleDuration != prevSampleDuration) {
// This chunk will start a new table entry,
// so write out the old one (if any):
if (chunk != fCurrentIOState->fHeadChunk) {
++numEntries;
size += addWord(numSamplesSoFar); // Sample count
size += addWord(prevSampleDuration); // Sample duration
numSamplesSoFar = 0;
}
}
unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
numSamplesSoFar += numSamples;
prevSampleDuration = sampleDuration;
chunk = chunk->fNextChunk;
}
// Then, write out the last entry:
++numEntries;
size += addWord(numSamplesSoFar); // Sample count
size += addWord(prevSampleDuration); // Sample duration
// Now go back and fill in the "Number of entries" field:
setWord(numEntriesPosition, numEntries);
addAtomEnd;
addAtom(stss); // Sync-Sample
size += addWord(0x00000000); // Version+flags
// First, add a dummy "Number of entries" field
// (and remember its position). We'll fill this field in later:
int64_t numEntriesPosition = TellFile64(fOutFid);
size += addWord(0); // dummy for "Number of entries"
unsigned numEntries = 0, numSamplesSoFar = 0;
if (fCurrentIOState->fHeadSyncFrame != NULL) {
SyncFrame* currentSyncFrame = fCurrentIOState->fHeadSyncFrame;
while(currentSyncFrame != NULL) {
++numEntries;
size += addWord(currentSyncFrame->sfFrameNum);
currentSyncFrame = currentSyncFrame->nextSyncFrame;
}
} else {
// Then, run through the chunk descriptors, counting up the total number of samples:
unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
while (chunk != NULL) {
unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
numSamplesSoFar += numSamples;
chunk = chunk->fNextChunk;
}
// Then, write out the sample numbers that we deem correspond to 'sync samples':
unsigned i;
for (i = 0; i < numSamplesSoFar; i += 12) {
// For an explanation of the constant "12", see http://lists.live555.com/pipermail/live-devel/2009-July/010969.html
// (Perhaps we should really try to keep track of which 'samples' ('frames' for video) really are 'key frames'?)
size += addWord(i+1);
++numEntries;
}
// Then, write out the last entry (if we haven't already done so):
if (i != (numSamplesSoFar - 1)) {
size += addWord(numSamplesSoFar);
++numEntries;
}
}
// Now go back and fill in the "Number of entries" field:
setWord(numEntriesPosition, numEntries);
addAtomEnd;
addAtom(stsc); // Sample-to-Chunk
size += addWord(0x00000000); // Version+flags
// First, add a dummy "Number of entries" field
// (and remember its position). We'll fill this field in later:
int64_t numEntriesPosition = TellFile64(fOutFid);
size += addWord(0); // dummy for "Number of entries"
// Then, run through the chunk descriptors, and enter the entries
// in this (compressed) Sample-to-Chunk table:
unsigned numEntries = 0, chunkNumber = 0;
unsigned prevSamplesPerChunk = ~0;
unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
while (chunk != NULL) {
++chunkNumber;
unsigned const samplesPerChunk = chunk->fNumFrames*samplesPerFrame;
if (samplesPerChunk != prevSamplesPerChunk) {
// This chunk will be a new table entry:
++numEntries;
size += addWord(chunkNumber); // Chunk number
size += addWord(samplesPerChunk); // Samples per chunk
size += addWord(0x00000001); // Sample description ID
prevSamplesPerChunk = samplesPerChunk;
}
chunk = chunk->fNextChunk;
}
// Now go back and fill in the "Number of entries" field:
setWord(numEntriesPosition, numEntries);
addAtomEnd;
addAtom(stsz); // Sample Size
size += addWord(0x00000000); // Version+flags
// Begin by checking whether our chunks all have the same
// 'bytes-per-sample'. This determines whether this atom's table
// has just a single entry, or multiple entries.
Boolean haveSingleEntryTable = True;
double firstBPS = 0.0;
ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
while (chunk != NULL) {
double bps
= (double)(chunk->fFrameSize)/(fCurrentIOState->fQTSamplesPerFrame);
if (bps < 1.0) {
// I don't think a multiple-entry table would make sense in
// this case, so assume a single entry table ??? #####
break;
}
if (firstBPS == 0.0) {
firstBPS = bps;
} else if (bps != firstBPS) {
haveSingleEntryTable = False;
break;
}
chunk = chunk->fNextChunk;
}
unsigned sampleSize;
if (haveSingleEntryTable) {
if (fCurrentIOState->isHintTrack()
&& fCurrentIOState->fHeadChunk != NULL) {
sampleSize = fCurrentIOState->fHeadChunk->fFrameSize
/ fCurrentIOState->fQTSamplesPerFrame;
} else {
// The following doesn't seem right, but seems to do the right thing:
sampleSize = fCurrentIOState->fQTTimeUnitsPerSample; //???
}
} else {
sampleSize = 0; // indicates a multiple-entry table
}
size += addWord(sampleSize); // Sample size
unsigned const totNumSamples = fCurrentIOState->fQTTotNumSamples;
size += addWord(totNumSamples); // Number of entries
if (!haveSingleEntryTable) {
// Multiple-entry table:
// Run through the chunk descriptors, entering the sample sizes:
ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
while (chunk != NULL) {
unsigned numSamples
= chunk->fNumFrames*(fCurrentIOState->fQTSamplesPerFrame);
unsigned sampleSize
= chunk->fFrameSize/(fCurrentIOState->fQTSamplesPerFrame);
for (unsigned i = 0; i < numSamples; ++i) {
size += addWord(sampleSize);
}
chunk = chunk->fNextChunk;
}
}
addAtomEnd;
addAtom(co64); // Chunk Offset
size += addWord(0x00000000); // Version+flags
size += addWord(fCurrentIOState->fNumChunks); // Number of entries
// Run through the chunk descriptors, entering the file offsets:
ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
while (chunk != NULL) {
size += addWord64(chunk->fOffsetInFile);
chunk = chunk->fNextChunk;
}
addAtomEnd;
addAtom(udta);
size += addAtom_name();
size += addAtom_hnti();
size += addAtom_hinf();
addAtomEnd;
addAtom(name);
char description[100];
sprintf(description, "Hinted %s track",
fCurrentIOState->fOurSubsession.mediumName());
size += addArbitraryString(description, False); // name of object
addAtomEnd;
addAtom(hnti);
size += addAtom_sdp();
addAtomEnd;
unsigned QuickTimeFileSink::addAtom_sdp() {
int64_t initFilePosn = TellFile64(fOutFid);
unsigned size = addAtomHeader("sdp ");
// Add this subsession's SDP lines:
char const* sdpLines = fCurrentIOState->fOurSubsession.savedSDPLines();
// We need to change any "a=control:trackID=" values to be this
// track's actual track id:
char* newSDPLines = new char[strlen(sdpLines)+100/*overkill*/];
char const* searchStr = "a=control:trackid=";
Boolean foundSearchString = False;
char const *p1, *p2, *p3;
for (p1 = sdpLines; *p1 != '\0'; ++p1) {
for (p2 = p1,p3 = searchStr; tolower(*p2) == *p3; ++p2,++p3) {}
if (*p3 == '\0') {
// We found the end of the search string, at p2.
int beforeTrackNumPosn = p2-sdpLines;
// Look for the subsequent track number, and skip over it:
int trackNumLength;
if (sscanf(p2, " %*d%n", &trackNumLength) < 0) break;
int afterTrackNumPosn = beforeTrackNumPosn + trackNumLength;
// Replace the old track number with the correct one:
int i;
for (i = 0; i < beforeTrackNumPosn; ++i) newSDPLines[i] = sdpLines[i];
sprintf(&newSDPLines[i], "%d", fCurrentIOState->fTrackID);
i = afterTrackNumPosn;
int j = i + strlen(&newSDPLines[i]);
while (1) {
if ((newSDPLines[j] = sdpLines[i]) == '\0') break;
++i; ++j;
}
foundSearchString = True;
break;
}
}
if (!foundSearchString) {
// Because we didn't find a "a=control:trackID=" line,
// add one of our own:
sprintf(newSDPLines, "%s%s%d\r\n",
sdpLines, searchStr, fCurrentIOState->fTrackID);
}
size += addArbitraryString(newSDPLines, False);
delete[] newSDPLines;
addAtomEnd;
addAtom(hinf);
size += addAtom_totl();
size += addAtom_npck();
size += addAtom_tpay();
size += addAtom_trpy();
size += addAtom_nump();
size += addAtom_tpyl();
// Is 'maxr' required? #####
size += addAtom_dmed();
size += addAtom_dimm();
size += addAtom_drep();
size += addAtom_tmin();
size += addAtom_tmax();
size += addAtom_pmax();
size += addAtom_dmax();
size += addAtom_payt();
addAtomEnd;
addAtom(totl);
size += addWord(fCurrentIOState->fHINF.trpy.lo);
addAtomEnd;
addAtom(npck);
size += addWord(fCurrentIOState->fHINF.nump.lo);
addAtomEnd;
addAtom(tpay);
size += addWord(fCurrentIOState->fHINF.tpyl.lo);
addAtomEnd;
addAtom(trpy);
size += addWord(fCurrentIOState->fHINF.trpy.hi);
size += addWord(fCurrentIOState->fHINF.trpy.lo);
addAtomEnd;
addAtom(nump);
size += addWord(fCurrentIOState->fHINF.nump.hi);
size += addWord(fCurrentIOState->fHINF.nump.lo);
addAtomEnd;
addAtom(tpyl);
size += addWord(fCurrentIOState->fHINF.tpyl.hi);
size += addWord(fCurrentIOState->fHINF.tpyl.lo);
addAtomEnd;
addAtom(dmed);
size += addWord(fCurrentIOState->fHINF.dmed.hi);
size += addWord(fCurrentIOState->fHINF.dmed.lo);
addAtomEnd;
addAtom(dimm);
size += addWord(fCurrentIOState->fHINF.dimm.hi);
size += addWord(fCurrentIOState->fHINF.dimm.lo);
addAtomEnd;
addAtom(drep);
size += addWord(0);
size += addWord(0);
addAtomEnd;
addAtom(tmin);
size += addWord(0);
addAtomEnd;
addAtom(tmax);
size += addWord(0);
addAtomEnd;
addAtom(pmax);
size += addWord(fCurrentIOState->fHINF.pmax);
addAtomEnd;
addAtom(dmax);
size += addWord(fCurrentIOState->fHINF.dmax);
addAtomEnd;
addAtom(payt);
MediaSubsession& ourSubsession = fCurrentIOState->fOurSubsession;
RTPSource* rtpSource = ourSubsession.rtpSource();
size += addWord(rtpSource->rtpPayloadFormat());
// Also, add a 'rtpmap' string: <mime-subtype>/<rtp-frequency>
unsigned rtpmapStringLength = strlen(ourSubsession.codecName()) + 20;
char* rtpmapString = new char[rtpmapStringLength];
sprintf(rtpmapString, "%s/%d",
ourSubsession.codecName(), rtpSource->timestampFrequency());
size += addArbitraryString(rtpmapString);
delete[] rtpmapString;
addAtomEnd;
// A dummy atom (with name "????"):
unsigned QuickTimeFileSink::addAtom_dummy() {
int64_t initFilePosn = TellFile64(fOutFid);
unsigned size = addAtomHeader("????");
addAtomEnd;
live/liveMedia/MPEG2IndexFromTransportStream.cpp 000444 001751 000000 00000057162 12265042432 022074 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A filter that produces a sequence of I-frame indices from an MPEG-2 Transport Stream
// Implementation
#include "MPEG2IndexFromTransportStream.hh"
////////// IndexRecord definition //////////
enum RecordType {
RECORD_UNPARSED = 0,
RECORD_VSH = 1, // an MPEG Video Sequence Header
RECORD_GOP = 2,
RECORD_PIC_NON_IFRAME = 3, // includes slices
RECORD_PIC_IFRAME = 4, // includes slices
RECORD_NAL_H264_SPS = 5, // H.264
RECORD_NAL_H264_PPS = 6, // H.264
RECORD_NAL_H264_SEI = 7, // H.264
RECORD_NAL_H264_NON_IFRAME = 8, // H.264
RECORD_NAL_H264_IFRAME = 9, // H.264
RECORD_NAL_H264_OTHER = 10, // H.264
RECORD_NAL_H265_VPS = 11, // H.265
RECORD_NAL_H265_SPS = 12, // H.265
RECORD_NAL_H265_PPS = 13, // H.265
RECORD_NAL_H265_NON_IFRAME = 14, // H.265
RECORD_NAL_H265_IFRAME = 15, // H.265
RECORD_NAL_H265_OTHER = 16, // H.265
RECORD_JUNK
};
class IndexRecord {
public:
IndexRecord(u_int8_t startOffset, u_int8_t size,
unsigned long transportPacketNumber, float pcr);
virtual ~IndexRecord();
RecordType& recordType() { return fRecordType; }
void setFirstFlag() { fRecordType = (RecordType)(((u_int8_t)fRecordType) | 0x80); }
u_int8_t startOffset() const { return fStartOffset; }
u_int8_t& size() { return fSize; }
float pcr() const { return fPCR; }
unsigned long transportPacketNumber() const { return fTransportPacketNumber; }
IndexRecord* next() const { return fNext; }
void addAfter(IndexRecord* prev);
void unlink();
private:
// Index records are maintained in a doubly-linked list:
IndexRecord* fNext;
IndexRecord* fPrev;
RecordType fRecordType;
u_int8_t fStartOffset; // within the Transport Stream packet
u_int8_t fSize; // in bytes, following "fStartOffset".
// Note: fStartOffset + fSize <= TRANSPORT_PACKET_SIZE
float fPCR;
unsigned long fTransportPacketNumber;
};
#ifdef DEBUG
static char const* recordTypeStr[] = {
"UNPARSED",
"VSH",
"GOP",
"PIC(non-I-frame)",
"PIC(I-frame)",
"SPS (H.264)",
"PPS (H.264)",
"SEI (H.264)",
"H.264 non-I-frame",
"H.264 I-frame",
"other NAL unit (H.264)",
"VPS (H.265)",
"SPS (H.265)",
"PPS (H.265)",
"H.265 non-I-frame",
"H.265 I-frame",
"other NAL unit (H.265)",
"JUNK"
};
UsageEnvironment& operator<<(UsageEnvironment& env, IndexRecord& r) {
return env << "[" << ((r.recordType()&0x80) != 0 ? "1" : "")
<< recordTypeStr[r.recordType()&0x7F] << ":"
<< (unsigned)r.transportPacketNumber() << ":" << r.startOffset()
<< "(" << r.size() << ")@" << r.pcr() << "]";
}
#endif
////////// MPEG2IFrameIndexFromTransportStream implementation //////////
MPEG2IFrameIndexFromTransportStream*
MPEG2IFrameIndexFromTransportStream::createNew(UsageEnvironment& env,
FramedSource* inputSource) {
return new MPEG2IFrameIndexFromTransportStream(env, inputSource);
}
// The largest expected frame size (in bytes):
#define MAX_FRAME_SIZE 400000
// Make our parse buffer twice as large as this, to ensure that at least one
// complete frame will fit inside it:
#define PARSE_BUFFER_SIZE (2*MAX_FRAME_SIZE)
// The PID used for the PAT (as defined in the MPEG Transport Stream standard):
#define PAT_PID 0
MPEG2IFrameIndexFromTransportStream
::MPEG2IFrameIndexFromTransportStream(UsageEnvironment& env,
FramedSource* inputSource)
: FramedFilter(env, inputSource),
fIsH264(False), fIsH265(False),
fInputTransportPacketCounter((unsigned)-1), fClosureNumber(0), fLastContinuityCounter(~0),
fFirstPCR(0.0), fLastPCR(0.0), fHaveSeenFirstPCR(False),
fPMT_PID(0x10), fVideo_PID(0xE0), // default values
fParseBufferSize(PARSE_BUFFER_SIZE),
fParseBufferFrameStart(0), fParseBufferParseEnd(4), fParseBufferDataEnd(0),
fHeadIndexRecord(NULL), fTailIndexRecord(NULL) {
fParseBuffer = new unsigned char[fParseBufferSize];
}
MPEG2IFrameIndexFromTransportStream::~MPEG2IFrameIndexFromTransportStream() {
delete fHeadIndexRecord;
delete[] fParseBuffer;
}
void MPEG2IFrameIndexFromTransportStream::doGetNextFrame() {
// Begin by trying to deliver an index record (for an already-parsed frame)
// to the client:
if (deliverIndexRecord()) return;
// No more index records are left to deliver, so try to parse a new frame:
if (parseFrame()) { // success - try again
doGetNextFrame();
return;
}
// We need to read some more Transport Stream packets. Check whether we have room:
if (fParseBufferSize - fParseBufferDataEnd < TRANSPORT_PACKET_SIZE) {
// There's no room left. Compact the buffer, and check again:
compactParseBuffer();
if (fParseBufferSize - fParseBufferDataEnd < TRANSPORT_PACKET_SIZE) {
envir() << "ERROR: parse buffer full; increase MAX_FRAME_SIZE\n";
// Treat this as if the input source ended:
handleInputClosure1();
return;
}
}
// Arrange to read a new Transport Stream packet:
fInputSource->getNextFrame(fInputBuffer, sizeof fInputBuffer,
afterGettingFrame, this,
handleInputClosure, this);
}
void MPEG2IFrameIndexFromTransportStream
::afterGettingFrame(void* clientData, unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
MPEG2IFrameIndexFromTransportStream* source
= (MPEG2IFrameIndexFromTransportStream*)clientData;
source->afterGettingFrame1(frameSize, numTruncatedBytes,
presentationTime, durationInMicroseconds);
}
#define TRANSPORT_SYNC_BYTE 0x47
void MPEG2IFrameIndexFromTransportStream
::afterGettingFrame1(unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
if (frameSize < TRANSPORT_PACKET_SIZE || fInputBuffer[0] != TRANSPORT_SYNC_BYTE) {
if (fInputBuffer[0] != TRANSPORT_SYNC_BYTE) {
envir() << "Bad TS sync byte: 0x" << fInputBuffer[0] << "\n";
}
// Handle this as if the source ended:
handleInputClosure1();
return;
}
++fInputTransportPacketCounter;
// Figure out how much of this Transport Packet contains PES data:
u_int8_t adaptation_field_control = (fInputBuffer[3]&0x30)>>4;
u_int8_t totalHeaderSize
= adaptation_field_control == 1 ? 4 : 5 + fInputBuffer[4];
// Check for a PCR:
if (totalHeaderSize > 5 && (fInputBuffer[5]&0x10) != 0) {
// There's a PCR:
u_int32_t pcrBaseHigh
= (fInputBuffer[6]<<24)|(fInputBuffer[7]<<16)
|(fInputBuffer[8]<<8)|fInputBuffer[9];
float pcr = pcrBaseHigh/45000.0f;
if ((fInputBuffer[10]&0x80) != 0) pcr += 1/90000.0f; // add in low-bit (if set)
unsigned short pcrExt = ((fInputBuffer[10]&0x01)<<8) | fInputBuffer[11];
pcr += pcrExt/27000000.0f;
if (!fHaveSeenFirstPCR) {
fFirstPCR = pcr;
fHaveSeenFirstPCR = True;
} else if (pcr < fLastPCR) {
// The PCR timestamp has gone backwards. Display a warning about this
// (because it indicates buggy Transport Stream data), and compensate for it.
envir() << "\nWarning: At about " << fLastPCR-fFirstPCR
<< " seconds into the file, the PCR timestamp decreased - from "
<< fLastPCR << " to " << pcr << "\n";
fFirstPCR -= (fLastPCR - pcr);
}
fLastPCR = pcr;
}
// Get the PID from the packet, and check for special tables: the PAT and PMT:
u_int16_t PID = ((fInputBuffer[1]&0x1F)<<8) | fInputBuffer[2];
if (PID == PAT_PID) {
analyzePAT(&fInputBuffer[totalHeaderSize], TRANSPORT_PACKET_SIZE-totalHeaderSize);
} else if (PID == fPMT_PID) {
analyzePMT(&fInputBuffer[totalHeaderSize], TRANSPORT_PACKET_SIZE-totalHeaderSize);
}
// Ignore transport packets for non-video programs,
// or packets with no data, or packets that duplicate the previous packet:
u_int8_t continuity_counter = fInputBuffer[3]&0x0F;
if ((PID != fVideo_PID) ||
!(adaptation_field_control == 1 || adaptation_field_control == 3) ||
continuity_counter == fLastContinuityCounter) {
doGetNextFrame();
return;
}
fLastContinuityCounter = continuity_counter;
// Also, if this is the start of a PES packet, then skip over the PES header:
Boolean payload_unit_start_indicator = (fInputBuffer[1]&0x40) != 0;
if (payload_unit_start_indicator) {
// Note: The following works only for MPEG-2 data #####
u_int8_t PES_header_data_length = fInputBuffer[totalHeaderSize+8];
totalHeaderSize += 9 + PES_header_data_length;
if (totalHeaderSize >= TRANSPORT_PACKET_SIZE) {
envir() << "Unexpectedly large PES header size: " << PES_header_data_length << "\n";
// Handle this as if the source ended:
handleInputClosure1();
return;
}
}
// The remaining data is Video Elementary Stream data. Add it to our parse buffer:
unsigned vesSize = TRANSPORT_PACKET_SIZE - totalHeaderSize;
memmove(&fParseBuffer[fParseBufferDataEnd], &fInputBuffer[totalHeaderSize], vesSize);
fParseBufferDataEnd += vesSize;
// And add a new index record noting where it came from:
addToTail(new IndexRecord(totalHeaderSize, vesSize, fInputTransportPacketCounter,
fLastPCR - fFirstPCR));
// Try again:
doGetNextFrame();
}
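// A note on the PCR arithmetic above: the adaptation field carries a 33-bit
// PCR 'base' (in 90 kHz units) plus a 9-bit 'extension' (in 27 MHz units),
// but "pcrBaseHigh" holds only the top 32 bits of the base.  Dividing by
// 45000 (i.e., 90000/2) therefore yields base/90000 seconds, with the
// dropped low-order bit added back separately as 1/90000.  The following
// stand-alone sketch performs the same computation (illustrative only; it
// is not used by this library):
#if 0
static float decodePCRSeconds(unsigned char const* p) {
// "p" points to the six PCR bytes - i.e., &packet[6] in the code above:
u_int32_t baseHigh = (p[0]<<24)|(p[1]<<16)|(p[2]<<8)|p[3]; // base bits 32..1
float pcr = baseHigh/45000.0f; // == (2*baseHigh)/90000.0
if ((p[4]&0x80) != 0) pcr += 1/90000.0f; // base bit 0
u_int16_t ext = ((p[4]&0x01)<<8) | p[5]; // 9-bit extension, 27 MHz units
return pcr + ext/27000000.0f;
}
#endif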
void MPEG2IFrameIndexFromTransportStream::handleInputClosure(void* clientData) {
MPEG2IFrameIndexFromTransportStream* source
= (MPEG2IFrameIndexFromTransportStream*)clientData;
source->handleInputClosure1();
}
#define VIDEO_SEQUENCE_START_CODE 0xB3 // MPEG-1 or 2
#define VISUAL_OBJECT_SEQUENCE_START_CODE 0xB0 // MPEG-4
#define GROUP_START_CODE 0xB8 // MPEG-1 or 2
#define GROUP_VOP_START_CODE 0xB3 // MPEG-4
#define PICTURE_START_CODE 0x00 // MPEG-1 or 2
#define VOP_START_CODE 0xB6 // MPEG-4
void MPEG2IFrameIndexFromTransportStream::handleInputClosure1() {
if (++fClosureNumber == 1 && fParseBufferDataEnd > fParseBufferFrameStart
&& fParseBufferDataEnd <= fParseBufferSize - 4) {
// This is the first time we saw EOF, and there's still data remaining to be
// parsed. Hack: Append a Picture Header code to the end of the unparsed
// data, and try again. This should use up all of the unparsed data.
fParseBuffer[fParseBufferDataEnd++] = 0;
fParseBuffer[fParseBufferDataEnd++] = 0;
fParseBuffer[fParseBufferDataEnd++] = 1;
fParseBuffer[fParseBufferDataEnd++] = PICTURE_START_CODE;
// Try again:
doGetNextFrame();
} else {
// Handle closure in the regular way:
FramedSource::handleClosure(this);
}
}
void MPEG2IFrameIndexFromTransportStream
::analyzePAT(unsigned char* pkt, unsigned size) {
// Get the PMT_PID:
while (size >= 17) { // enough data remains for another 4-byte program entry
u_int16_t program_number = (pkt[9]<<8) | pkt[10];
if (program_number != 0) {
fPMT_PID = ((pkt[11]&0x1F)<<8) | pkt[12];
return;
}
pkt += 4; size -= 4;
}
}
void MPEG2IFrameIndexFromTransportStream
::analyzePMT(unsigned char* pkt, unsigned size) {
// Scan the "elementary_PID"s in the map, until we see the first video stream.
// First, get the "section_length", to get the table's size:
u_int16_t section_length = ((pkt[2]&0x0F)<<8) | pkt[3];
if ((unsigned)(4+section_length) < size) size = (4+section_length);
// Then, skip any descriptors following the "program_info_length":
if (size < 22) return; // not enough data
unsigned program_info_length = ((pkt[11]&0x0F)<<8) | pkt[12];
pkt += 13; size -= 13;
if (size < program_info_length) return; // not enough data
pkt += program_info_length; size -= program_info_length;
// Look at each ("stream_type","elementary_PID") pair, looking for a video stream:
while (size >= 9) {
u_int8_t stream_type = pkt[0];
u_int16_t elementary_PID = ((pkt[1]&0x1F)<<8) | pkt[2];
if (stream_type == 1 || stream_type == 2 ||
stream_type == 0x1B/*H.264 video*/ || stream_type == 0x24/*H.265 video*/) {
if (stream_type == 0x1B) fIsH264 = True;
else if (stream_type == 0x24) fIsH265 = True;
fVideo_PID = elementary_PID;
return;
}
u_int16_t ES_info_length = ((pkt[3]&0x0F)<<8) | pkt[4];
pkt += 5; size -= 5;
if (size < ES_info_length) return; // not enough data
pkt += ES_info_length; size -= ES_info_length;
}
}
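// For reference, each entry in the PMT's elementary-stream loop (scanned
// above) is laid out as: an 8-bit "stream_type" (pkt[0]); 3 reserved bits
// and a 13-bit "elementary_PID" (pkt[1..2], hence the 0x1F mask); then
// 4 reserved bits and a 12-bit "ES_info_length" (pkt[3..4], hence the 0x0F
// mask), followed by that many descriptor bytes.  The stream types accepted
// above are 0x01/0x02 (MPEG-1/2 video), 0x1B (H.264), and 0x24 (H.265).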
Boolean MPEG2IFrameIndexFromTransportStream::deliverIndexRecord() {
IndexRecord* head = fHeadIndexRecord;
if (head == NULL) return False;
// Check whether the head record has been parsed yet:
if (head->recordType() == RECORD_UNPARSED) return False;
// Remove the head record (the one whose data we'll be delivering):
IndexRecord* next = head->next();
head->unlink();
if (next == head) {
fHeadIndexRecord = fTailIndexRecord = NULL;
} else {
fHeadIndexRecord = next;
}
if (head->recordType() == RECORD_JUNK) {
// Don't actually deliver the data to the client:
delete head;
// Try to deliver the next record instead:
return deliverIndexRecord();
}
// Deliver data from the head record:
#ifdef DEBUG
envir() << "delivering: " << *head << "\n";
#endif
if (fMaxSize < 11) {
fFrameSize = 0;
} else {
fTo[0] = (u_int8_t)(head->recordType());
fTo[1] = head->startOffset();
fTo[2] = head->size();
// Deliver the PCR, as 24 bits (integer part; little endian) + 8 bits (fractional part)
float pcr = head->pcr();
unsigned pcr_int = (unsigned)pcr;
u_int8_t pcr_frac = (u_int8_t)(256*(pcr-pcr_int));
fTo[3] = (unsigned char)(pcr_int);
fTo[4] = (unsigned char)(pcr_int>>8);
fTo[5] = (unsigned char)(pcr_int>>16);
fTo[6] = (unsigned char)(pcr_frac);
// Deliver the transport packet number (in little-endian order):
unsigned long tpn = head->transportPacketNumber();
fTo[7] = (unsigned char)(tpn);
fTo[8] = (unsigned char)(tpn>>8);
fTo[9] = (unsigned char)(tpn>>16);
fTo[10] = (unsigned char)(tpn>>24);
fFrameSize = 11;
}
// Free the (former) head record (as we're now done with it):
delete head;
// Complete delivery to the client:
afterGetting(this);
return True;
}
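// Each index record is delivered to the client as exactly 11 bytes, in the
// layout written above.  A downstream consumer could unpack such a record
// as follows (an illustrative sketch with hypothetical names, not an API of
// this library):
#if 0
struct DeliveredIndexRecord {
u_int8_t recordTypeWithFirstFlag; // low 7 bits: RecordType; high bit set on a frame's first record
u_int8_t startOffset; // offset of the data within its Transport Stream packet
u_int8_t size; // number of bytes covered by this record
float pcr; // in seconds, relative to the stream's first PCR
unsigned long transportPacketNumber;
};
static void unpackDeliveredIndexRecord(unsigned char const p[11], DeliveredIndexRecord& r) {
r.recordTypeWithFirstFlag = p[0];
r.startOffset = p[1];
r.size = p[2];
// 24-bit integer part (little-endian), plus an 8-bit binary fraction:
r.pcr = (float)(p[3] | (p[4]<<8) | (p[5]<<16)) + p[6]/256.0f;
// 32-bit transport packet number (little-endian):
r.transportPacketNumber = p[7] | (p[8]<<8) | (p[9]<<16) | ((unsigned long)p[10]<<24);
}
#endif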
Boolean MPEG2IFrameIndexFromTransportStream::parseFrame() {
// At this point, we have a queue of >=0 (unparsed) index records, representing
// the data in the parse buffer from "fParseBufferFrameStart"
// to "fParseBufferDataEnd". We now parse through this data, looking for
// a complete 'frame', where a 'frame', in this case, means:
// for MPEG video: a Video Sequence Header, GOP Header, Picture Header, or Slice
// for H.264 or H.265 video: a NAL unit
// Inspect the frame's initial 4-byte code, to make sure it starts with a system code:
if (fParseBufferDataEnd-fParseBufferFrameStart < 4) return False; // not enough data
unsigned numInitialBadBytes = 0;
unsigned char const* p = &fParseBuffer[fParseBufferFrameStart];
if (!(p[0] == 0 && p[1] == 0 && p[2] == 1)) {
// There's no system code at the beginning. Parse until we find one:
if (fParseBufferParseEnd == fParseBufferFrameStart + 4) {
// Start parsing from the beginning of the frame data:
fParseBufferParseEnd = fParseBufferFrameStart;
}
unsigned char nextCode;
if (!parseToNextCode(nextCode)) return False;
numInitialBadBytes = fParseBufferParseEnd - fParseBufferFrameStart;
fParseBufferFrameStart = fParseBufferParseEnd;
fParseBufferParseEnd += 4; // skip over the code that we just saw
p = &fParseBuffer[fParseBufferFrameStart];
}
unsigned char curCode = p[3];
if (fIsH264) curCode &= 0x1F; // nal_unit_type
else if (fIsH265) curCode = (curCode&0x7E)>>1;
RecordType curRecordType;
unsigned char nextCode;
if (fIsH264) {
switch (curCode) {
case 1: // Coded slice of a non-IDR picture
curRecordType = RECORD_NAL_H264_NON_IFRAME;
if (!parseToNextCode(nextCode)) return False;
break;
case 5: // Coded slice of an IDR picture
curRecordType = RECORD_NAL_H264_IFRAME;
if (!parseToNextCode(nextCode)) return False;
break;
case 6: // Supplemental enhancement information (SEI)
curRecordType = RECORD_NAL_H264_SEI;
if (!parseToNextCode(nextCode)) return False;
break;
case 7: // Sequence parameter set (SPS)
curRecordType = RECORD_NAL_H264_SPS;
if (!parseToNextCode(nextCode)) return False;
break;
case 8: // Picture parameter set (PPS)
curRecordType = RECORD_NAL_H264_PPS;
if (!parseToNextCode(nextCode)) return False;
break;
default:
curRecordType = RECORD_NAL_H264_OTHER;
if (!parseToNextCode(nextCode)) return False;
break;
}
} else if (fIsH265) {
switch (curCode) {
case 19: // Coded slice segment of an IDR picture (IDR_W_RADL)
case 20: // Coded slice segment of an IDR picture (IDR_N_LP)
curRecordType = RECORD_NAL_H265_IFRAME;
if (!parseToNextCode(nextCode)) return False;
break;
case 32: // Video parameter set (VPS)
curRecordType = RECORD_NAL_H265_VPS;
if (!parseToNextCode(nextCode)) return False;
break;
case 33: // Sequence parameter set (SPS)
curRecordType = RECORD_NAL_H265_SPS;
if (!parseToNextCode(nextCode)) return False;
break;
case 34: // Picture parameter set (PPS)
curRecordType = RECORD_NAL_H265_PPS;
if (!parseToNextCode(nextCode)) return False;
break;
default:
curRecordType = (curCode <= 31) ? RECORD_NAL_H265_NON_IFRAME : RECORD_NAL_H265_OTHER;
if (!parseToNextCode(nextCode)) return False;
break;
}
} else { // MPEG-1, 2, or 4
switch (curCode) {
case VIDEO_SEQUENCE_START_CODE:
case VISUAL_OBJECT_SEQUENCE_START_CODE:
curRecordType = RECORD_VSH;
while (1) {
if (!parseToNextCode(nextCode)) return False;
if (nextCode == GROUP_START_CODE ||
nextCode == PICTURE_START_CODE || nextCode == VOP_START_CODE) break;
fParseBufferParseEnd += 4; // skip over the code that we just saw
}
break;
case GROUP_START_CODE:
curRecordType = RECORD_GOP;
while (1) {
if (!parseToNextCode(nextCode)) return False;
if (nextCode == PICTURE_START_CODE || nextCode == VOP_START_CODE) break;
fParseBufferParseEnd += 4; // skip over the code that we just saw
}
break;
default: // picture
curRecordType = RECORD_PIC_NON_IFRAME; // may get changed to IFRAME later
while (1) {
if (!parseToNextCode(nextCode)) return False;
if (nextCode == VIDEO_SEQUENCE_START_CODE ||
nextCode == VISUAL_OBJECT_SEQUENCE_START_CODE ||
nextCode == GROUP_START_CODE || nextCode == GROUP_VOP_START_CODE ||
nextCode == PICTURE_START_CODE || nextCode == VOP_START_CODE) break;
fParseBufferParseEnd += 4; // skip over the code that we just saw
}
break;
}
}
if (curRecordType == RECORD_PIC_NON_IFRAME) {
if (curCode == VOP_START_CODE) { // MPEG-4
if ((fParseBuffer[fParseBufferFrameStart+4]&0xC0) == 0) {
// This is actually an I-frame. Note it as such:
curRecordType = RECORD_PIC_IFRAME;
}
} else { // MPEG-1 or 2
if ((fParseBuffer[fParseBufferFrameStart+5]&0x38) == 0x08) {
// This is actually an I-frame. Note it as such:
curRecordType = RECORD_PIC_IFRAME;
}
}
}
// There is now a parsed 'frame', from "fParseBufferFrameStart"
// to "fParseBufferParseEnd". Tag the corresponding index records to note this:
unsigned frameSize = fParseBufferParseEnd - fParseBufferFrameStart + numInitialBadBytes;
#ifdef DEBUG
envir() << "parsed " << recordTypeStr[curRecordType] << "; length "
<< frameSize << "\n";
#endif
for (IndexRecord* r = fHeadIndexRecord; ; r = r->next()) {
if (numInitialBadBytes >= r->size()) {
r->recordType() = RECORD_JUNK;
numInitialBadBytes -= r->size();
} else {
r->recordType() = curRecordType;
}
if (r == fHeadIndexRecord) r->setFirstFlag();
// indicates that this is the first record for this frame
if (r->size() > frameSize) {
// This record contains extra data that's not part of the frame.
// Shorten this record, and move the extra data to a new record
// that comes afterwards:
u_int8_t newOffset = r->startOffset() + frameSize;
u_int8_t newSize = r->size() - frameSize;
r->size() = frameSize;
#ifdef DEBUG
envir() << "tagged record (modified): " << *r << "\n";
#endif
IndexRecord* newRecord
= new IndexRecord(newOffset, newSize, r->transportPacketNumber(), r->pcr());
newRecord->addAfter(r);
if (fTailIndexRecord == r) fTailIndexRecord = newRecord;
#ifdef DEBUG
envir() << "added extra record: " << *newRecord << "\n";
#endif
} else {
#ifdef DEBUG
envir() << "tagged record: " << *r << "\n";
#endif
}
frameSize -= r->size();
if (frameSize == 0) break;
if (r == fTailIndexRecord) { // this shouldn't happen
envir() << "!!!!!Internal consistency error!!!!!\n";
return False;
}
}
// Finally, update our parse state (to skip over the now-parsed data):
fParseBufferFrameStart = fParseBufferParseEnd;
fParseBufferParseEnd += 4; // to skip over the next code (that we found)
return True;
}
Boolean MPEG2IFrameIndexFromTransportStream
::parseToNextCode(unsigned char& nextCode) {
unsigned char const* p = &fParseBuffer[fParseBufferParseEnd];
unsigned char const* end = &fParseBuffer[fParseBufferDataEnd];
while (p <= end-4) {
if (p[2] > 1) p += 3; // common case (optimized)
else if (p[2] == 0) ++p;
else if (p[0] == 0 && p[1] == 0) { // && p[2] == 1
// We found a code here:
nextCode = p[3];
fParseBufferParseEnd = p - &fParseBuffer[0]; // where we've gotten to
return True;
} else p += 3;
}
fParseBufferParseEnd = p - &fParseBuffer[0]; // where we've gotten to
return False; // no luck this time
}
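// The scan above exploits the structure of a start code (0x00 0x00 0x01) to
// skip up to three bytes per iteration: if p[2] > 1, then no start code can
// begin at p, p+1, or p+2 (each of those would require p[2] to be 0x00 or
// 0x01), so the scan safely advances by 3; if p[2] == 0, a code might begin
// at p+1 or p+2, so it advances by just 1; only when p[2] == 1 and
// p[0] == p[1] == 0 has a code actually been found at p.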
void MPEG2IFrameIndexFromTransportStream::compactParseBuffer() {
#ifdef DEBUG
envir() << "Compacting parse buffer: [" << fParseBufferFrameStart
<< "," << fParseBufferParseEnd << "," << fParseBufferDataEnd << "]";
#endif
memmove(&fParseBuffer[0], &fParseBuffer[fParseBufferFrameStart],
fParseBufferDataEnd - fParseBufferFrameStart);
fParseBufferDataEnd -= fParseBufferFrameStart;
fParseBufferParseEnd -= fParseBufferFrameStart;
fParseBufferFrameStart = 0;
#ifdef DEBUG
envir() << "-> [" << fParseBufferFrameStart
<< "," << fParseBufferParseEnd << "," << fParseBufferDataEnd << "]\n";
#endif
}
void MPEG2IFrameIndexFromTransportStream::addToTail(IndexRecord* newIndexRecord) {
#ifdef DEBUG
envir() << "adding new: " << *newIndexRecord << "\n";
#endif
if (fTailIndexRecord == NULL) {
fHeadIndexRecord = fTailIndexRecord = newIndexRecord;
} else {
newIndexRecord->addAfter(fTailIndexRecord);
fTailIndexRecord = newIndexRecord;
}
}
////////// IndexRecord implementation //////////
IndexRecord::IndexRecord(u_int8_t startOffset, u_int8_t size,
unsigned long transportPacketNumber, float pcr)
: fNext(this), fPrev(this), fRecordType(RECORD_UNPARSED),
fStartOffset(startOffset), fSize(size),
fPCR(pcr), fTransportPacketNumber(transportPacketNumber) {
}
IndexRecord::~IndexRecord() {
IndexRecord* nextRecord = next();
unlink();
if (nextRecord != this) delete nextRecord;
}
void IndexRecord::addAfter(IndexRecord* prev) {
fNext = prev->fNext;
fPrev = prev;
prev->fNext->fPrev = this;
prev->fNext = this;
}
void IndexRecord::unlink() {
fNext->fPrev = fPrev;
fPrev->fNext = fNext;
fNext = fPrev = this;
}
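// Note that a freshly constructed or unlinked record is its own neighbor
// (fNext == fPrev == this).  This circular representation lets addAfter()
// and unlink() run without any NULL checks, and it is what makes the
// destructor above delete an entire list when its head is deleted: each
// record unlinks itself, then deletes its former successor, until next()
// wraps back around to the record itself.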
live/liveMedia/OnDemandServerMediaSubsession.cpp 000444 001751 000000 00000050211 12265042432 022233 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand.
// Implementation
#include "OnDemandServerMediaSubsession.hh"
#include <GroupsockHelper.hh>
OnDemandServerMediaSubsession
::OnDemandServerMediaSubsession(UsageEnvironment& env,
Boolean reuseFirstSource,
portNumBits initialPortNum)
: ServerMediaSubsession(env),
fSDPLines(NULL), fReuseFirstSource(reuseFirstSource), fInitialPortNum(initialPortNum), fLastStreamToken(NULL) {
fDestinationsHashTable = HashTable::create(ONE_WORD_HASH_KEYS);
gethostname(fCNAME, sizeof fCNAME);
fCNAME[sizeof fCNAME-1] = '\0'; // just in case
}
OnDemandServerMediaSubsession::~OnDemandServerMediaSubsession() {
delete[] fSDPLines;
// Clean out the destinations hash table:
while (1) {
Destinations* destinations
= (Destinations*)(fDestinationsHashTable->RemoveNext());
if (destinations == NULL) break;
delete destinations;
}
delete fDestinationsHashTable;
}
char const*
OnDemandServerMediaSubsession::sdpLines() {
if (fSDPLines == NULL) {
// We need to construct a set of SDP lines that describe this
// subsession (as a unicast stream). To do so, we first create
// dummy (unused) source and "RTPSink" objects,
// whose parameters we use for the SDP lines:
unsigned estBitrate;
FramedSource* inputSource = createNewStreamSource(0, estBitrate);
if (inputSource == NULL) return NULL; // file not found
struct in_addr dummyAddr;
dummyAddr.s_addr = 0;
Groupsock dummyGroupsock(envir(), dummyAddr, 0, 0);
unsigned char rtpPayloadType = 96 + trackNumber()-1; // if dynamic
RTPSink* dummyRTPSink
= createNewRTPSink(&dummyGroupsock, rtpPayloadType, inputSource);
setSDPLinesFromRTPSink(dummyRTPSink, inputSource, estBitrate);
Medium::close(dummyRTPSink);
closeStreamSource(inputSource);
}
return fSDPLines;
}
void OnDemandServerMediaSubsession
::getStreamParameters(unsigned clientSessionId,
netAddressBits clientAddress,
Port const& clientRTPPort,
Port const& clientRTCPPort,
int tcpSocketNum,
unsigned char rtpChannelId,
unsigned char rtcpChannelId,
netAddressBits& destinationAddress,
u_int8_t& /*destinationTTL*/,
Boolean& isMulticast,
Port& serverRTPPort,
Port& serverRTCPPort,
void*& streamToken) {
if (destinationAddress == 0) destinationAddress = clientAddress;
struct in_addr destinationAddr; destinationAddr.s_addr = destinationAddress;
isMulticast = False;
if (fLastStreamToken != NULL && fReuseFirstSource) {
// Special case: Rather than creating a new 'StreamState',
// we reuse the one that we've already created:
serverRTPPort = ((StreamState*)fLastStreamToken)->serverRTPPort();
serverRTCPPort = ((StreamState*)fLastStreamToken)->serverRTCPPort();
++((StreamState*)fLastStreamToken)->referenceCount();
streamToken = fLastStreamToken;
} else {
// Normal case: Create a new media source:
unsigned streamBitrate;
FramedSource* mediaSource
= createNewStreamSource(clientSessionId, streamBitrate);
// Create 'groupsock' and 'sink' objects for the destination,
// using previously unused server port numbers:
RTPSink* rtpSink;
BasicUDPSink* udpSink;
Groupsock* rtpGroupsock;
Groupsock* rtcpGroupsock;
portNumBits serverPortNum;
if (clientRTCPPort.num() == 0) {
// We're streaming raw UDP (not RTP). Create a single groupsock:
NoReuse dummy(envir()); // ensures that we skip over ports that are already in use
for (serverPortNum = fInitialPortNum; ; ++serverPortNum) {
struct in_addr dummyAddr; dummyAddr.s_addr = 0;
serverRTPPort = serverPortNum;
rtpGroupsock = new Groupsock(envir(), dummyAddr, serverRTPPort, 255);
if (rtpGroupsock->socketNum() >= 0) break; // success
}
rtcpGroupsock = NULL;
rtpSink = NULL;
udpSink = BasicUDPSink::createNew(envir(), rtpGroupsock);
} else {
// Normal case: We're streaming RTP (over UDP or TCP). Create a pair of
// groupsocks (RTP and RTCP), with adjacent port numbers (RTP port number even):
NoReuse dummy(envir()); // ensures that we skip over ports that are already in use
for (portNumBits serverPortNum = fInitialPortNum; ; serverPortNum += 2) {
struct in_addr dummyAddr; dummyAddr.s_addr = 0;
serverRTPPort = serverPortNum;
rtpGroupsock = new Groupsock(envir(), dummyAddr, serverRTPPort, 255);
if (rtpGroupsock->socketNum() < 0) {
delete rtpGroupsock;
continue; // try again
}
serverRTCPPort = serverPortNum+1;
rtcpGroupsock = new Groupsock(envir(), dummyAddr, serverRTCPPort, 255);
if (rtcpGroupsock->socketNum() < 0) {
delete rtpGroupsock;
delete rtcpGroupsock;
continue; // try again
}
break; // success
}
unsigned char rtpPayloadType = 96 + trackNumber()-1; // if dynamic
rtpSink = createNewRTPSink(rtpGroupsock, rtpPayloadType, mediaSource);
udpSink = NULL;
}
// Turn off the destinations for each groupsock. They'll get set later
// (unless TCP is used instead):
if (rtpGroupsock != NULL) rtpGroupsock->removeAllDestinations();
if (rtcpGroupsock != NULL) rtcpGroupsock->removeAllDestinations();
if (rtpGroupsock != NULL) {
// Try to use a big send buffer for RTP - at least 0.1 second of
// specified bandwidth and at least 50 KB
unsigned rtpBufSize = streamBitrate * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes
if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024;
increaseSendBufferTo(envir(), rtpGroupsock->socketNum(), rtpBufSize);
}
// Set up the state of the stream. The stream will get started later:
streamToken = fLastStreamToken
= new StreamState(*this, serverRTPPort, serverRTCPPort, rtpSink, udpSink,
streamBitrate, mediaSource,
rtpGroupsock, rtcpGroupsock);
}
// Record these destinations as being for this client session id:
Destinations* destinations;
if (tcpSocketNum < 0) { // UDP
destinations = new Destinations(destinationAddr, clientRTPPort, clientRTCPPort);
} else { // TCP
destinations = new Destinations(tcpSocketNum, rtpChannelId, rtcpChannelId);
}
fDestinationsHashTable->Add((char const*)clientSessionId, destinations);
}
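// A note on the port-allocation loops above: by RTP convention, the RTP
// port number is even and the RTCP port is the next-higher (odd) number, so
// the RTP/RTCP case searches in steps of two, starting from
// "fInitialPortNum".  If either socket of a candidate pair cannot be
// created, both are discarded and the search moves on to the next pair; the
// "NoReuse" object ensures that ports already bound elsewhere are skipped
// rather than shared.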
void OnDemandServerMediaSubsession::startStream(unsigned clientSessionId,
void* streamToken,
TaskFunc* rtcpRRHandler,
void* rtcpRRHandlerClientData,
unsigned short& rtpSeqNum,
unsigned& rtpTimestamp,
ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
void* serverRequestAlternativeByteHandlerClientData) {
StreamState* streamState = (StreamState*)streamToken;
Destinations* destinations
= (Destinations*)(fDestinationsHashTable->Lookup((char const*)clientSessionId));
if (streamState != NULL) {
streamState->startPlaying(destinations,
rtcpRRHandler, rtcpRRHandlerClientData,
serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData);
RTPSink* rtpSink = streamState->rtpSink(); // alias
if (rtpSink != NULL) {
rtpSeqNum = rtpSink->currentSeqNo();
rtpTimestamp = rtpSink->presetNextTimestamp();
}
}
}
void OnDemandServerMediaSubsession::pauseStream(unsigned /*clientSessionId*/,
void* streamToken) {
// Pausing isn't allowed if multiple clients are receiving data from
// the same source:
if (fReuseFirstSource) return;
StreamState* streamState = (StreamState*)streamToken;
if (streamState != NULL) streamState->pause();
}
void OnDemandServerMediaSubsession::seekStream(unsigned /*clientSessionId*/,
void* streamToken, double& seekNPT, double streamDuration, u_int64_t& numBytes) {
numBytes = 0; // by default: unknown
// Seeking isn't allowed if multiple clients are receiving data from the same source:
if (fReuseFirstSource) return;
StreamState* streamState = (StreamState*)streamToken;
if (streamState != NULL && streamState->mediaSource() != NULL) {
seekStreamSource(streamState->mediaSource(), seekNPT, streamDuration, numBytes);
streamState->startNPT() = (float)seekNPT;
RTPSink* rtpSink = streamState->rtpSink(); // alias
if (rtpSink != NULL) rtpSink->resetPresentationTimes();
}
}
void OnDemandServerMediaSubsession::seekStream(unsigned /*clientSessionId*/,
void* streamToken, char*& absStart, char*& absEnd) {
// Seeking isn't allowed if multiple clients are receiving data from the same source:
if (fReuseFirstSource) return;
StreamState* streamState = (StreamState*)streamToken;
if (streamState != NULL && streamState->mediaSource() != NULL) {
seekStreamSource(streamState->mediaSource(), absStart, absEnd);
}
}
void OnDemandServerMediaSubsession::nullSeekStream(unsigned /*clientSessionId*/, void* streamToken) {
StreamState* streamState = (StreamState*)streamToken;
if (streamState != NULL && streamState->mediaSource() != NULL) {
// Because we're not seeking here, get the current NPT, and remember it as the new 'start' NPT:
streamState->startNPT() = getCurrentNPT(streamToken);
RTPSink* rtpSink = streamState->rtpSink(); // alias
if (rtpSink != NULL) rtpSink->resetPresentationTimes();
}
}
void OnDemandServerMediaSubsession::setStreamScale(unsigned /*clientSessionId*/,
void* streamToken, float scale) {
// Changing the scale factor isn't allowed if multiple clients are receiving data
// from the same source:
if (fReuseFirstSource) return;
StreamState* streamState = (StreamState*)streamToken;
if (streamState != NULL && streamState->mediaSource() != NULL) {
setStreamSourceScale(streamState->mediaSource(), scale);
}
}
float OnDemandServerMediaSubsession::getCurrentNPT(void* streamToken) {
do {
if (streamToken == NULL) break;
StreamState* streamState = (StreamState*)streamToken;
RTPSink* rtpSink = streamState->rtpSink();
if (rtpSink == NULL) break;
return streamState->startNPT()
+ (rtpSink->mostRecentPresentationTime().tv_sec - rtpSink->initialPresentationTime().tv_sec)
+ (rtpSink->mostRecentPresentationTime().tv_usec - rtpSink->initialPresentationTime().tv_usec)/1000000.0f;
} while (0);
return 0.0;
}
FramedSource* OnDemandServerMediaSubsession::getStreamSource(void* streamToken) {
if (streamToken == NULL) return NULL;
StreamState* streamState = (StreamState*)streamToken;
return streamState->mediaSource();
}
void OnDemandServerMediaSubsession::deleteStream(unsigned clientSessionId,
void*& streamToken) {
StreamState* streamState = (StreamState*)streamToken;
// Look up (and remove) the destinations for this client session:
Destinations* destinations
= (Destinations*)(fDestinationsHashTable->Lookup((char const*)clientSessionId));
if (destinations != NULL) {
fDestinationsHashTable->Remove((char const*)clientSessionId);
// Stop streaming to these destinations:
if (streamState != NULL) streamState->endPlaying(destinations);
}
// Delete the "StreamState" structure if it's no longer being used:
if (streamState != NULL) {
if (streamState->referenceCount() > 0) --streamState->referenceCount();
if (streamState->referenceCount() == 0) {
delete streamState;
streamToken = NULL;
}
}
// Finally, delete the destinations themselves:
delete destinations;
}
char const* OnDemandServerMediaSubsession
::getAuxSDPLine(RTPSink* rtpSink, FramedSource* /*inputSource*/) {
// Default implementation:
return rtpSink == NULL ? NULL : rtpSink->auxSDPLine();
}
void OnDemandServerMediaSubsession::seekStreamSource(FramedSource* /*inputSource*/,
double& /*seekNPT*/, double /*streamDuration*/, u_int64_t& numBytes) {
// Default implementation: Do nothing
}
void OnDemandServerMediaSubsession::seekStreamSource(FramedSource* /*inputSource*/,
char*& absStart, char*& absEnd) {
// Default implementation: do nothing (but delete[] and assign "absStart" and "absEnd" to NULL, to show that we don't handle this)
delete[] absStart; absStart = NULL;
delete[] absEnd; absEnd = NULL;
}
void OnDemandServerMediaSubsession
::setStreamSourceScale(FramedSource* /*inputSource*/, float /*scale*/) {
// Default implementation: Do nothing
}
void OnDemandServerMediaSubsession::closeStreamSource(FramedSource *inputSource) {
Medium::close(inputSource);
}
void OnDemandServerMediaSubsession
::setSDPLinesFromRTPSink(RTPSink* rtpSink, FramedSource* inputSource, unsigned estBitrate) {
if (rtpSink == NULL) return;
char const* mediaType = rtpSink->sdpMediaType();
unsigned char rtpPayloadType = rtpSink->rtpPayloadType();
AddressString ipAddressStr(fServerAddressForSDP);
char* rtpmapLine = rtpSink->rtpmapLine();
char const* rangeLine = rangeSDPLine();
char const* auxSDPLine = getAuxSDPLine(rtpSink, inputSource);
if (auxSDPLine == NULL) auxSDPLine = "";
char const* const sdpFmt =
"m=%s %u RTP/AVP %d\r\n"
"c=IN IP4 %s\r\n"
"b=AS:%u\r\n"
"%s"
"%s"
"%s"
"a=control:%s\r\n";
unsigned sdpFmtSize = strlen(sdpFmt)
+ strlen(mediaType) + 5 /* max short len */ + 3 /* max char len */
+ strlen(ipAddressStr.val())
+ 20 /* max int len */
+ strlen(rtpmapLine)
+ strlen(rangeLine)
+ strlen(auxSDPLine)
+ strlen(trackId());
char* sdpLines = new char[sdpFmtSize];
sprintf(sdpLines, sdpFmt,
mediaType, // m=
fPortNumForSDP, // m=
rtpPayloadType, // m=
ipAddressStr.val(), // c= address
estBitrate, // b=AS:
rtpmapLine, // a=rtpmap:... (if present)
rangeLine, // a=range:... (if present)
auxSDPLine, // optional extra SDP line
trackId()); // a=control:
delete[] (char*)rangeLine; delete[] rtpmapLine;
fSDPLines = strDup(sdpLines);
delete[] sdpLines;
}
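// For a typical video subsession, the lines constructed above take a form
// like the following (hypothetical values, for illustration only):
//   m=video 0 RTP/AVP 96
//   c=IN IP4 192.168.0.10
//   b=AS:500
//   a=rtpmap:96 H264/90000
//   a=control:track1
// (The "a=rtpmap:" and "a=range:" lines, and any codec-specific aux line,
// appear only when the corresponding strings are non-empty.)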
////////// StreamState implementation //////////
static void afterPlayingStreamState(void* clientData) {
StreamState* streamState = (StreamState*)clientData;
if (streamState->streamDuration() == 0.0) {
// When the input stream ends, tear it down. This will cause an RTCP "BYE"
// to be sent to each client, telling it that the stream has ended.
// (Because the stream didn't have a known duration, there was no other
// way for clients to know when the stream ended.)
streamState->reclaim();
}
// Otherwise, keep the stream alive, in case a client wants to
// subsequently re-play the stream starting from somewhere other than the end.
// (This can be done only on streams that have a known duration.)
}
StreamState::StreamState(OnDemandServerMediaSubsession& master,
Port const& serverRTPPort, Port const& serverRTCPPort,
RTPSink* rtpSink, BasicUDPSink* udpSink,
unsigned totalBW, FramedSource* mediaSource,
Groupsock* rtpGS, Groupsock* rtcpGS)
: fMaster(master), fAreCurrentlyPlaying(False), fReferenceCount(1),
fServerRTPPort(serverRTPPort), fServerRTCPPort(serverRTCPPort),
fRTPSink(rtpSink), fUDPSink(udpSink), fStreamDuration(master.duration()),
fTotalBW(totalBW), fRTCPInstance(NULL) /* created later */,
fMediaSource(mediaSource), fStartNPT(0.0), fRTPgs(rtpGS), fRTCPgs(rtcpGS) {
}
StreamState::~StreamState() {
reclaim();
}
void StreamState
::startPlaying(Destinations* dests,
TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData,
ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
void* serverRequestAlternativeByteHandlerClientData) {
if (dests == NULL) return;
if (fRTCPInstance == NULL && fRTPSink != NULL) {
// Create (and start) a 'RTCP instance' for this RTP sink:
fRTCPInstance
= RTCPInstance::createNew(fRTPSink->envir(), fRTCPgs,
fTotalBW, (unsigned char*)fMaster.fCNAME,
fRTPSink, NULL /* we're a server */);
// Note: This starts RTCP running automatically
}
if (dests->isTCP) {
// Change RTP and RTCP to use the TCP socket instead of UDP:
if (fRTPSink != NULL) {
fRTPSink->addStreamSocket(dests->tcpSocketNum, dests->rtpChannelId);
RTPInterface
::setServerRequestAlternativeByteHandler(fRTPSink->envir(), dests->tcpSocketNum,
serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData);
// So that we continue to handle RTSP commands from the client
}
if (fRTCPInstance != NULL) {
fRTCPInstance->addStreamSocket(dests->tcpSocketNum, dests->rtcpChannelId);
fRTCPInstance->setSpecificRRHandler(dests->tcpSocketNum, dests->rtcpChannelId,
rtcpRRHandler, rtcpRRHandlerClientData);
}
} else {
// Tell the RTP and RTCP 'groupsocks' about this destination
// (in case they don't already have it):
if (fRTPgs != NULL) fRTPgs->addDestination(dests->addr, dests->rtpPort);
if (fRTCPgs != NULL) fRTCPgs->addDestination(dests->addr, dests->rtcpPort);
if (fRTCPInstance != NULL) {
fRTCPInstance->setSpecificRRHandler(dests->addr.s_addr, dests->rtcpPort,
rtcpRRHandler, rtcpRRHandlerClientData);
}
}
if (fRTCPInstance != NULL) {
// Hack: Send an initial RTCP "SR" packet, before the initial RTP packet, so that receivers will (likely) be able to
// get RTCP-synchronized presentation times immediately:
fRTCPInstance->sendReport();
}
if (!fAreCurrentlyPlaying && fMediaSource != NULL) {
if (fRTPSink != NULL) {
fRTPSink->startPlaying(*fMediaSource, afterPlayingStreamState, this);
fAreCurrentlyPlaying = True;
} else if (fUDPSink != NULL) {
fUDPSink->startPlaying(*fMediaSource, afterPlayingStreamState, this);
fAreCurrentlyPlaying = True;
}
}
}
void StreamState::pause() {
if (fRTPSink != NULL) fRTPSink->stopPlaying();
if (fUDPSink != NULL) fUDPSink->stopPlaying();
fAreCurrentlyPlaying = False;
}
void StreamState::endPlaying(Destinations* dests) {
#if 0
// The following code is temporarily disabled, because it erroneously sends RTCP "BYE"s to all clients if multiple
// clients are streaming from the same data source (i.e., if "reuseFirstSource" is True), and we don't want that to happen
// if we're being called as a result of a single one of these clients having sent a "TEARDOWN" (rather than the whole stream
// having been closed, for all clients).
// This will be fixed for real later.
if (fRTCPInstance != NULL) {
// Hack: Explicitly send a RTCP "BYE" packet now, because the code below will prevent that from happening later,
// when "fRTCPInstance" gets deleted:
fRTCPInstance->sendBYE();
}
#endif
if (dests->isTCP) {
if (fRTPSink != NULL) {
fRTPSink->removeStreamSocket(dests->tcpSocketNum, dests->rtpChannelId);
}
if (fRTCPInstance != NULL) {
fRTCPInstance->removeStreamSocket(dests->tcpSocketNum, dests->rtcpChannelId);
fRTCPInstance->unsetSpecificRRHandler(dests->tcpSocketNum, dests->rtcpChannelId);
}
} else {
// Tell the RTP and RTCP 'groupsocks' to stop using these destinations:
if (fRTPgs != NULL) fRTPgs->removeDestination(dests->addr, dests->rtpPort);
if (fRTCPgs != NULL) fRTCPgs->removeDestination(dests->addr, dests->rtcpPort);
if (fRTCPInstance != NULL) {
fRTCPInstance->unsetSpecificRRHandler(dests->addr.s_addr, dests->rtcpPort);
}
}
}
void StreamState::reclaim() {
// Delete allocated media objects
Medium::close(fRTCPInstance) /* will send a RTCP BYE */; fRTCPInstance = NULL;
Medium::close(fRTPSink); fRTPSink = NULL;
Medium::close(fUDPSink); fUDPSink = NULL;
fMaster.closeStreamSource(fMediaSource); fMediaSource = NULL;
if (fMaster.fLastStreamToken == this) fMaster.fLastStreamToken = NULL;
delete fRTPgs; fRTPgs = NULL;
delete fRTCPgs; fRTCPgs = NULL;
}
live/liveMedia/AC3AudioFileServerMediaSubsession.cpp 000444 001751 000000 00000004566 12265042432 022712 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from an AC3 audio file.
// Implementation
#include "AC3AudioFileServerMediaSubsession.hh"
#include "ByteStreamFileSource.hh"
#include "AC3AudioStreamFramer.hh"
#include "AC3AudioRTPSink.hh"
AC3AudioFileServerMediaSubsession*
AC3AudioFileServerMediaSubsession::createNew(UsageEnvironment& env,
char const* fileName,
Boolean reuseFirstSource) {
return new AC3AudioFileServerMediaSubsession(env, fileName, reuseFirstSource);
}
AC3AudioFileServerMediaSubsession
::AC3AudioFileServerMediaSubsession(UsageEnvironment& env,
char const* fileName, Boolean reuseFirstSource)
: FileServerMediaSubsession(env, fileName, reuseFirstSource) {
}
AC3AudioFileServerMediaSubsession::~AC3AudioFileServerMediaSubsession() {
}
FramedSource* AC3AudioFileServerMediaSubsession
::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
estBitrate = 48; // kbps, estimate
ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName);
if (fileSource == NULL) return NULL;
return AC3AudioStreamFramer::createNew(envir(), fileSource);
}
RTPSink* AC3AudioFileServerMediaSubsession
::createNewRTPSink(Groupsock* rtpGroupsock,
unsigned char rtpPayloadTypeIfDynamic,
FramedSource* inputSource) {
AC3AudioStreamFramer* audioSource = (AC3AudioStreamFramer*)inputSource;
return AC3AudioRTPSink::createNew(envir(), rtpGroupsock,
rtpPayloadTypeIfDynamic,
audioSource->samplingRate());
}
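// This file is a compact example of the pattern used by the
// "FileServerMediaSubsession" subclasses: override "createNewStreamSource()"
// to build the file-reading/framing chain (here, a "ByteStreamFileSource"
// feeding an "AC3AudioStreamFramer"), and override "createNewRTPSink()" to
// build the matching RTP packetizer (here, an "AC3AudioRTPSink", configured
// with the framer's discovered sampling rate).  The base class supplies the
// RTSP/SDP/port-management logic seen earlier in
// "OnDemandServerMediaSubsession.cpp".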
live/liveMedia/RTSPClient.cpp 000444 001751 000000 00000230164 12265042432 016277 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A generic RTSP client
// Implementation
#include "RTSPClient.hh"
#include "RTSPCommon.hh"
#include "Base64.hh"
#include "Locale.hh"
#include <GroupsockHelper.hh>
#include "ourMD5.hh"
////////// RTSPClient implementation //////////
RTSPClient* RTSPClient::createNew(UsageEnvironment& env, char const* rtspURL,
int verbosityLevel,
char const* applicationName,
portNumBits tunnelOverHTTPPortNum,
int socketNumToServer) {
return new RTSPClient(env, rtspURL,
verbosityLevel, applicationName, tunnelOverHTTPPortNum, socketNumToServer);
}
unsigned RTSPClient::sendDescribeCommand(responseHandler* responseHandler, Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
return sendRequest(new RequestRecord(++fCSeq, "DESCRIBE", responseHandler));
}
unsigned RTSPClient::sendOptionsCommand(responseHandler* responseHandler, Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
return sendRequest(new RequestRecord(++fCSeq, "OPTIONS", responseHandler));
}
unsigned RTSPClient::sendAnnounceCommand(char const* sdpDescription, responseHandler* responseHandler, Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
return sendRequest(new RequestRecord(++fCSeq, "ANNOUNCE", responseHandler, NULL, NULL, False, 0.0, 0.0, 0.0, sdpDescription));
}
unsigned RTSPClient::sendSetupCommand(MediaSubsession& subsession, responseHandler* responseHandler,
Boolean streamOutgoing, Boolean streamUsingTCP, Boolean forceMulticastOnUnspecified,
Authenticator* authenticator) {
if (fTunnelOverHTTPPortNum != 0) streamUsingTCP = True; // RTSP-over-HTTP tunneling uses TCP (by definition)
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
u_int32_t booleanFlags = 0;
if (streamUsingTCP) booleanFlags |= 0x1;
if (streamOutgoing) booleanFlags |= 0x2;
if (forceMulticastOnUnspecified) booleanFlags |= 0x4;
return sendRequest(new RequestRecord(++fCSeq, "SETUP", responseHandler, NULL, &subsession, booleanFlags));
}
unsigned RTSPClient::sendPlayCommand(MediaSession& session, responseHandler* responseHandler,
double start, double end, float scale,
Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
sendDummyUDPPackets(session); // hack to improve NAT traversal
return sendRequest(new RequestRecord(++fCSeq, "PLAY", responseHandler, &session, NULL, 0, start, end, scale));
}
unsigned RTSPClient::sendPlayCommand(MediaSubsession& subsession, responseHandler* responseHandler,
double start, double end, float scale,
Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
sendDummyUDPPackets(subsession); // hack to improve NAT traversal
return sendRequest(new RequestRecord(++fCSeq, "PLAY", responseHandler, NULL, &subsession, 0, start, end, scale));
}
unsigned RTSPClient::sendPlayCommand(MediaSession& session, responseHandler* responseHandler,
char const* absStartTime, char const* absEndTime, float scale,
Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
sendDummyUDPPackets(session); // hack to improve NAT traversal
return sendRequest(new RequestRecord(++fCSeq, responseHandler, absStartTime, absEndTime, scale, &session, NULL));
}
unsigned RTSPClient::sendPlayCommand(MediaSubsession& subsession, responseHandler* responseHandler,
char const* absStartTime, char const* absEndTime, float scale,
Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
sendDummyUDPPackets(subsession); // hack to improve NAT traversal
return sendRequest(new RequestRecord(++fCSeq, responseHandler, absStartTime, absEndTime, scale, NULL, &subsession));
}
unsigned RTSPClient::sendPauseCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
return sendRequest(new RequestRecord(++fCSeq, "PAUSE", responseHandler, &session));
}
unsigned RTSPClient::sendPauseCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
return sendRequest(new RequestRecord(++fCSeq, "PAUSE", responseHandler, NULL, &subsession));
}
unsigned RTSPClient::sendRecordCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
return sendRequest(new RequestRecord(++fCSeq, "RECORD", responseHandler, &session));
}
unsigned RTSPClient::sendRecordCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
return sendRequest(new RequestRecord(++fCSeq, "RECORD", responseHandler, NULL, &subsession));
}
unsigned RTSPClient::sendTeardownCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
return sendRequest(new RequestRecord(++fCSeq, "TEARDOWN", responseHandler, &session));
}
unsigned RTSPClient::sendTeardownCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
return sendRequest(new RequestRecord(++fCSeq, "TEARDOWN", responseHandler, NULL, &subsession));
}
unsigned RTSPClient::sendSetParameterCommand(MediaSession& session, responseHandler* responseHandler,
char const* parameterName, char const* parameterValue,
Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
char* paramString = new char[strlen(parameterName) + strlen(parameterValue) + 10];
sprintf(paramString, "%s: %s\r\n", parameterName, parameterValue);
unsigned result = sendRequest(new RequestRecord(++fCSeq, "SET_PARAMETER", responseHandler, &session, NULL, False, 0.0, 0.0, 0.0, paramString));
delete[] paramString;
return result;
}
unsigned RTSPClient::sendGetParameterCommand(MediaSession& session, responseHandler* responseHandler, char const* parameterName,
Authenticator* authenticator) {
if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
// We assume that:
// parameterName is NULL means: Send no body in the request.
// parameterName is "" means: Send only \r\n in the request body.
// parameterName is non-empty means: Send "<parameterName>\r\n" as the request body.
unsigned parameterNameLen = parameterName == NULL ? 0 : strlen(parameterName);
char* paramString = new char[parameterNameLen + 3]; // the 3 is for \r\n + the '\0' byte
if (parameterName == NULL) {
paramString[0] = '\0';
} else {
sprintf(paramString, "%s\r\n", parameterName);
}
unsigned result = sendRequest(new RequestRecord(++fCSeq, "GET_PARAMETER", responseHandler, &session, NULL, False, 0.0, 0.0, 0.0, paramString));
delete[] paramString;
return result;
}
void RTSPClient::sendDummyUDPPackets(MediaSession& session, unsigned numDummyPackets) {
MediaSubsessionIterator iter(session);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
sendDummyUDPPackets(*subsession, numDummyPackets);
}
}
void RTSPClient::sendDummyUDPPackets(MediaSubsession& subsession, unsigned numDummyPackets) {
// Hack: To increase the likelihood of UDP packets from the server reaching us,
// if we're behind a NAT, send a few 'dummy' UDP packets to the server now.
// (We do this on both our RTP port and our RTCP port.)
Groupsock* gs1 = NULL; Groupsock* gs2 = NULL;
if (subsession.rtpSource() != NULL) gs1 = subsession.rtpSource()->RTPgs();
if (subsession.rtcpInstance() != NULL) gs2 = subsession.rtcpInstance()->RTCPgs();
u_int32_t const dummy = 0xFEEDFACE;
for (unsigned i = 0; i < numDummyPackets; ++i) {
if (gs1 != NULL) gs1->output(envir(), 255, (unsigned char*)&dummy, sizeof dummy);
if (gs2 != NULL) gs2->output(envir(), 255, (unsigned char*)&dummy, sizeof dummy);
}
}
Boolean RTSPClient::changeResponseHandler(unsigned cseq, responseHandler* newResponseHandler) {
// Look for the matching request record in each of our 'pending requests' queues:
RequestRecord* request;
if ((request = fRequestsAwaitingConnection.findByCSeq(cseq)) != NULL
|| (request = fRequestsAwaitingHTTPTunneling.findByCSeq(cseq)) != NULL
|| (request = fRequestsAwaitingResponse.findByCSeq(cseq)) != NULL) {
request->handler() = newResponseHandler;
return True;
}
return False;
}
Boolean RTSPClient::lookupByName(UsageEnvironment& env,
char const* instanceName,
RTSPClient*& resultClient) {
resultClient = NULL; // unless we succeed
Medium* medium;
if (!Medium::lookupByName(env, instanceName, medium)) return False;
if (!medium->isRTSPClient()) {
env.setResultMsg(instanceName, " is not a RTSP client");
return False;
}
resultClient = (RTSPClient*)medium;
return True;
}
Boolean RTSPClient::parseRTSPURL(UsageEnvironment& env, char const* url,
char*& username, char*& password,
NetAddress& address,
portNumBits& portNum,
char const** urlSuffix) {
do {
// Parse the URL as "rtsp://[<username>[:<password>]@]<server-address-or-name>[:<port>][/<stream-name>]"
char const* prefix = "rtsp://";
unsigned const prefixLength = 7;
if (_strncasecmp(url, prefix, prefixLength) != 0) {
env.setResultMsg("URL is not of the form \"", prefix, "\"");
break;
}
unsigned const parseBufferSize = 100;
char parseBuffer[parseBufferSize];
char const* from = &url[prefixLength];
// Check whether "<username>[:<password>]@" occurs next.
// We do this by checking whether '@' appears before the end of the URL, or before the first '/'.
username = password = NULL; // default return values
char const* colonPasswordStart = NULL;
char const* p;
for (p = from; *p != '\0' && *p != '/'; ++p) {
if (*p == ':' && colonPasswordStart == NULL) {
colonPasswordStart = p;
} else if (*p == '@') {
// We found <username> (and perhaps <password>). Copy them into newly-allocated result strings:
if (colonPasswordStart == NULL) colonPasswordStart = p;
char const* usernameStart = from;
unsigned usernameLen = colonPasswordStart - usernameStart;
username = new char[usernameLen + 1]; // allow for the trailing '\0'
for (unsigned i = 0; i < usernameLen; ++i) username[i] = usernameStart[i];
username[usernameLen] = '\0';
char const* passwordStart = colonPasswordStart;
if (passwordStart < p) ++passwordStart; // skip over the ':'
unsigned passwordLen = p - passwordStart;
password = new char[passwordLen + 1]; // allow for the trailing '\0'
for (unsigned j = 0; j < passwordLen; ++j) password[j] = passwordStart[j];
password[passwordLen] = '\0';
from = p + 1; // skip over the '@'
break;
}
}
// Next, parse <server-address-or-name>
char* to = &parseBuffer[0];
unsigned i;
for (i = 0; i < parseBufferSize; ++i) {
if (*from == '\0' || *from == ':' || *from == '/') {
// We've completed parsing the address
*to = '\0';
break;
}
*to++ = *from++;
}
if (i == parseBufferSize) {
env.setResultMsg("URL is too long");
break;
}
NetAddressList addresses(parseBuffer);
if (addresses.numAddresses() == 0) {
env.setResultMsg("Failed to find network address for \"",
parseBuffer, "\"");
break;
}
address = *(addresses.firstAddress());
portNum = 554; // default value
char nextChar = *from;
if (nextChar == ':') {
int portNumInt;
if (sscanf(++from, "%d", &portNumInt) != 1) {
env.setResultMsg("No port number follows ':'");
break;
}
if (portNumInt < 1 || portNumInt > 65535) {
env.setResultMsg("Bad port number");
break;
}
portNum = (portNumBits)portNumInt;
while (*from >= '0' && *from <= '9') ++from; // skip over port number
}
// The remainder of the URL is the suffix:
if (urlSuffix != NULL) *urlSuffix = from;
return True;
} while (0);
return False;
}
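// For example (illustrative values): parsing
//   rtsp://user:secret@camera.example.com:8554/stream1
// yields username "user", password "secret", the network address of
// "camera.example.com", port number 8554, and URL suffix "/stream1".
// When no ":<port>" is present, the RTSP default port 554 is returned.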
void RTSPClient::setUserAgentString(char const* userAgentName) {
if (userAgentName == NULL) return;
// Change the existing user agent header string:
char const* const formatStr = "User-Agent: %s\r\n";
unsigned const headerSize = strlen(formatStr) + strlen(userAgentName);
delete[] fUserAgentHeaderStr;
fUserAgentHeaderStr = new char[headerSize];
sprintf(fUserAgentHeaderStr, formatStr, userAgentName);
fUserAgentHeaderStrLen = strlen(fUserAgentHeaderStr);
}
unsigned RTSPClient::responseBufferSize = 20000; // default value; you can reassign this in your application if you need to
RTSPClient::RTSPClient(UsageEnvironment& env, char const* rtspURL,
int verbosityLevel, char const* applicationName,
portNumBits tunnelOverHTTPPortNum, int socketNumToServer)
: Medium(env),
fVerbosityLevel(verbosityLevel), fCSeq(1), fServerAddress(0),
fTunnelOverHTTPPortNum(tunnelOverHTTPPortNum), fUserAgentHeaderStr(NULL), fUserAgentHeaderStrLen(0),
fInputSocketNum(-1), fOutputSocketNum(-1), fBaseURL(NULL), fTCPStreamIdCount(0),
fLastSessionId(NULL), fSessionTimeoutParameter(0), fSessionCookieCounter(0), fHTTPTunnelingConnectionIsPending(False) {
setBaseURL(rtspURL);
fResponseBuffer = new char[responseBufferSize+1];
resetResponseBuffer();
if (socketNumToServer >= 0) {
// This socket number is (assumed to be) already connected to the server.
// Use it, and arrange to handle responses to requests sent on it:
fInputSocketNum = fOutputSocketNum = socketNumToServer;
envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION,
(TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this);
}
// Set the "User-Agent:" header to use in each request:
char const* const libName = "LIVE555 Streaming Media v";
char const* const libVersionStr = LIVEMEDIA_LIBRARY_VERSION_STRING;
char const* libPrefix; char const* libSuffix;
if (applicationName == NULL || applicationName[0] == '\0') {
applicationName = libPrefix = libSuffix = "";
} else {
libPrefix = " (";
libSuffix = ")";
}
unsigned userAgentNameSize
= strlen(applicationName) + strlen(libPrefix) + strlen(libName) + strlen(libVersionStr) + strlen(libSuffix) + 1;
char* userAgentName = new char[userAgentNameSize];
sprintf(userAgentName, "%s%s%s%s%s", applicationName, libPrefix, libName, libVersionStr, libSuffix);
setUserAgentString(userAgentName);
delete[] userAgentName;
}
RTSPClient::~RTSPClient() {
RTPInterface::clearServerRequestAlternativeByteHandler(envir(), fInputSocketNum); // in case we were receiving RTP-over-TCP
reset();
delete[] fResponseBuffer;
delete[] fUserAgentHeaderStr;
}
void RTSPClient::reset() {
resetTCPSockets();
resetResponseBuffer();
fServerAddress = 0;
setBaseURL(NULL);
fCurrentAuthenticator.reset();
delete[] fLastSessionId; fLastSessionId = NULL;
}
void RTSPClient::setBaseURL(char const* url) {
delete[] fBaseURL; fBaseURL = strDup(url);
}
int RTSPClient::grabSocket() {
int inputSocket = fInputSocketNum;
fInputSocketNum = -1;
return inputSocket;
}
unsigned RTSPClient::sendRequest(RequestRecord* request) {
char* cmd = NULL;
do {
Boolean connectionIsPending = False;
if (!fRequestsAwaitingConnection.isEmpty()) {
// A connection is currently pending (with at least one enqueued request). Enqueue this request also:
connectionIsPending = True;
} else if (fInputSocketNum < 0) { // we need to open a connection
int connectResult = openConnection();
if (connectResult < 0) break; // an error occurred
else if (connectResult == 0) {
// A connection is pending
connectionIsPending = True;
} // else the connection succeeded. Continue sending the command.
}
if (connectionIsPending) {
fRequestsAwaitingConnection.enqueue(request);
return request->cseq();
}
// If requested (and we're not already doing it, or have done it), set up the special protocol for tunneling RTSP-over-HTTP:
if (fTunnelOverHTTPPortNum != 0 && strcmp(request->commandName(), "GET") != 0 && fOutputSocketNum == fInputSocketNum) {
if (!setupHTTPTunneling1()) break;
fRequestsAwaitingHTTPTunneling.enqueue(request);
return request->cseq();
}
// Construct and send the command:
// First, construct command-specific headers that we need:
char* cmdURL = fBaseURL; // by default
Boolean cmdURLWasAllocated = False;
char const* protocolStr = "RTSP/1.0"; // by default
char* extraHeaders = (char*)""; // by default
Boolean extraHeadersWereAllocated = False;
char* contentLengthHeader = (char*)""; // by default
Boolean contentLengthHeaderWasAllocated = False;
if (!setRequestFields(request,
cmdURL, cmdURLWasAllocated,
protocolStr,
extraHeaders, extraHeadersWereAllocated)) {
break;
}
char const* contentStr = request->contentStr(); // by default
if (contentStr == NULL) contentStr = "";
unsigned contentStrLen = strlen(contentStr);
if (contentStrLen > 0) {
char const* contentLengthHeaderFmt =
"Content-Length: %d\r\n";
unsigned contentLengthHeaderSize = strlen(contentLengthHeaderFmt)
+ 20 /* max int len */;
contentLengthHeader = new char[contentLengthHeaderSize];
sprintf(contentLengthHeader, contentLengthHeaderFmt, contentStrLen);
contentLengthHeaderWasAllocated = True;
}
char* authenticatorStr = createAuthenticatorString(request->commandName(), fBaseURL);
char const* const cmdFmt =
"%s %s %s\r\n"
"CSeq: %d\r\n"
"%s"
"%s"
"%s"
"%s"
"\r\n"
"%s";
unsigned cmdSize = strlen(cmdFmt)
+ strlen(request->commandName()) + strlen(cmdURL) + strlen(protocolStr)
+ 20 /* max int len */
+ strlen(authenticatorStr)
+ fUserAgentHeaderStrLen
+ strlen(extraHeaders)
+ strlen(contentLengthHeader)
+ contentStrLen;
cmd = new char[cmdSize];
sprintf(cmd, cmdFmt,
request->commandName(), cmdURL, protocolStr,
request->cseq(),
authenticatorStr,
fUserAgentHeaderStr,
extraHeaders,
contentLengthHeader,
contentStr);
delete[] authenticatorStr;
if (cmdURLWasAllocated) delete[] cmdURL;
if (extraHeadersWereAllocated) delete[] extraHeaders;
if (contentLengthHeaderWasAllocated) delete[] contentLengthHeader;
if (fVerbosityLevel >= 1) envir() << "Sending request: " << cmd << "\n";
if (fTunnelOverHTTPPortNum != 0 && strcmp(request->commandName(), "GET") != 0 && strcmp(request->commandName(), "POST") != 0) {
// When we're tunneling RTSP-over-HTTP, we Base-64-encode the request before we send it.
// (However, we don't do this for the HTTP "GET" and "POST" commands that we use to set up the tunnel.)
char* origCmd = cmd;
cmd = base64Encode(origCmd, strlen(cmd));
if (fVerbosityLevel >= 1) envir() << "\tThe request was base-64 encoded to: " << cmd << "\n\n";
delete[] origCmd;
}
if (send(fOutputSocketNum, cmd, strlen(cmd), 0) < 0) {
char const* errFmt = "%s send() failed: ";
unsigned const errLength = strlen(errFmt) + strlen(request->commandName());
char* err = new char[errLength];
sprintf(err, errFmt, request->commandName());
envir().setResultErrMsg(err);
delete[] err;
break;
}
// The command send succeeded, so enqueue the request record, so that its response (when it comes) can be handled.
// However, note that we do not expect a response to a POST command with RTSP-over-HTTP, so don't enqueue that.
int cseq = request->cseq();
if (fTunnelOverHTTPPortNum == 0 || strcmp(request->commandName(), "POST") != 0) {
fRequestsAwaitingResponse.enqueue(request);
} else {
delete request;
}
delete[] cmd;
return cseq;
} while (0);
// An error occurred, so call the response handler immediately (indicating the error):
delete[] cmd;
handleRequestError(request);
delete request;
return 0;
}
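// Illustrative example (not from the source itself): given the "cmdFmt" template above, a hypothetical
// "DESCRIBE" request for "rtsp://example.com/stream" (no authenticator; a made-up "User-Agent:" value)
// would go out on the wire as:
//
//   DESCRIBE rtsp://example.com/stream RTSP/1.0\r\n
//   CSeq: 2\r\n
//   User-Agent: SomeClient/1.0\r\n
//   Accept: application/sdp\r\n
//   \r\n
//
// (The "Accept:" line is the DESCRIBE-specific 'extra header' constructed by "setRequestFields()" below.)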
static char* createSessionString(char const* sessionId) {
char* sessionStr;
if (sessionId != NULL) {
sessionStr = new char[20+strlen(sessionId)];
sprintf(sessionStr, "Session: %s\r\n", sessionId);
} else {
sessionStr = strDup("");
}
return sessionStr;
}
static char* createScaleString(float scale, float currentScale) {
char buf[100];
if (scale == 1.0f && currentScale == 1.0f) {
// This is the default value; we don't need a "Scale:" header:
buf[0] = '\0';
} else {
Locale l("C", Numeric);
sprintf(buf, "Scale: %f\r\n", scale);
}
return strDup(buf);
}
static char* createRangeString(double start, double end, char const* absStartTime, char const* absEndTime) {
char buf[100];
if (absStartTime != NULL) {
// Create a "Range:" header that specifies 'absolute' time values:
if (absEndTime == NULL) {
// There's no end time:
snprintf(buf, sizeof buf, "Range: clock=%s-\r\n", absStartTime);
} else {
// There's both a start and an end time; include them both in the "Range:" hdr
snprintf(buf, sizeof buf, "Range: clock=%s-%s\r\n", absStartTime, absEndTime);
}
} else {
// Create a "Range:" header that specifies relative (i.e., NPT) time values:
if (start < 0) {
// We're resuming from a PAUSE; there's no "Range:" header at all
buf[0] = '\0';
} else if (end < 0) {
// There's no end time:
Locale l("C", Numeric);
sprintf(buf, "Range: npt=%.3f-\r\n", start);
} else {
// There's both a start and an end time; include them both in the "Range:" hdr
Locale l("C", Numeric);
sprintf(buf, "Range: npt=%.3f-%.3f\r\n", start, end);
}
}
return strDup(buf);
}
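// A guarded-out sketch (illustrative only; the argument values are made up) of what the three
// helper functions above produce:
#if 0
static void demoHeaderHelpers() {
char* s = createSessionString("12345678"); // => "Session: 12345678\r\n"
char* sc = createScaleString(2.0f, 1.0f); // => "Scale: 2.000000\r\n"
char* r1 = createRangeString(0.0, 30.5, NULL, NULL); // => "Range: npt=0.000-30.500\r\n"
char* r2 = createRangeString(-1.0, -1.0, NULL, NULL); // => "" (resuming from PAUSE => no header)
char* r3 = createRangeString(0.0, 0.0, "20140101T000000Z", NULL); // => "Range: clock=20140101T000000Z-\r\n"
delete[] s; delete[] sc; delete[] r1; delete[] r2; delete[] r3;
}
#endif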
Boolean RTSPClient::setRequestFields(RequestRecord* request,
char*& cmdURL, Boolean& cmdURLWasAllocated,
char const*& protocolStr,
char*& extraHeaders, Boolean& extraHeadersWereAllocated
) {
// Set various fields that will appear in our outgoing request, depending upon the particular command that we are sending.
if (strcmp(request->commandName(), "DESCRIBE") == 0) {
extraHeaders = (char*)"Accept: application/sdp\r\n";
} else if (strcmp(request->commandName(), "OPTIONS") == 0) {
// If we're currently part of a session, create a "Session:" header (in case the server wants this to indicate
// client 'liveness'); this makes up our 'extra headers':
extraHeaders = createSessionString(fLastSessionId);
extraHeadersWereAllocated = True;
} else if (strcmp(request->commandName(), "ANNOUNCE") == 0) {
extraHeaders = (char*)"Content-Type: application/sdp\r\n";
} else if (strcmp(request->commandName(), "SETUP") == 0) {
MediaSubsession& subsession = *request->subsession();
Boolean streamUsingTCP = (request->booleanFlags()&0x1) != 0;
Boolean streamOutgoing = (request->booleanFlags()&0x2) != 0;
Boolean forceMulticastOnUnspecified = (request->booleanFlags()&0x4) != 0;
char const *prefix, *separator, *suffix;
constructSubsessionURL(subsession, prefix, separator, suffix);
char const* transportFmt;
if (strcmp(subsession.protocolName(), "UDP") == 0) {
suffix = "";
transportFmt = "Transport: RAW/RAW/UDP%s%s%s=%d-%d\r\n";
} else {
transportFmt = "Transport: RTP/AVP%s%s%s=%d-%d\r\n";
}
cmdURL = new char[strlen(prefix) + strlen(separator) + strlen(suffix) + 1];
cmdURLWasAllocated = True;
sprintf(cmdURL, "%s%s%s", prefix, separator, suffix);
// Construct a "Transport:" header.
char const* transportTypeStr;
char const* modeStr = streamOutgoing ? ";mode=receive" : "";
// Note: I think the above is nonstandard, but DSS wants it this way
char const* portTypeStr;
portNumBits rtpNumber, rtcpNumber;
if (streamUsingTCP) { // streaming over the RTSP connection
transportTypeStr = "/TCP;unicast";
portTypeStr = ";interleaved";
rtpNumber = fTCPStreamIdCount++;
rtcpNumber = fTCPStreamIdCount++;
} else { // normal RTP streaming
unsigned connectionAddress = subsession.connectionEndpointAddress();
Boolean requestMulticastStreaming
= IsMulticastAddress(connectionAddress) || (connectionAddress == 0 && forceMulticastOnUnspecified);
transportTypeStr = requestMulticastStreaming ? ";multicast" : ";unicast";
portTypeStr = ";client_port";
rtpNumber = subsession.clientPortNum();
if (rtpNumber == 0) {
envir().setResultMsg("Client port number unknown\n");
delete[] cmdURL;
return False;
}
rtcpNumber = rtpNumber + 1;
}
unsigned transportSize = strlen(transportFmt)
+ strlen(transportTypeStr) + strlen(modeStr) + strlen(portTypeStr) + 2*5 /* max port len */;
char* transportStr = new char[transportSize];
sprintf(transportStr, transportFmt,
transportTypeStr, modeStr, portTypeStr, rtpNumber, rtcpNumber);
// When sending more than one "SETUP" request, include a "Session:" header in the 2nd and later commands:
char* sessionStr = createSessionString(fLastSessionId);
// The "Transport:" and "Session:" (if present) headers make up the 'extra headers':
extraHeaders = new char[transportSize + strlen(sessionStr)];
extraHeadersWereAllocated = True;
sprintf(extraHeaders, "%s%s", transportStr, sessionStr);
delete[] transportStr; delete[] sessionStr;
} else if (strcmp(request->commandName(), "GET") == 0 || strcmp(request->commandName(), "POST") == 0) {
// We will be sending a HTTP (not a RTSP) request.
// Begin by re-parsing our RTSP URL, to get the stream name (which we'll use as our 'cmdURL'
// in the subsequent request), and the server address (which we'll use in a "Host:" header):
char* username;
char* password;
NetAddress destAddress;
portNumBits urlPortNum;
if (!parseRTSPURL(envir(), fBaseURL, username, password, destAddress, urlPortNum, (char const**)&cmdURL)) return False;
if (cmdURL[0] == '\0') cmdURL = (char*)"/";
delete[] username;
delete[] password;
netAddressBits serverAddress = *(netAddressBits*)(destAddress.data());
AddressString serverAddressString(serverAddress);
protocolStr = "HTTP/1.1";
if (strcmp(request->commandName(), "GET") == 0) {
// Create a 'session cookie' string, using MD5:
struct {
struct timeval timestamp;
unsigned counter;
} seedData;
gettimeofday(&seedData.timestamp, NULL);
seedData.counter = ++fSessionCookieCounter;
our_MD5Data((unsigned char*)(&seedData), sizeof seedData, fSessionCookie);
// DSS seems to require that the 'session cookie' string be 22 bytes long:
fSessionCookie[23] = '\0';
char const* const extraHeadersFmt =
"Host: %s\r\n"
"x-sessioncookie: %s\r\n"
"Accept: application/x-rtsp-tunnelled\r\n"
"Pragma: no-cache\r\n"
"Cache-Control: no-cache\r\n";
unsigned extraHeadersSize = strlen(extraHeadersFmt)
+ strlen(serverAddressString.val())
+ strlen(fSessionCookie);
extraHeaders = new char[extraHeadersSize];
extraHeadersWereAllocated = True;
sprintf(extraHeaders, extraHeadersFmt,
serverAddressString.val(),
fSessionCookie);
} else { // "POST"
char const* const extraHeadersFmt =
"Host: %s\r\n"
"x-sessioncookie: %s\r\n"
"Content-Type: application/x-rtsp-tunnelled\r\n"
"Pragma: no-cache\r\n"
"Cache-Control: no-cache\r\n"
"Content-Length: 32767\r\n"
"Expires: Sun, 9 Jan 1972 00:00:00 GMT\r\n";
unsigned extraHeadersSize = strlen(extraHeadersFmt)
+ strlen(serverAddressString.val())
+ strlen(fSessionCookie);
extraHeaders = new char[extraHeadersSize];
extraHeadersWereAllocated = True;
sprintf(extraHeaders, extraHeadersFmt,
serverAddressString.val(),
fSessionCookie);
}
} else { // "PLAY", "PAUSE", "TEARDOWN", "RECORD", "SET_PARAMETER", "GET_PARAMETER"
// First, make sure that we have a RTSP session in progress
if (fLastSessionId == NULL) {
envir().setResultMsg("No RTSP session is currently in progress\n");
return False;
}
char const* sessionId;
float originalScale;
if (request->session() != NULL) {
// Session-level operation
cmdURL = (char*)sessionURL(*request->session());
sessionId = fLastSessionId;
originalScale = request->session()->scale();
} else {
// Media-level operation
char const *prefix, *separator, *suffix;
constructSubsessionURL(*request->subsession(), prefix, separator, suffix);
cmdURL = new char[strlen(prefix) + strlen(separator) + strlen(suffix) + 1];
cmdURLWasAllocated = True;
sprintf(cmdURL, "%s%s%s", prefix, separator, suffix);
sessionId = request->subsession()->sessionId();
originalScale = request->subsession()->scale();
}
if (strcmp(request->commandName(), "PLAY") == 0) {
// Create "Session:", "Scale:", and "Range:" headers; these make up the 'extra headers':
char* sessionStr = createSessionString(sessionId);
char* scaleStr = createScaleString(request->scale(), originalScale);
char* rangeStr = createRangeString(request->start(), request->end(), request->absStartTime(), request->absEndTime());
extraHeaders = new char[strlen(sessionStr) + strlen(scaleStr) + strlen(rangeStr) + 1];
extraHeadersWereAllocated = True;
sprintf(extraHeaders, "%s%s%s", sessionStr, scaleStr, rangeStr);
delete[] sessionStr; delete[] scaleStr; delete[] rangeStr;
} else {
// Create a "Session:" header; this makes up our 'extra headers':
extraHeaders = createSessionString(sessionId);
extraHeadersWereAllocated = True;
}
}
return True;
}
Boolean RTSPClient::isRTSPClient() const {
return True;
}
void RTSPClient::resetTCPSockets() {
if (fInputSocketNum >= 0) {
envir().taskScheduler().disableBackgroundHandling(fInputSocketNum);
::closeSocket(fInputSocketNum);
if (fOutputSocketNum != fInputSocketNum) {
envir().taskScheduler().disableBackgroundHandling(fOutputSocketNum);
::closeSocket(fOutputSocketNum);
}
}
fInputSocketNum = fOutputSocketNum = -1;
}
void RTSPClient::resetResponseBuffer() {
fResponseBytesAlreadySeen = 0;
fResponseBufferBytesLeft = responseBufferSize;
}
int RTSPClient::openConnection() {
do {
// Set up a connection to the server. Begin by parsing the URL:
char* username;
char* password;
NetAddress destAddress;
portNumBits urlPortNum;
char const* urlSuffix;
if (!parseRTSPURL(envir(), fBaseURL, username, password, destAddress, urlPortNum, &urlSuffix)) break;
portNumBits destPortNum = fTunnelOverHTTPPortNum == 0 ? urlPortNum : fTunnelOverHTTPPortNum;
if (username != NULL || password != NULL) {
fCurrentAuthenticator.setUsernameAndPassword(username, password);
delete[] username;
delete[] password;
}
// We don't yet have a TCP socket (or we used to have one, but it got closed). Set it up now.
fInputSocketNum = fOutputSocketNum = setupStreamSocket(envir(), 0);
if (fInputSocketNum < 0) break;
ignoreSigPipeOnSocket(fInputSocketNum); // so that servers on the same host that get killed don't also kill us
// Connect to the remote endpoint:
fServerAddress = *(netAddressBits*)(destAddress.data());
int connectResult = connectToServer(fInputSocketNum, destPortNum);
if (connectResult < 0) break;
else if (connectResult > 0) {
// The connection succeeded. Arrange to handle responses to requests sent on it:
envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION,
(TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this);
}
return connectResult;
} while (0);
resetTCPSockets();
return -1;
}
int RTSPClient::connectToServer(int socketNum, portNumBits remotePortNum) {
MAKE_SOCKADDR_IN(remoteName, fServerAddress, htons(remotePortNum));
if (fVerbosityLevel >= 1) {
envir() << "Opening connection to " << AddressString(remoteName).val() << ", port " << remotePortNum << "...\n";
}
if (connect(socketNum, (struct sockaddr*) &remoteName, sizeof remoteName) != 0) {
int const err = envir().getErrno();
if (err == EINPROGRESS || err == EWOULDBLOCK) {
// The connection is pending; we'll need to handle it later. Wait for our socket to be 'writable', or have an exception.
envir().taskScheduler().setBackgroundHandling(socketNum, SOCKET_WRITABLE|SOCKET_EXCEPTION,
(TaskScheduler::BackgroundHandlerProc*)&connectionHandler, this);
return 0;
}
envir().setResultErrMsg("connect() failed: ");
if (fVerbosityLevel >= 1) envir() << "..." << envir().getResultMsg() << "\n";
return -1;
}
if (fVerbosityLevel >= 1) envir() << "...local connection opened\n";
return 1;
}
char* RTSPClient::createAuthenticatorString(char const* cmd, char const* url) {
Authenticator& auth = fCurrentAuthenticator; // alias, for brevity
if (auth.realm() != NULL && auth.username() != NULL && auth.password() != NULL) {
// We have a filled-in authenticator, so use it:
char* authenticatorStr;
if (auth.nonce() != NULL) { // Digest authentication
char const* const authFmt =
"Authorization: Digest username=\"%s\", realm=\"%s\", "
"nonce=\"%s\", uri=\"%s\", response=\"%s\"\r\n";
char const* response = auth.computeDigestResponse(cmd, url);
unsigned authBufSize = strlen(authFmt)
+ strlen(auth.username()) + strlen(auth.realm())
+ strlen(auth.nonce()) + strlen(url) + strlen(response);
authenticatorStr = new char[authBufSize];
sprintf(authenticatorStr, authFmt,
auth.username(), auth.realm(),
auth.nonce(), url, response);
auth.reclaimDigestResponse(response);
} else { // Basic authentication
char const* const authFmt = "Authorization: Basic %s\r\n";
unsigned usernamePasswordLength = strlen(auth.username()) + 1 + strlen(auth.password());
char* usernamePassword = new char[usernamePasswordLength+1];
sprintf(usernamePassword, "%s:%s", auth.username(), auth.password());
char* response = base64Encode(usernamePassword, usernamePasswordLength);
unsigned const authBufSize = strlen(authFmt) + strlen(response) + 1;
authenticatorStr = new char[authBufSize];
sprintf(authenticatorStr, authFmt, response);
delete[] response; delete[] usernamePassword;
}
return authenticatorStr;
}
// We don't have a (filled-in) authenticator.
return strDup("");
}
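// Worked example (illustrative; the credentials are made up): for Basic authentication with
// username "user" and password "pass", the string "user:pass" Base-64 encodes to "dXNlcjpwYXNz",
// so the function above returns:
//   "Authorization: Basic dXNlcjpwYXNz\r\n"
// For Digest authentication, the "response=" field is instead the digest computed by
// "Authenticator::computeDigestResponse()" from the command name and URL.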
void RTSPClient::handleRequestError(RequestRecord* request) {
int resultCode = -envir().getErrno();
if (resultCode == 0) {
// Choose some generic error code instead:
#if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4)
resultCode = -WSAENOTCONN;
#else
resultCode = -ENOTCONN;
#endif
}
if (request->handler() != NULL) (*request->handler())(this, resultCode, strDup(envir().getResultMsg()));
}
Boolean RTSPClient
::parseResponseCode(char const* line, unsigned& responseCode, char const*& responseString) {
if (sscanf(line, "RTSP/%*s%u", &responseCode) != 1 &&
sscanf(line, "HTTP/%*s%u", &responseCode) != 1) return False;
// Note: We check for HTTP responses as well as RTSP responses, both in order to set up RTSP-over-HTTP tunneling,
// and so that we get back a meaningful error if the client tried to mistakenly send a RTSP command to a HTTP-only server.
// Use everything after the RTSP/* (or HTTP/*) as the response string:
responseString = line;
while (responseString[0] != '\0' && responseString[0] != ' ' && responseString[0] != '\t') ++responseString;
while (responseString[0] != '\0' && (responseString[0] == ' ' || responseString[0] == '\t')) ++responseString; // skip whitespace
return True;
}
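// Example (illustrative status lines) of what the function above extracts:
//   "RTSP/1.0 404 Stream Not Found" => responseCode 404, responseString "404 Stream Not Found"
//   "HTTP/1.1 401 Unauthorized" => responseCode 401, responseString "401 Unauthorized"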
void RTSPClient::handleIncomingRequest() {
// Parse the request string into command name and 'CSeq', then 'handle' the command (by responding that we don't support it):
char cmdName[RTSP_PARAM_STRING_MAX];
char urlPreSuffix[RTSP_PARAM_STRING_MAX];
char urlSuffix[RTSP_PARAM_STRING_MAX];
char cseq[RTSP_PARAM_STRING_MAX];
char sessionId[RTSP_PARAM_STRING_MAX];
unsigned contentLength;
if (!parseRTSPRequestString(fResponseBuffer, fResponseBytesAlreadySeen,
cmdName, sizeof cmdName,
urlPreSuffix, sizeof urlPreSuffix,
urlSuffix, sizeof urlSuffix,
cseq, sizeof cseq,
sessionId, sizeof sessionId,
contentLength)) {
return;
} else {
if (fVerbosityLevel >= 1) {
envir() << "Received incoming RTSP request: " << fResponseBuffer << "\n";
}
char tmpBuf[2*RTSP_PARAM_STRING_MAX];
snprintf((char*)tmpBuf, sizeof tmpBuf,
"RTSP/1.0 405 Method Not Allowed\r\nCSeq: %s\r\n\r\n", cseq);
send(fOutputSocketNum, tmpBuf, strlen(tmpBuf), 0);
}
}
Boolean RTSPClient::checkForHeader(char const* line, char const* headerName, unsigned headerNameLength, char const*& headerParams) {
if (_strncasecmp(line, headerName, headerNameLength) != 0) return False;
// The line begins with the desired header name. Trim off any whitespace, and return the header parameters:
unsigned paramIndex = headerNameLength;
while (line[paramIndex] != '\0' && (line[paramIndex] == ' ' || line[paramIndex] == '\t')) ++paramIndex;
if (line[paramIndex] == '\0') return False; // the header is assumed to be bad if it has no parameters
headerParams = &line[paramIndex];
return True;
}
Boolean RTSPClient::parseTransportParams(char const* paramsStr,
char*& serverAddressStr, portNumBits& serverPortNum,
unsigned char& rtpChannelId, unsigned char& rtcpChannelId) {
// Initialize the return parameters to 'not found' values:
serverAddressStr = NULL;
serverPortNum = 0;
rtpChannelId = rtcpChannelId = 0xFF;
if (paramsStr == NULL) return False;
char* foundServerAddressStr = NULL;
Boolean foundServerPortNum = False;
portNumBits clientPortNum = 0;
Boolean foundClientPortNum = False;
Boolean foundChannelIds = False;
unsigned rtpCid, rtcpCid;
Boolean isMulticast = True; // by default
char* foundDestinationStr = NULL;
portNumBits multicastPortNumRTP, multicastPortNumRTCP;
Boolean foundMulticastPortNum = False;
// Run through each of the parameters, looking for ones that we handle:
char const* fields = paramsStr;
char* field = strDupSize(fields);
while (sscanf(fields, "%[^;]", field) == 1) {
if (sscanf(field, "server_port=%hu", &serverPortNum) == 1) {
foundServerPortNum = True;
} else if (sscanf(field, "client_port=%hu", &clientPortNum) == 1) {
foundClientPortNum = True;
} else if (_strncasecmp(field, "source=", 7) == 0) {
delete[] foundServerAddressStr;
foundServerAddressStr = strDup(field+7);
} else if (sscanf(field, "interleaved=%u-%u", &rtpCid, &rtcpCid) == 2) {
rtpChannelId = (unsigned char)rtpCid;
rtcpChannelId = (unsigned char)rtcpCid;
foundChannelIds = True;
} else if (strcmp(field, "unicast") == 0) {
isMulticast = False;
} else if (_strncasecmp(field, "destination=", 12) == 0) {
delete[] foundDestinationStr;
foundDestinationStr = strDup(field+12);
} else if (sscanf(field, "port=%hu-%hu", &multicastPortNumRTP, &multicastPortNumRTCP) == 2 ||
sscanf(field, "port=%hu", &multicastPortNumRTP) == 1) {
foundMulticastPortNum = True;
}
fields += strlen(field);
while (fields[0] == ';') ++fields; // skip over all leading ';' chars
if (fields[0] == '\0') break;
}
delete[] field;
// If we're multicast, and have a "destination=" (multicast) address, then use this
// as the 'server' address (because some weird servers don't specify the multicast
// address earlier, in the "DESCRIBE" response's SDP):
if (isMulticast && foundDestinationStr != NULL && foundMulticastPortNum) {
delete[] foundServerAddressStr;
serverAddressStr = foundDestinationStr;
serverPortNum = multicastPortNumRTP;
return True;
}
delete[] foundDestinationStr;
// We have a valid "Transport:" header if any of the following are true:
// - We saw a "interleaved=" field, indicating RTP/RTCP-over-TCP streaming, or
// - We saw a "server_port=" field, or
// - We saw a "client_port=" field.
// If we didn't also see a "server_port=" field, then the server port is assumed to be the same as the client port.
if (foundChannelIds || foundServerPortNum || foundClientPortNum) {
if (foundClientPortNum && !foundServerPortNum) {
serverPortNum = clientPortNum;
}
serverAddressStr = foundServerAddressStr;
return True;
}
delete[] foundServerAddressStr;
return False;
}
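// Worked example (illustrative; the header values are made up): given
//   Transport: RTP/AVP;unicast;source=192.168.1.10;client_port=6970-6971;server_port=7000-7001
// the function above returns True with serverAddressStr "192.168.1.10", serverPortNum 7000, and
// rtpChannelId/rtcpChannelId left at 0xFF (no "interleaved=" field was present). For RTP-over-TCP,
// a header such as
//   Transport: RTP/AVP/TCP;unicast;interleaved=0-1
// instead yields rtpChannelId 0 and rtcpChannelId 1.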
Boolean RTSPClient::parseScaleParam(char const* paramStr, float& scale) {
Locale l("C", Numeric);
return sscanf(paramStr, "%f", &scale) == 1;
}
Boolean RTSPClient::parseRTPInfoParams(char const*& paramsStr, u_int16_t& seqNum, u_int32_t& timestamp) {
if (paramsStr == NULL || paramsStr[0] == '\0') return False;
while (paramsStr[0] == ',') ++paramsStr;
// "paramsStr" now consists of a ';'-separated list of parameters, ending with ',' or '\0'.
char* field = strDupSize(paramsStr);
Boolean sawSeq = False, sawRtptime = False;
while (sscanf(paramsStr, "%[^;,]", field) == 1) {
if (sscanf(field, "seq=%hu", &seqNum) == 1) {
sawSeq = True;
} else if (sscanf(field, "rtptime=%u", ×tamp) == 1) {
sawRtptime = True;
}
paramsStr += strlen(field);
if (paramsStr[0] == '\0' || paramsStr[0] == ',') break;
// ASSERT: paramsStr[0] == ';'
++paramsStr; // skip over the ';'
}
delete[] field;
// For the "RTP-Info:" parameters to be useful to us, we need to have seen both the "seq=" and "rtptime=" parameters:
return sawSeq && sawRtptime;
}
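// Worked example (illustrative; the values are made up): given the header value
//   url=rtsp://example.com/stream/track1;seq=45102;rtptime=2890844526
// the function above skips the "url=" field (which it doesn't handle), and returns True with
// seqNum 45102 and timestamp 2890844526. "paramsStr" is left advanced past this entry, so a
// subsequent call parses the next (comma-separated) entry, if any.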
Boolean RTSPClient::handleSETUPResponse(MediaSubsession& subsession, char const* sessionParamsStr, char const* transportParamsStr,
Boolean streamUsingTCP) {
char* sessionId = new char[responseBufferSize]; // ensures we have enough space
Boolean success = False;
do {
// Check for a session id:
if (sessionParamsStr == NULL || sscanf(sessionParamsStr, "%[^;]", sessionId) != 1) {
envir().setResultMsg("Missing or bad \"Session:\" header");
break;
}
subsession.setSessionId(sessionId);
delete[] fLastSessionId; fLastSessionId = strDup(sessionId);
// Also look for an optional "; timeout = " parameter following this:
char const* afterSessionId = sessionParamsStr + strlen(sessionId);
int timeoutVal;
if (sscanf(afterSessionId, "; timeout = %d", &timeoutVal) == 1) {
fSessionTimeoutParameter = timeoutVal;
}
// Parse the "Transport:" header parameters:
char* serverAddressStr;
portNumBits serverPortNum;
unsigned char rtpChannelId, rtcpChannelId;
if (!parseTransportParams(transportParamsStr, serverAddressStr, serverPortNum, rtpChannelId, rtcpChannelId)) {
envir().setResultMsg("Missing or bad \"Transport:\" header");
break;
}
delete[] subsession.connectionEndpointName();
subsession.connectionEndpointName() = serverAddressStr;
subsession.serverPortNum = serverPortNum;
subsession.rtpChannelId = rtpChannelId;
subsession.rtcpChannelId = rtcpChannelId;
if (streamUsingTCP) {
// Tell the subsession to receive RTP (and send/receive RTCP) over the RTSP stream:
if (subsession.rtpSource() != NULL) {
subsession.rtpSource()->setStreamSocket(fInputSocketNum, subsession.rtpChannelId);
// So that we continue to receive & handle RTSP commands and responses from the server
subsession.rtpSource()->enableRTCPReports() = False;
// To avoid confusing the server (which won't start handling RTP/RTCP-over-TCP until "PLAY"), don't send RTCP "RR"s yet
}
if (subsession.rtcpInstance() != NULL) subsession.rtcpInstance()->setStreamSocket(fInputSocketNum, subsession.rtcpChannelId);
RTPInterface::setServerRequestAlternativeByteHandler(envir(), fInputSocketNum, handleAlternativeRequestByte, this);
} else {
// Normal case.
// Set the RTP and RTCP sockets' destination address and port from the information in the SETUP response (if present):
netAddressBits destAddress = subsession.connectionEndpointAddress();
if (destAddress == 0) destAddress = fServerAddress;
subsession.setDestinations(destAddress);
}
success = True;
} while (0);
delete[] sessionId;
return success;
}
Boolean RTSPClient::handlePLAYResponse(MediaSession& session, MediaSubsession& subsession,
char const* scaleParamsStr, char const* rangeParamsStr, char const* rtpInfoParamsStr) {
Boolean scaleOK = False, rangeOK = False;
do {
if (&session != NULL) {
// The command was on the whole session
if (scaleParamsStr != NULL && !parseScaleParam(scaleParamsStr, session.scale())) break;
scaleOK = True;
if (rangeParamsStr != NULL && !parseRangeParam(rangeParamsStr, session.playStartTime(), session.playEndTime(),
session._absStartTime(), session._absEndTime())) break;
rangeOK = True;
MediaSubsessionIterator iter(session);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
u_int16_t seqNum; u_int32_t timestamp;
subsession->rtpInfo.infoIsNew = False;
if (parseRTPInfoParams(rtpInfoParamsStr, seqNum, timestamp)) {
subsession->rtpInfo.seqNum = seqNum;
subsession->rtpInfo.timestamp = timestamp;
subsession->rtpInfo.infoIsNew = True;
}
if (subsession->rtpSource() != NULL) subsession->rtpSource()->enableRTCPReports() = True; // start sending RTCP "RR"s now
}
} else {
// The command was on a subsession
if (scaleParamsStr != NULL && !parseScaleParam(scaleParamsStr, subsession.scale())) break;
scaleOK = True;
if (rangeParamsStr != NULL && !parseRangeParam(rangeParamsStr, subsession._playStartTime(), subsession._playEndTime(),
subsession._absStartTime(), subsession._absEndTime())) break;
rangeOK = True;
u_int16_t seqNum; u_int32_t timestamp;
subsession.rtpInfo.infoIsNew = False;
if (parseRTPInfoParams(rtpInfoParamsStr, seqNum, timestamp)) {
subsession.rtpInfo.seqNum = seqNum;
subsession.rtpInfo.timestamp = timestamp;
subsession.rtpInfo.infoIsNew = True;
}
if (subsession.rtpSource() != NULL) subsession.rtpSource()->enableRTCPReports() = True; // start sending RTCP "RR"s now
}
return True;
} while (0);
// An error occurred:
if (!scaleOK) {
envir().setResultMsg("Bad \"Scale:\" header");
} else if (!rangeOK) {
envir().setResultMsg("Bad \"Range:\" header");
} else {
envir().setResultMsg("Bad \"RTP-Info:\" header");
}
return False;
}
Boolean RTSPClient::handleTEARDOWNResponse(MediaSession& /*session*/, MediaSubsession& /*subsession*/) {
// Because we don't expect to always get a response to "TEARDOWN", we don't need to do anything if we do get one:
return True;
}
Boolean RTSPClient::handleGET_PARAMETERResponse(char const* parameterName, char*& resultValueString) {
do {
// If "parameterName" is non-empty, it may be (possibly followed by ':' and whitespace) at the start of the result string:
if (parameterName != NULL && parameterName[0] != '\0') {
if (parameterName[1] == '\0') break; // sanity check; there should have been \r\n at the end of "parameterName"
unsigned parameterNameLen = strlen(parameterName);
// ASSERT: parameterNameLen >= 2;
parameterNameLen -= 2; // because of the trailing \r\n
if (_strncasecmp(resultValueString, parameterName, parameterNameLen) == 0) {
resultValueString += parameterNameLen;
if (resultValueString[0] == ':') ++resultValueString;
while (resultValueString[0] == ' ' || resultValueString[0] == '\t') ++resultValueString;
}
}
// The rest of "resultValueStr" should be our desired result, but first trim off any \r and/or \n characters at the end:
unsigned resultLen = strlen(resultValueString);
while (resultLen > 0 && (resultValueString[resultLen-1] == '\r' || resultValueString[resultLen-1] == '\n')) --resultLen;
resultValueString[resultLen] = '\0';
return True;
} while (0);
// An error occurred:
envir().setResultMsg("Bad \"GET_PARAMETER\" response");
return False;
}
Boolean RTSPClient::handleAuthenticationFailure(char const* paramsStr) {
if (paramsStr == NULL) return False; // There was no "WWW-Authenticate:" header; we can't proceed.
// Fill in "fCurrentAuthenticator" with the information from the "WWW-Authenticate:" header:
Boolean alreadyHadRealm = fCurrentAuthenticator.realm() != NULL;
char* realm = strDupSize(paramsStr);
char* nonce = strDupSize(paramsStr);
Boolean success = True;
if (sscanf(paramsStr, "Digest realm=\"%[^\"]\", nonce=\"%[^\"]\"", realm, nonce) == 2) {
fCurrentAuthenticator.setRealmAndNonce(realm, nonce);
} else if (sscanf(paramsStr, "Basic realm=\"%[^\"]\"", realm) == 1) {
fCurrentAuthenticator.setRealmAndNonce(realm, NULL); // Basic authentication
} else {
success = False; // bad "WWW-Authenticate:" header
}
delete[] realm; delete[] nonce;
if (alreadyHadRealm || fCurrentAuthenticator.username() == NULL || fCurrentAuthenticator.password() == NULL) {
// We already had a 'realm', or don't have a username and/or password,
// so the new "WWW-Authenticate:" header information won't help us. We remain unauthenticated.
success = False;
}
return success;
}
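// Example (illustrative; the realm and nonce are made up) of the two header forms that the
// function above accepts:
//   WWW-Authenticate: Digest realm="LIVE555 Streaming Media", nonce="6eccd1cc2b5a4a5b"
//     => Digest authentication; both the realm and the nonce are recorded
//   WWW-Authenticate: Basic realm="LIVE555 Streaming Media"
//     => Basic authentication; the nonce is set to NULL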
Boolean RTSPClient::resendCommand(RequestRecord* request) {
if (fVerbosityLevel >= 1) envir() << "Resending...\n";
if (request != NULL && strcmp(request->commandName(), "GET") != 0) request->cseq() = ++fCSeq;
return sendRequest(request) != 0;
}
char const* RTSPClient::sessionURL(MediaSession const& session) const {
char const* url = session.controlPath();
if (url == NULL || strcmp(url, "*") == 0) url = fBaseURL;
return url;
}
void RTSPClient::handleAlternativeRequestByte(void* rtspClient, u_int8_t requestByte) {
((RTSPClient*)rtspClient)->handleAlternativeRequestByte1(requestByte);
}
void RTSPClient::handleAlternativeRequestByte1(u_int8_t requestByte) {
if (requestByte == 0xFF) {
// Hack: The new handler of the input TCP socket encountered an error reading it. Indicate this:
handleResponseBytes(-1);
} else if (requestByte == 0xFE) {
// Another hack: The new handler of the input TCP socket no longer needs it, so take back control:
envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION,
(TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this);
} else {
// Normal case:
fResponseBuffer[fResponseBytesAlreadySeen] = requestByte;
handleResponseBytes(1);
}
}
static Boolean isAbsoluteURL(char const* url) {
// Assumption: "url" is absolute if it contains a ':', before any
// occurrence of '/'
while (*url != '\0' && *url != '/') {
if (*url == ':') return True;
++url;
}
return False;
}
void RTSPClient::constructSubsessionURL(MediaSubsession const& subsession,
char const*& prefix,
char const*& separator,
char const*& suffix) {
// Figure out what the URL describing "subsession" will look like.
// The URL is returned in three parts: prefix; separator; suffix
//##### NOTE: This code doesn't really do the right thing if "sessionURL()"
// doesn't end with a "/", and "subsession.controlPath()" is relative.
// The right thing would have been to truncate "sessionURL()" back to the
// rightmost "/", and then add "subsession.controlPath()".
// In practice, though, each "DESCRIBE" response typically contains
// a "Content-Base:" header that consists of "sessionURL()" followed by
// a "/", in which case this code ends up giving the correct result.
// However, we should really fix this code to do the right thing, and
// also check for and use the "Content-Base:" header appropriately. #####
prefix = sessionURL(subsession.parentSession());
if (prefix == NULL) prefix = "";
suffix = subsession.controlPath();
if (suffix == NULL) suffix = "";
if (isAbsoluteURL(suffix)) {
prefix = separator = "";
} else {
unsigned prefixLen = strlen(prefix);
separator = (prefixLen == 0 || prefix[prefixLen-1] == '/' || suffix[0] == '/') ? "" : "/";
}
}
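// Worked example (illustrative URLs): if "sessionURL()" returns "rtsp://example.com/stream/" and
// "subsession.controlPath()" is "track1", then the three parts are prefix "rtsp://example.com/stream/",
// separator "", and suffix "track1", i.e., "rtsp://example.com/stream/track1". If the control path
// were itself an absolute URL, the prefix and separator would both be "".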
Boolean RTSPClient::setupHTTPTunneling1() {
// Set up RTSP-over-HTTP tunneling, as described in
// http://developer.apple.com/quicktime/icefloe/dispatch028.html and http://images.apple.com/br/quicktime/pdf/QTSS_Modules.pdf
if (fVerbosityLevel >= 1) {
envir() << "Requesting RTSP-over-HTTP tunneling (on port " << fTunnelOverHTTPPortNum << ")\n\n";
}
// Begin by sending a HTTP "GET", to set up the server->client link. Continue when we handle the response:
return sendRequest(new RequestRecord(1, "GET", responseHandlerForHTTP_GET)) != 0;
}
void RTSPClient::responseHandlerForHTTP_GET(RTSPClient* rtspClient, int responseCode, char* responseString) {
if (rtspClient != NULL) rtspClient->responseHandlerForHTTP_GET1(responseCode, responseString);
}
void RTSPClient::responseHandlerForHTTP_GET1(int responseCode, char* responseString) {
RequestRecord* request;
do {
delete[] responseString; // we don't need it (but are responsible for deleting it)
if (responseCode != 0) break; // The HTTP "GET" failed.
// Having successfully set up (using the HTTP "GET" command) the server->client link, set up a second TCP connection
// (to the same server & port as before) for the client->server link. All future output will be to this new socket.
fOutputSocketNum = setupStreamSocket(envir(), 0);
if (fOutputSocketNum < 0) break;
ignoreSigPipeOnSocket(fOutputSocketNum); // so that servers on the same host that get killed don't also kill us
fHTTPTunnelingConnectionIsPending = True;
int connectResult = connectToServer(fOutputSocketNum, fTunnelOverHTTPPortNum);
if (connectResult < 0) break; // an error occurred
else if (connectResult == 0) {
// A connection is pending. Continue setting up RTSP-over-HTTP when the connection completes.
// First, move the pending requests to the 'awaiting connection' queue:
while ((request = fRequestsAwaitingHTTPTunneling.dequeue()) != NULL) {
fRequestsAwaitingConnection.enqueue(request);
}
return;
}
// The connection succeeded. Continue setting up RTSP-over-HTTP:
if (!setupHTTPTunneling2()) break;
// RTSP-over-HTTP tunneling succeeded. Resume the pending request(s):
while ((request = fRequestsAwaitingHTTPTunneling.dequeue()) != NULL) {
sendRequest(request);
}
return;
} while (0);
// An error occurred. Dequeue the pending request(s), and tell them about the error:
fHTTPTunnelingConnectionIsPending = False;
resetTCPSockets(); // do this now, in case an error handler deletes "this"
RequestQueue requestQueue(fRequestsAwaitingHTTPTunneling);
while ((request = requestQueue.dequeue()) != NULL) {
handleRequestError(request);
delete request;
}
}
Boolean RTSPClient::setupHTTPTunneling2() {
fHTTPTunnelingConnectionIsPending = False;
// Send a HTTP "POST", to set up the client->server link. (Note that we won't see a reply to the "POST".)
return sendRequest(new RequestRecord(1, "POST", NULL)) != 0;
}
void RTSPClient::connectionHandler(void* instance, int /*mask*/) {
RTSPClient* client = (RTSPClient*)instance;
client->connectionHandler1();
}
void RTSPClient::connectionHandler1() {
// Restore normal handling on our sockets:
envir().taskScheduler().disableBackgroundHandling(fOutputSocketNum);
envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION,
(TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this);
// Move all requests awaiting connection into a new, temporary queue, to clear "fRequestsAwaitingConnection"
// (so that "sendRequest()" doesn't get confused by "fRequestsAwaitingConnection" being nonempty, and enqueue them all over again).
RequestQueue tmpRequestQueue(fRequestsAwaitingConnection);
RequestRecord* request;
// Find out whether the connection succeeded or failed:
do {
int err = 0;
SOCKLEN_T len = sizeof err;
if (getsockopt(fInputSocketNum, SOL_SOCKET, SO_ERROR, (char*)&err, &len) < 0 || err != 0) {
envir().setResultErrMsg("Connection to server failed: ", err);
if (fVerbosityLevel >= 1) envir() << "..." << envir().getResultMsg() << "\n";
break;
}
// The connection succeeded. If the connection came about from an attempt to set up RTSP-over-HTTP, finish this now:
if (fVerbosityLevel >= 1) envir() << "...remote connection opened\n";
if (fHTTPTunnelingConnectionIsPending && !setupHTTPTunneling2()) break;
// Resume sending all pending requests:
while ((request = tmpRequestQueue.dequeue()) != NULL) {
sendRequest(request);
}
return;
} while (0);
// An error occurred. Tell all pending requests about the error:
resetTCPSockets(); // do this now, in case an error handler deletes "this"
while ((request = tmpRequestQueue.dequeue()) != NULL) {
handleRequestError(request);
delete request;
}
}
void RTSPClient::incomingDataHandler(void* instance, int /*mask*/) {
RTSPClient* client = (RTSPClient*)instance;
client->incomingDataHandler1();
}
void RTSPClient::incomingDataHandler1() {
struct sockaddr_in dummy; // 'from' address - not used
int bytesRead = readSocket(envir(), fInputSocketNum, (unsigned char*)&fResponseBuffer[fResponseBytesAlreadySeen], fResponseBufferBytesLeft, dummy);
handleResponseBytes(bytesRead);
}
static char* getLine(char* startOfLine) {
// returns the start of the next line, or NULL if none. Note that this modifies the input string to add '\0' characters.
for (char* ptr = startOfLine; *ptr != '\0'; ++ptr) {
// Check for the end of line: \r\n (but also accept \r or \n by itself):
if (*ptr == '\r' || *ptr == '\n') {
// We found the end of the line
if (*ptr == '\r') {
*ptr++ = '\0';
if (*ptr == '\n') ++ptr;
} else {
*ptr++ = '\0';
}
return ptr;
}
}
return NULL;
}
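// A guarded-out sketch (illustrative) of how "getLine()" walks header lines in place:
#if 0
static void demoGetLine() {
char buf[] = "RTSP/1.0 200 OK\r\nCSeq: 1\r\n\r\n";
char* line = buf;
while (line != NULL && line[0] != '\0') {
char* next = getLine(line); // NUL-terminates "line", and returns the start of the next line
// 1st iteration: line is "RTSP/1.0 200 OK"; 2nd: "CSeq: 1"; then an empty line ends the headers
line = next;
}
}
#endif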
void RTSPClient::handleResponseBytes(int newBytesRead) {
do {
if (newBytesRead >= 0 && (unsigned)newBytesRead < fResponseBufferBytesLeft) break; // data was read OK; process it below
if (newBytesRead >= (int)fResponseBufferBytesLeft) {
// We filled up our response buffer. Treat this as an error (for the first response handler):
envir().setResultMsg("RTSP response was truncated. Increase \"RTSPClient::responseBufferSize\"");
}
// An error occurred while reading our TCP socket. Call all pending response handlers, indicating this error.
// (However, the "RTSP response was truncated" error is applied to the first response handler only.)
resetResponseBuffer();
RequestRecord* request;
if (newBytesRead > 0) { // The "RTSP response was truncated" error
if ((request = fRequestsAwaitingResponse.dequeue()) != NULL) {
handleRequestError(request);
delete request;
}
} else {
RequestQueue requestQueue(fRequestsAwaitingResponse);
resetTCPSockets(); // do this now, in case an error handler deletes "this"
while ((request = requestQueue.dequeue()) != NULL) {
handleRequestError(request);
delete request;
}
}
return;
} while (0);
fResponseBufferBytesLeft -= newBytesRead;
fResponseBytesAlreadySeen += newBytesRead;
fResponseBuffer[fResponseBytesAlreadySeen] = '\0';
if (fVerbosityLevel >= 1 && newBytesRead > 1) envir() << "Received " << newBytesRead << " new bytes of response data.\n";
unsigned numExtraBytesAfterResponse = 0;
Boolean responseSuccess = False; // by default
do {
// Data was read OK. Look through the data that we've read so far, to see if it contains <CR><LF><CR><LF>.
// (If not, wait for more data to arrive.)
Boolean endOfHeaders = False;
char const* ptr = fResponseBuffer;
if (fResponseBytesAlreadySeen > 3) {
char const* const ptrEnd = &fResponseBuffer[fResponseBytesAlreadySeen-3];
while (ptr < ptrEnd) {
if (*ptr++ == '\r' && *ptr++ == '\n' && *ptr++ == '\r' && *ptr++ == '\n') {
// This is it
endOfHeaders = True;
break;
}
}
}
if (!endOfHeaders) return; // subsequent reads will be needed to get the complete response
// Now that we have the complete response headers (ending with <CR><LF><CR><LF>), parse them to get the response code, CSeq,
// and various other header parameters. To do this, we first make a copy of the received header data, because we'll be
// modifying it by adding '\0' bytes.
char* headerDataCopy;
unsigned responseCode = 200;
char const* responseStr = NULL;
RequestRecord* foundRequest = NULL;
char const* sessionParamsStr = NULL;
char const* transportParamsStr = NULL;
char const* scaleParamsStr = NULL;
char const* rangeParamsStr = NULL;
char const* rtpInfoParamsStr = NULL;
char const* wwwAuthenticateParamsStr = NULL;
char const* publicParamsStr = NULL;
char* bodyStart = NULL;
unsigned numBodyBytes = 0;
responseSuccess = False;
do {
headerDataCopy = new char[responseBufferSize];
strncpy(headerDataCopy, fResponseBuffer, fResponseBytesAlreadySeen);
headerDataCopy[fResponseBytesAlreadySeen] = '\0';
char* lineStart = headerDataCopy;
char* nextLineStart = getLine(lineStart);
if (!parseResponseCode(lineStart, responseCode, responseStr)) {
// This does not appear to be a RTSP response; perhaps it's a RTSP request instead?
handleIncomingRequest();
break; // we're done with this data
}
// Scan through the headers, handling the ones that we're interested in:
Boolean reachedEndOfHeaders;
unsigned cseq = 0;
unsigned contentLength = 0;
while (1) {
reachedEndOfHeaders = True; // by default; may get changed below
lineStart = nextLineStart;
if (lineStart == NULL) break;
nextLineStart = getLine(lineStart);
if (lineStart[0] == '\0') break; // this is a blank line
reachedEndOfHeaders = False;
char const* headerParamsStr;
if (checkForHeader(lineStart, "CSeq:", 5, headerParamsStr)) {
if (sscanf(headerParamsStr, "%u", &cseq) != 1 || cseq <= 0) {
envir().setResultMsg("Bad \"CSeq:\" header: \"", lineStart, "\"");
break;
}
// Find the handler function for "cseq":
RequestRecord* request;
while ((request = fRequestsAwaitingResponse.dequeue()) != NULL) {
if (request->cseq() < cseq) { // assumes that the CSeq counter will never wrap around
// We never received (and will never receive) a response for this handler, so delete it:
if (fVerbosityLevel >= 1 && strcmp(request->commandName(), "POST") != 0) {
envir() << "WARNING: The server did not respond to our \"" << request->commandName() << "\" request (CSeq: "
<< request->cseq() << "). The server appears to be buggy (perhaps not handling pipelined requests properly).\n";
}
delete request;
} else if (request->cseq() == cseq) {
// This is the handler that we want. Remove its record, but remember it, so that we can later call its handler:
foundRequest = request;
break;
} else { // request->cseq() > cseq
// No handler was registered for this response, so ignore it.
break;
}
}
} else if (checkForHeader(lineStart, "Content-Length:", 15, headerParamsStr)) {
if (sscanf(headerParamsStr, "%u", &contentLength) != 1) {
envir().setResultMsg("Bad \"Content-Length:\" header: \"", lineStart, "\"");
break;
}
} else if (checkForHeader(lineStart, "Content-Base:", 13, headerParamsStr)) {
setBaseURL(headerParamsStr);
} else if (checkForHeader(lineStart, "Session:", 8, sessionParamsStr)) {
} else if (checkForHeader(lineStart, "Transport:", 10, transportParamsStr)) {
} else if (checkForHeader(lineStart, "Scale:", 6, scaleParamsStr)) {
} else if (checkForHeader(lineStart, "Range:", 6, rangeParamsStr)) {
} else if (checkForHeader(lineStart, "RTP-Info:", 9, rtpInfoParamsStr)) {
} else if (checkForHeader(lineStart, "WWW-Authenticate:", 17, headerParamsStr)) {
// If we've already seen a "WWW-Authenticate:" header, then we replace it with this new one only if
// the new one specifies "Digest" authentication:
if (wwwAuthenticateParamsStr == NULL || _strncasecmp(headerParamsStr, "Digest", 6) == 0) {
wwwAuthenticateParamsStr = headerParamsStr;
}
} else if (checkForHeader(lineStart, "Public:", 7, publicParamsStr)) {
} else if (checkForHeader(lineStart, "Allow:", 6, publicParamsStr)) {
// Note: we accept "Allow:" instead of "Public:", so that "OPTIONS" requests made to HTTP servers will work.
} else if (checkForHeader(lineStart, "Location:", 9, headerParamsStr)) {
setBaseURL(headerParamsStr);
}
}
if (!reachedEndOfHeaders) break; // an error occurred
if (foundRequest == NULL) {
// Hack: The response didn't have a "CSeq:" header; assume it's for our most recent request:
foundRequest = fRequestsAwaitingResponse.dequeue();
}
// If we saw a "Content-Length:" header, then make sure that we have the amount of data that it specified:
unsigned bodyOffset = nextLineStart == NULL ? fResponseBytesAlreadySeen : nextLineStart - headerDataCopy;
bodyStart = &fResponseBuffer[bodyOffset];
numBodyBytes = fResponseBytesAlreadySeen - bodyOffset;
if (contentLength > numBodyBytes) {
// We need to read more data. First, make sure we have enough space for it:
unsigned numExtraBytesNeeded = contentLength - numBodyBytes;
unsigned remainingBufferSize = responseBufferSize - fResponseBytesAlreadySeen;
if (numExtraBytesNeeded > remainingBufferSize) {
char tmpBuf[200];
sprintf(tmpBuf, "Response buffer size (%d) is too small for \"Content-Length:\" %d (need a buffer size of >= %d bytes\n",
responseBufferSize, contentLength, fResponseBytesAlreadySeen + numExtraBytesNeeded);
envir().setResultMsg(tmpBuf);
break;
}
if (fVerbosityLevel >= 1) {
envir() << "Have received " << fResponseBytesAlreadySeen << " total bytes of a "
<< (foundRequest != NULL ? foundRequest->commandName() : "(unknown)")
<< " RTSP response; awaiting " << numExtraBytesNeeded << " bytes more.\n";
}
delete[] headerDataCopy;
if (foundRequest != NULL) fRequestsAwaitingResponse.putAtHead(foundRequest);// put our request record back; we need it again
return; // We need to read more data
}
// We now have a complete response (including all bytes specified by the "Content-Length:" header, if any).
char* responseEnd = bodyStart + contentLength;
numExtraBytesAfterResponse = &fResponseBuffer[fResponseBytesAlreadySeen] - responseEnd;
if (fVerbosityLevel >= 1) {
char saved = *responseEnd;
*responseEnd = '\0';
envir() << "Received a complete "
<< (foundRequest != NULL ? foundRequest->commandName() : "(unknown)")
<< " response:\n" << fResponseBuffer << "\n";
if (numExtraBytesAfterResponse > 0) envir() << "\t(plus " << numExtraBytesAfterResponse << " additional bytes)\n";
*responseEnd = saved;
}
if (foundRequest != NULL) {
Boolean needToResendCommand = False; // by default...
if (responseCode == 200) {
// Do special-case response handling for some commands:
if (strcmp(foundRequest->commandName(), "SETUP") == 0) {
if (!handleSETUPResponse(*foundRequest->subsession(), sessionParamsStr, transportParamsStr, foundRequest->booleanFlags()&0x1)) break;
} else if (strcmp(foundRequest->commandName(), "PLAY") == 0) {
if (!handlePLAYResponse(*foundRequest->session(), *foundRequest->subsession(), scaleParamsStr, rangeParamsStr, rtpInfoParamsStr)) break;
} else if (strcmp(foundRequest->commandName(), "TEARDOWN") == 0) {
if (!handleTEARDOWNResponse(*foundRequest->session(), *foundRequest->subsession())) break;
} else if (strcmp(foundRequest->commandName(), "GET_PARAMETER") == 0) {
if (!handleGET_PARAMETERResponse(foundRequest->contentStr(), bodyStart)) break;
}
} else if (responseCode == 401 && handleAuthenticationFailure(wwwAuthenticateParamsStr)) {
// We need to resend the command, with an "Authorization:" header:
needToResendCommand = True;
if (strcmp(foundRequest->commandName(), "GET") == 0) {
// Note: If a HTTP "GET" command (for RTSP-over-HTTP tunneling) returns "401 Unauthorized", then we resend it
// (with an "Authorization:" header), just as we would for a RTSP command. However, we do so using a new TCP connection,
// because some servers close the original connection after returning the "401 Unauthorized".
resetTCPSockets(); // forces the opening of a new connection for the resent command
}
} else if (responseCode == 301 || responseCode == 302) { // redirection
resetTCPSockets(); // because we need to connect somewhere else next
needToResendCommand = True;
}
if (needToResendCommand) {
resetResponseBuffer();
if (!resendCommand(foundRequest)) break;
delete[] headerDataCopy;
return; // without calling our response handler; the response to the resent command will do that
}
}
responseSuccess = True;
} while (0);
// If we have a handler function for this response, call it.
// But first, reset our response buffer, in case the handler goes to the event loop, and we end up getting called recursively:
if (numExtraBytesAfterResponse > 0) {
// An unusual case; usually due to having received pipelined responses. Move the extra bytes to the front of the buffer:
char* responseEnd = &fResponseBuffer[fResponseBytesAlreadySeen - numExtraBytesAfterResponse];
// But first: A hack to save a copy of the response 'body', in case it's needed below for "resultString":
numBodyBytes -= numExtraBytesAfterResponse;
if (numBodyBytes > 0) {
char saved = *responseEnd;
*responseEnd = '\0';
bodyStart = strDup(bodyStart);
*responseEnd = saved;
}
memmove(fResponseBuffer, responseEnd, numExtraBytesAfterResponse);
fResponseBytesAlreadySeen = numExtraBytesAfterResponse;
fResponseBufferBytesLeft = responseBufferSize - numExtraBytesAfterResponse;
fResponseBuffer[numExtraBytesAfterResponse] = '\0';
} else {
resetResponseBuffer();
}
if (foundRequest != NULL && foundRequest->handler() != NULL) {
int resultCode;
char* resultString;
if (responseSuccess) {
if (responseCode == 200) {
resultCode = 0;
resultString = numBodyBytes > 0 ? strDup(bodyStart) : strDup(publicParamsStr);
// Note: The "strDup(bodyStart)" call assumes that the body is encoded without interior '\0' bytes
} else {
resultCode = responseCode;
resultString = strDup(responseStr);
envir().setResultMsg(responseStr);
}
(*foundRequest->handler())(this, resultCode, resultString);
} else {
// An error occurred parsing the response, so call the handler, indicating an error:
handleRequestError(foundRequest);
}
}
delete foundRequest;
delete[] headerDataCopy;
if (numExtraBytesAfterResponse > 0 && numBodyBytes > 0) delete[] bodyStart;
} while (numExtraBytesAfterResponse > 0 && responseSuccess);
}
////////// RTSPClient::RequestRecord implementation //////////
RTSPClient::RequestRecord::RequestRecord(unsigned cseq, char const* commandName, responseHandler* handler,
MediaSession* session, MediaSubsession* subsession, u_int32_t booleanFlags,
double start, double end, float scale, char const* contentStr)
: fNext(NULL), fCSeq(cseq), fCommandName(commandName), fSession(session), fSubsession(subsession), fBooleanFlags(booleanFlags),
fStart(start), fEnd(end), fAbsStartTime(NULL), fAbsEndTime(NULL), fScale(scale), fContentStr(strDup(contentStr)), fHandler(handler) {
}
RTSPClient::RequestRecord::RequestRecord(unsigned cseq, responseHandler* handler,
char const* absStartTime, char const* absEndTime, float scale,
MediaSession* session, MediaSubsession* subsession)
: fNext(NULL), fCSeq(cseq), fCommandName("PLAY"), fSession(session), fSubsession(subsession), fBooleanFlags(0),
fStart(0.0f), fEnd(-1.0f), fAbsStartTime(strDup(absStartTime)), fAbsEndTime(strDup(absEndTime)), fScale(scale),
fContentStr(NULL), fHandler(handler) {
}
RTSPClient::RequestRecord::~RequestRecord() {
// Delete the rest of the list first:
delete fNext;
delete[] fAbsStartTime; delete[] fAbsEndTime;
delete[] fContentStr;
}
////////// RTSPClient::RequestQueue implementation //////////
RTSPClient::RequestQueue::RequestQueue()
: fHead(NULL), fTail(NULL) {
}
RTSPClient::RequestQueue::RequestQueue(RequestQueue& origQueue)
: fHead(NULL), fTail(NULL) {
RequestRecord* request;
while ((request = origQueue.dequeue()) != NULL) {
enqueue(request);
}
}
RTSPClient::RequestQueue::~RequestQueue() {
delete fHead;
}
void RTSPClient::RequestQueue::enqueue(RequestRecord* request) {
if (fTail == NULL) {
fHead = request;
} else {
fTail->next() = request;
}
fTail = request;
}
RTSPClient::RequestRecord* RTSPClient::RequestQueue::dequeue() {
RequestRecord* request = fHead;
if (fHead == fTail) {
fHead = NULL;
fTail = NULL;
} else {
fHead = fHead->next();
}
if (request != NULL) request->next() = NULL;
return request;
}
void RTSPClient::RequestQueue::putAtHead(RequestRecord* request) {
request->next() = fHead;
fHead = request;
if (fTail == NULL) {
fTail = request;
}
}
RTSPClient::RequestRecord* RTSPClient::RequestQueue::findByCSeq(unsigned cseq) {
RequestRecord* request;
for (request = fHead; request != NULL; request = request->next()) {
if (request->cseq() == cseq) return request;
}
return NULL;
}
////////// HandlerServerForREGISTERCommand implementation /////////
HandlerServerForREGISTERCommand* HandlerServerForREGISTERCommand
::createNew(UsageEnvironment& env, onRTSPClientCreationFunc* creationFunc, Port ourPort,
UserAuthenticationDatabase* authDatabase, int verbosityLevel, char const* applicationName) {
int ourSocket = setUpOurSocket(env, ourPort);
if (ourSocket == -1) return NULL;
return new HandlerServerForREGISTERCommand(env, creationFunc, ourSocket, ourPort, authDatabase, verbosityLevel, applicationName);
}
HandlerServerForREGISTERCommand
::HandlerServerForREGISTERCommand(UsageEnvironment& env, onRTSPClientCreationFunc* creationFunc, int ourSocket, Port ourPort,
UserAuthenticationDatabase* authDatabase, int verbosityLevel, char const* applicationName)
: RTSPServer(env, ourSocket, ourPort, authDatabase, 30/*small reclamationTestSeconds*/),
fCreationFunc(creationFunc), fVerbosityLevel(verbosityLevel), fApplicationName(strDup(applicationName)) {
}
HandlerServerForREGISTERCommand::~HandlerServerForREGISTERCommand() {
delete[] fApplicationName;
}
RTSPClient* HandlerServerForREGISTERCommand
::createNewRTSPClient(char const* rtspURL, int verbosityLevel, char const* applicationName, int socketNumToServer) {
// Default implementation: create a basic "RTSPClient":
return RTSPClient::createNew(envir(), rtspURL, verbosityLevel, applicationName, 0, socketNumToServer);
}
char const* HandlerServerForREGISTERCommand::allowedCommandNames() {
return "OPTIONS, REGISTER";
}
Boolean HandlerServerForREGISTERCommand::weImplementREGISTER(char const* proxyURLSuffix, char*& responseStr) {
responseStr = NULL;
return True;
}
void HandlerServerForREGISTERCommand::implementCmd_REGISTER(char const* url, char const* urlSuffix, int socketToRemoteServer,
Boolean deliverViaTCP, char const* /*proxyURLSuffix*/) {
// Create a new "RTSPClient" object, and call our 'creation function' with it:
RTSPClient* newRTSPClient = createNewRTSPClient(url, fVerbosityLevel, fApplicationName, socketToRemoteServer);
if (fCreationFunc != NULL) (*fCreationFunc)(newRTSPClient, deliverViaTCP);
}
live/liveMedia/RTSPCommon.cpp 000444 001751 000000 00000030465 12265042432 016313 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Common routines used by both RTSP clients and servers
// Implementation
#include "RTSPCommon.hh"
#include "Locale.hh"
#include <string.h>
#include <stdio.h>
#include <ctype.h> // for "isxdigit()"
#include <time.h> // for "strftime()" and "gmtime()"
#if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4)
#else
#include <signal.h>
#define USE_SIGNALS 1
#endif
static void decodeURL(char* url) {
// Replace (in place) any % sequences with the appropriate 8-bit character.
char* cursor = url;
while (*cursor) {
if ((cursor[0] == '%') &&
cursor[1] && isxdigit(cursor[1]) &&
cursor[2] && isxdigit(cursor[2])) {
// We saw a % followed by 2 hex digits, so we copy the literal hex value into the URL, then advance the cursor past it:
char hex[3];
hex[0] = cursor[1];
hex[1] = cursor[2];
hex[2] = '\0';
*url++ = (char)strtol(hex, NULL, 16);
cursor += 3;
} else {
// Common case: This is a normal character or a bogus % expression, so just copy it
*url++ = *cursor++;
}
}
*url = '\0';
}
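// Worked example (illustrative): "decodeURL()" rewrites its argument in place:
#if 0
static void demoDecodeURL() {
char url[] = "my%20stream%2Bhd";
decodeURL(url); // "url" now reads "my stream+hd"
}
#endif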
Boolean parseRTSPRequestString(char const* reqStr,
unsigned reqStrSize,
char* resultCmdName,
unsigned resultCmdNameMaxSize,
char* resultURLPreSuffix,
unsigned resultURLPreSuffixMaxSize,
char* resultURLSuffix,
unsigned resultURLSuffixMaxSize,
char* resultCSeq,
unsigned resultCSeqMaxSize,
char* resultSessionIdStr,
unsigned resultSessionIdStrMaxSize,
unsigned& contentLength) {
// This parser is currently rather dumb; it should be made smarter #####
// "Be liberal in what you accept": Skip over any whitespace at the start of the request:
unsigned i;
for (i = 0; i < reqStrSize; ++i) {
char c = reqStr[i];
if (!(c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '\0')) break;
}
if (i == reqStrSize) return False; // The request consisted of nothing but whitespace!
// Then read everything up to the next space (or tab) as the command name:
Boolean parseSucceeded = False;
unsigned i1 = 0;
for (; i1 < resultCmdNameMaxSize-1 && i < reqStrSize; ++i,++i1) {
char c = reqStr[i];
if (c == ' ' || c == '\t') {
parseSucceeded = True;
break;
}
resultCmdName[i1] = c;
}
resultCmdName[i1] = '\0';
if (!parseSucceeded) return False;
// Skip over the prefix of any "rtsp://" or "rtsp:/" URL that follows:
unsigned j = i+1;
while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j; // skip over any additional white space
for (; (int)j < (int)(reqStrSize-8); ++j) {
if ((reqStr[j] == 'r' || reqStr[j] == 'R')
&& (reqStr[j+1] == 't' || reqStr[j+1] == 'T')
&& (reqStr[j+2] == 's' || reqStr[j+2] == 'S')
&& (reqStr[j+3] == 'p' || reqStr[j+3] == 'P')
&& reqStr[j+4] == ':' && reqStr[j+5] == '/') {
j += 6;
if (reqStr[j] == '/') {
// This is a "rtsp://" URL; skip over the host:port part that follows:
++j;
while (j < reqStrSize && reqStr[j] != '/' && reqStr[j] != ' ') ++j;
} else {
// This is a "rtsp:/" URL; back up to the "/":
--j;
}
i = j;
break;
}
}
// Look for the URL suffix (before the following "RTSP/"):
parseSucceeded = False;
for (unsigned k = i+1; (int)k < (int)(reqStrSize-5); ++k) {
if (reqStr[k] == 'R' && reqStr[k+1] == 'T' &&
reqStr[k+2] == 'S' && reqStr[k+3] == 'P' && reqStr[k+4] == '/') {
while (--k >= i && reqStr[k] == ' ') {} // go back over all spaces before "RTSP/"
unsigned k1 = k;
while (k1 > i && reqStr[k1] != '/') --k1;
// ASSERT: At this point
// i: first space or slash after "host" or "host:port"
// k: last non-space before "RTSP/"
// k1: last slash in the range [i,k]
// The URL suffix comes from [k1+1,k]
// Copy "resultURLSuffix":
unsigned n = 0, k2 = k1+1;
if (k2 <= k) {
if (k - k1 + 1 > resultURLSuffixMaxSize) return False; // there's no room
while (k2 <= k) resultURLSuffix[n++] = reqStr[k2++];
}
resultURLSuffix[n] = '\0';
// The URL 'pre-suffix' comes from [i+1,k1-1]
// Copy "resultURLPreSuffix":
n = 0; k2 = i+1;
if (k2+1 <= k1) {
if (k1 - i > resultURLPreSuffixMaxSize) return False; // there's no room
while (k2 <= k1-1) resultURLPreSuffix[n++] = reqStr[k2++];
}
resultURLPreSuffix[n] = '\0';
decodeURL(resultURLPreSuffix);
i = k + 7; // to go past " RTSP/"
parseSucceeded = True;
break;
}
}
if (!parseSucceeded) return False;
// Look for "CSeq:" (mandatory, case insensitive), skip whitespace,
// then read everything up to the next \r or \n as 'CSeq':
parseSucceeded = False;
for (j = i; (int)j < (int)(reqStrSize-5); ++j) {
if (_strncasecmp("CSeq:", &reqStr[j], 5) == 0) {
j += 5;
while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j;
unsigned n;
for (n = 0; n < resultCSeqMaxSize-1 && j < reqStrSize; ++n,++j) {
char c = reqStr[j];
if (c == '\r' || c == '\n') {
parseSucceeded = True;
break;
}
resultCSeq[n] = c;
}
resultCSeq[n] = '\0';
break;
}
}
if (!parseSucceeded) return False;
// Look for "Session:" (optional, case insensitive), skip whitespace,
// then read everything up to the next \r or \n as 'Session':
resultSessionIdStr[0] = '\0'; // default value (empty string)
for (j = i; (int)j < (int)(reqStrSize-8); ++j) {
if (_strncasecmp("Session:", &reqStr[j], 8) == 0) {
j += 8;
while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j;
unsigned n;
for (n = 0; n < resultSessionIdStrMaxSize-1 && j < reqStrSize; ++n,++j) {
char c = reqStr[j];
if (c == '\r' || c == '\n') {
break;
}
resultSessionIdStr[n] = c;
}
resultSessionIdStr[n] = '\0';
break;
}
}
// Also: Look for "Content-Length:" (optional, case insensitive)
contentLength = 0; // default value
for (j = i; (int)j < (int)(reqStrSize-15); ++j) {
if (_strncasecmp("Content-Length:", &(reqStr[j]), 15) == 0) {
j += 15;
while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j;
unsigned num;
if (sscanf(&reqStr[j], "%u", &num) == 1) {
contentLength = num;
}
}
}
return True;
}
Boolean parseRangeParam(char const* paramStr, double& rangeStart, double& rangeEnd, char*& absStartTime, char*& absEndTime) {
delete[] absStartTime; delete[] absEndTime;
absStartTime = absEndTime = NULL; // by default, unless "paramStr" is a "clock=..." string
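// (Illustrative examples: "npt=10.5-20" sets rangeStart=10.5, rangeEnd=20;
// "npt=30-" sets rangeStart=30 with an open end (rangeEnd=0); a
// "clock=<start>-<end>" parameter instead fills in the absolute UTC time
// strings "absStartTime" and "absEndTime".)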
double start, end;
int numCharsMatched = 0;
Locale l("C", Numeric);
if (sscanf(paramStr, "npt = %lf - %lf", &start, &end) == 2) {
rangeStart = start;
rangeEnd = end;
} else if (sscanf(paramStr, "npt = %lf -", &start) == 1) {
if (start < 0.0) {
// special case for "npt = -<endtime>", which seems to match here:
rangeStart = 0.0;
rangeEnd = -start;
} else {
rangeStart = start;
rangeEnd = 0.0;
}
} else if (strcmp(paramStr, "npt=now-") == 0) {
rangeStart = 0.0;
rangeEnd = 0.0;
} else if (sscanf(paramStr, "clock = %n", &numCharsMatched) == 0 && numCharsMatched > 0) {
rangeStart = rangeEnd = 0.0;
char const* utcTimes = &paramStr[numCharsMatched];
size_t len = strlen(utcTimes) + 1;
char* as = new char[len];
char* ae = new char[len];
int sscanfResult = sscanf(utcTimes, "%[^-]-%s", as, ae);
if (sscanfResult == 2) {
absStartTime = as;
absEndTime = ae;
} else if (sscanfResult == 1) {
absStartTime = as;
delete[] ae;
} else {
delete[] as; delete[] ae;
return False;
}
} else if (sscanf(paramStr, "smtpe = %n", &numCharsMatched) == 0 && numCharsMatched > 0) {
// We accept "smtpe=" parameters, but currently do not interpret them.
} else {
return False; // The header is malformed
}
return True;
}
Boolean parseRangeHeader(char const* buf, double& rangeStart, double& rangeEnd, char*& absStartTime, char*& absEndTime) {
// First, find "Range:"
while (1) {
if (*buf == '\0') return False; // not found
if (_strncasecmp(buf, "Range: ", 7) == 0) break;
++buf;
}
// Then, run through each of the fields, looking for ones we handle:
char const* fields = buf + 7;
while (*fields == ' ') ++fields;
return parseRangeParam(fields, rangeStart, rangeEnd, absStartTime, absEndTime);
}
Boolean parseScaleHeader(char const* buf, float& scale) {
// Initialize the result parameter to a default value:
scale = 1.0;
// First, find "Scale:"
while (1) {
if (*buf == '\0') return False; // not found
if (_strncasecmp(buf, "Scale:", 6) == 0) break;
++buf;
}
// Then, run through each of the fields, looking for ones we handle:
char const* fields = buf + 6;
while (*fields == ' ') ++fields;
float sc;
if (sscanf(fields, "%f", &sc) == 1) {
scale = sc;
} else {
return False; // The header is malformed
}
return True;
}
// Used to implement "RTSPOptionIsSupported()":
static Boolean isSeparator(char c) { return c == ' ' || c == ',' || c == ';' || c == ':'; }
Boolean RTSPOptionIsSupported(char const* commandName, char const* optionsResponseString) {
do {
if (commandName == NULL || optionsResponseString == NULL) break;
unsigned const commandNameLen = strlen(commandName);
if (commandNameLen == 0) break;
// "optionsResponseString" is assumed to be a list of command names, separated by " " and/or ",", ";", or ":"
// Scan through these, looking for "commandName".
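// (Illustrative example: commandName "PLAY" matches within
// "OPTIONS, DESCRIBE, SETUP, PLAY, PAUSE", but would not match a
// hypothetical "PLAYLIST" entry, because a match must be followed by a
// separator or by end-of-string.)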
while (1) {
// Skip over separators:
while (*optionsResponseString != '\0' && isSeparator(*optionsResponseString)) ++optionsResponseString;
if (*optionsResponseString == '\0') break;
// At this point, "optionsResponseString" begins with a command name (with perhaps a separator afterwards).
if (strncmp(commandName, optionsResponseString, commandNameLen) == 0) {
// We have at least a partial match here.
optionsResponseString += commandNameLen;
if (*optionsResponseString == '\0' || isSeparator(*optionsResponseString)) return True;
}
// No match. Skip over the rest of the command name:
while (*optionsResponseString != '\0' && !isSeparator(*optionsResponseString)) ++optionsResponseString;
}
} while (0);
return False;
}
char const* dateHeader() {
static char buf[200];
#if !defined(_WIN32_WCE)
time_t tt = time(NULL);
strftime(buf, sizeof buf, "Date: %a, %b %d %Y %H:%M:%S GMT\r\n", gmtime(&tt));
#else
// WinCE apparently doesn't have "time()", "strftime()", or "gmtime()",
// so generate the "Date:" header a different, WinCE-specific way.
// (Thanks to Pierre l'Hussiez for this code)
// RSF: But where is the "Date: " string? This code doesn't look quite right...
SYSTEMTIME SystemTime;
GetSystemTime(&SystemTime);
WCHAR dateFormat[] = L"ddd, MMM dd yyyy";
WCHAR timeFormat[] = L"HH:mm:ss GMT\r\n";
WCHAR inBuf[200];
DWORD locale = LOCALE_NEUTRAL;
int ret = GetDateFormat(locale, 0, &SystemTime,
(LPTSTR)dateFormat, (LPTSTR)inBuf, sizeof inBuf);
inBuf[ret - 1] = ' ';
ret = GetTimeFormat(locale, 0, &SystemTime,
(LPTSTR)timeFormat,
(LPTSTR)inBuf + ret, (sizeof inBuf) - ret);
wcstombs(buf, inBuf, wcslen(inBuf));
#endif
return buf;
}
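// Note: on platforms that define SO_NOSIGPIPE (e.g., BSD-derived systems),
// the option below is set per-socket; elsewhere, SIGPIPE is ignored
// process-wide, so that a write to a disconnected socket returns an error
// (EPIPE) instead of terminating the process.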
void ignoreSigPipeOnSocket(int socketNum) {
#ifdef USE_SIGNALS
#ifdef SO_NOSIGPIPE
int set_option = 1;
setsockopt(socketNum, SOL_SOCKET, SO_NOSIGPIPE, &set_option, sizeof set_option);
#else
signal(SIGPIPE, SIG_IGN);
#endif
#endif
}
live/liveMedia/RTCP.cpp 000444 001751 000000 00000101451 12265042432 015114 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTCP
// Implementation
#include "RTCP.hh"
#include "GroupsockHelper.hh"
#include "rtcp_from_spec.h"
////////// RTCPMemberDatabase //////////
class RTCPMemberDatabase {
public:
RTCPMemberDatabase(RTCPInstance& ourRTCPInstance)
: fOurRTCPInstance(ourRTCPInstance), fNumMembers(1 /*ourself*/),
fTable(HashTable::create(ONE_WORD_HASH_KEYS)) {
}
virtual ~RTCPMemberDatabase() {
delete fTable;
}
Boolean isMember(unsigned ssrc) const {
return fTable->Lookup((char*)(long)ssrc) != NULL;
}
Boolean noteMembership(unsigned ssrc, unsigned curTimeCount) {
Boolean isNew = !isMember(ssrc);
if (isNew) {
++fNumMembers;
}
// Record the current time, so we can age stale members
fTable->Add((char*)(long)ssrc, (void*)(long)curTimeCount);
return isNew;
}
Boolean remove(unsigned ssrc) {
Boolean wasPresent = fTable->Remove((char*)(long)ssrc);
if (wasPresent) {
--fNumMembers;
}
return wasPresent;
}
unsigned numMembers() const {
return fNumMembers;
}
void reapOldMembers(unsigned threshold);
private:
RTCPInstance& fOurRTCPInstance;
unsigned fNumMembers;
HashTable* fTable;
};
void RTCPMemberDatabase::reapOldMembers(unsigned threshold) {
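// Scan the table repeatedly, removing at most one stale member per pass;
// restarting the scan after each removal (presumably) avoids modifying the
// hash table while an iterator is still walking it.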
Boolean foundOldMember;
u_int32_t oldSSRC = 0;
do {
foundOldMember = False;
HashTable::Iterator* iter
= HashTable::Iterator::create(*fTable);
uintptr_t timeCount;
char const* key;
while ((timeCount = (uintptr_t)(iter->next(key))) != 0) {
#ifdef DEBUG
fprintf(stderr, "reap: checking SSRC 0x%lx: %ld (threshold %d)\n", (unsigned long)key, timeCount, threshold);
#endif
if (timeCount < (uintptr_t)threshold) { // this SSRC is old
uintptr_t ssrc = (uintptr_t)key;
oldSSRC = (u_int32_t)ssrc;
foundOldMember = True;
}
}
delete iter;
if (foundOldMember) {
#ifdef DEBUG
fprintf(stderr, "reap: removing SSRC 0x%x\n", oldSSRC);
#endif
fOurRTCPInstance.removeSSRC(oldSSRC, True);
}
} while (foundOldMember);
}
////////// RTCPInstance //////////
static double dTimeNow() {
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
return (double) (timeNow.tv_sec + timeNow.tv_usec/1000000.0);
}
static unsigned const maxRTCPPacketSize = 1450;
// bytes (1500, minus some allowance for IP, UDP, UMTP headers)
static unsigned const preferredPacketSize = 1000; // bytes
RTCPInstance::RTCPInstance(UsageEnvironment& env, Groupsock* RTCPgs,
unsigned totSessionBW,
unsigned char const* cname,
RTPSink* sink, RTPSource const* source,
Boolean isSSMSource)
: Medium(env), fRTCPInterface(this, RTCPgs), fTotSessionBW(totSessionBW),
fSink(sink), fSource(source), fIsSSMSource(isSSMSource),
fCNAME(RTCP_SDES_CNAME, cname), fOutgoingReportCount(1),
fAveRTCPSize(0), fIsInitial(1), fPrevNumMembers(0),
fLastSentSize(0), fLastReceivedSize(0), fLastReceivedSSRC(0),
fTypeOfEvent(EVENT_UNKNOWN), fTypeOfPacket(PACKET_UNKNOWN_TYPE),
fHaveJustSentPacket(False), fLastPacketSentSize(0),
fByeHandlerTask(NULL), fByeHandlerClientData(NULL),
fSRHandlerTask(NULL), fSRHandlerClientData(NULL),
fRRHandlerTask(NULL), fRRHandlerClientData(NULL),
fSpecificRRHandlerTable(NULL) {
#ifdef DEBUG
fprintf(stderr, "RTCPInstance[%p]::RTCPInstance()\n", this);
#endif
if (fTotSessionBW == 0) { // not allowed!
env << "RTCPInstance::RTCPInstance error: totSessionBW parameter should not be zero!\n";
fTotSessionBW = 1;
}
if (isSSMSource) RTCPgs->multicastSendOnly(); // don't receive multicast
double timeNow = dTimeNow();
fPrevReportTime = fNextReportTime = timeNow;
fKnownMembers = new RTCPMemberDatabase(*this);
fInBuf = new unsigned char[maxRTCPPacketSize];
if (fKnownMembers == NULL || fInBuf == NULL) return;
fNumBytesAlreadyRead = 0;
// A hack to save buffer space, because RTCP packets are always small:
unsigned savedMaxSize = OutPacketBuffer::maxSize;
OutPacketBuffer::maxSize = maxRTCPPacketSize;
fOutBuf = new OutPacketBuffer(preferredPacketSize, maxRTCPPacketSize);
OutPacketBuffer::maxSize = savedMaxSize;
if (fOutBuf == NULL) return;
// Arrange to handle incoming reports from others:
TaskScheduler::BackgroundHandlerProc* handler
= (TaskScheduler::BackgroundHandlerProc*)&incomingReportHandler;
fRTCPInterface.startNetworkReading(handler);
// Send our first report.
fTypeOfEvent = EVENT_REPORT;
onExpire(this);
}
struct RRHandlerRecord {
TaskFunc* rrHandlerTask;
void* rrHandlerClientData;
};
RTCPInstance::~RTCPInstance() {
#ifdef DEBUG
fprintf(stderr, "RTCPInstance[%p]::~RTCPInstance()\n", this);
#endif
// Begin by sending a BYE. We have to do this immediately, without
// 'reconsideration', because "this" is going away.
fTypeOfEvent = EVENT_BYE; // not used, but...
sendBYE();
if (fSpecificRRHandlerTable != NULL) {
AddressPortLookupTable::Iterator iter(*fSpecificRRHandlerTable);
RRHandlerRecord* rrHandler;
while ((rrHandler = (RRHandlerRecord*)iter.next()) != NULL) {
delete rrHandler;
}
delete fSpecificRRHandlerTable;
}
delete fKnownMembers;
delete fOutBuf;
delete[] fInBuf;
}
RTCPInstance* RTCPInstance::createNew(UsageEnvironment& env, Groupsock* RTCPgs,
unsigned totSessionBW,
unsigned char const* cname,
RTPSink* sink, RTPSource const* source,
Boolean isSSMSource) {
return new RTCPInstance(env, RTCPgs, totSessionBW, cname, sink, source,
isSSMSource);
}
Boolean RTCPInstance::lookupByName(UsageEnvironment& env,
char const* instanceName,
RTCPInstance*& resultInstance) {
resultInstance = NULL; // unless we succeed
Medium* medium;
if (!Medium::lookupByName(env, instanceName, medium)) return False;
if (!medium->isRTCPInstance()) {
env.setResultMsg(instanceName, " is not a RTCP instance");
return False;
}
resultInstance = (RTCPInstance*)medium;
return True;
}
Boolean RTCPInstance::isRTCPInstance() const {
return True;
}
unsigned RTCPInstance::numMembers() const {
if (fKnownMembers == NULL) return 0;
return fKnownMembers->numMembers();
}
void RTCPInstance::setByeHandler(TaskFunc* handlerTask, void* clientData,
Boolean handleActiveParticipantsOnly) {
fByeHandlerTask = handlerTask;
fByeHandlerClientData = clientData;
fByeHandleActiveParticipantsOnly = handleActiveParticipantsOnly;
}
void RTCPInstance::setSRHandler(TaskFunc* handlerTask, void* clientData) {
fSRHandlerTask = handlerTask;
fSRHandlerClientData = clientData;
}
void RTCPInstance::setRRHandler(TaskFunc* handlerTask, void* clientData) {
fRRHandlerTask = handlerTask;
fRRHandlerClientData = clientData;
}
void RTCPInstance
::setSpecificRRHandler(netAddressBits fromAddress, Port fromPort,
TaskFunc* handlerTask, void* clientData) {
if (handlerTask == NULL && clientData == NULL) {
unsetSpecificRRHandler(fromAddress, fromPort);
return;
}
RRHandlerRecord* rrHandler = new RRHandlerRecord;
rrHandler->rrHandlerTask = handlerTask;
rrHandler->rrHandlerClientData = clientData;
if (fSpecificRRHandlerTable == NULL) {
fSpecificRRHandlerTable = new AddressPortLookupTable;
}
RRHandlerRecord* existingRecord = (RRHandlerRecord*)fSpecificRRHandlerTable->Add(fromAddress, (~0), fromPort, rrHandler);
delete existingRecord; // if any
}
void RTCPInstance
::unsetSpecificRRHandler(netAddressBits fromAddress, Port fromPort) {
if (fSpecificRRHandlerTable == NULL) return;
RRHandlerRecord* rrHandler
= (RRHandlerRecord*)(fSpecificRRHandlerTable->Lookup(fromAddress, (~0), fromPort));
if (rrHandler != NULL) {
fSpecificRRHandlerTable->Remove(fromAddress, (~0), fromPort);
delete rrHandler;
}
}
void RTCPInstance::setStreamSocket(int sockNum,
unsigned char streamChannelId) {
// Turn off background read handling:
fRTCPInterface.stopNetworkReading();
// Switch to RTCP-over-TCP:
fRTCPInterface.setStreamSocket(sockNum, streamChannelId);
// Turn background reading back on:
TaskScheduler::BackgroundHandlerProc* handler
= (TaskScheduler::BackgroundHandlerProc*)&incomingReportHandler;
fRTCPInterface.startNetworkReading(handler);
}
void RTCPInstance::addStreamSocket(int sockNum,
unsigned char streamChannelId) {
// First, turn off background read handling for the default (UDP) socket:
envir().taskScheduler().turnOffBackgroundReadHandling(fRTCPInterface.gs()->socketNum());
// Add the RTCP-over-TCP interface:
fRTCPInterface.addStreamSocket(sockNum, streamChannelId);
// Turn on background reading for this socket (in case it's not on already):
TaskScheduler::BackgroundHandlerProc* handler
= (TaskScheduler::BackgroundHandlerProc*)&incomingReportHandler;
fRTCPInterface.startNetworkReading(handler);
}
static unsigned const IP_UDP_HDR_SIZE = 28;
// overhead (bytes) of IP and UDP hdrs
#define ADVANCE(n) pkt += (n); packetSize -= (n)
void RTCPInstance::incomingReportHandler(RTCPInstance* instance,
int /*mask*/) {
instance->incomingReportHandler1();
}
void RTCPInstance::incomingReportHandler1() {
do {
Boolean callByeHandler = False;
int tcpReadStreamSocketNum = fRTCPInterface.nextTCPReadStreamSocketNum();
unsigned char tcpReadStreamChannelId = fRTCPInterface.nextTCPReadStreamChannelId();
unsigned packetSize = 0;
unsigned numBytesRead;
struct sockaddr_in fromAddress;
Boolean packetReadWasIncomplete;
if (fNumBytesAlreadyRead >= maxRTCPPacketSize) {
envir() << "RTCPInstance error: Hit limit when reading incoming packet over TCP. Increase \"maxRTCPPacketSize\"\n";
break;
}
Boolean readResult
= fRTCPInterface.handleRead(&fInBuf[fNumBytesAlreadyRead], maxRTCPPacketSize - fNumBytesAlreadyRead,
numBytesRead, fromAddress, packetReadWasIncomplete);
if (packetReadWasIncomplete) {
fNumBytesAlreadyRead += numBytesRead;
return; // more reads are needed to get the entire packet
} else { // normal case: We've read the entire packet
packetSize = fNumBytesAlreadyRead + numBytesRead;
fNumBytesAlreadyRead = 0; // for next time
}
if (!readResult) break;
// Ignore the packet if it was looped-back from ourself:
Boolean packetWasFromOurHost = False;
if (RTCPgs()->wasLoopedBackFromUs(envir(), fromAddress)) {
packetWasFromOurHost = True;
// However, we still want to handle incoming RTCP packets from
// *other processes* on the same machine. To distinguish this
// case from a true loop-back, check whether we've just sent a
// packet of the same size. (This check isn't perfect, but it seems
// to be the best we can do.)
if (fHaveJustSentPacket && fLastPacketSentSize == packetSize) {
// This is a true loop-back:
fHaveJustSentPacket = False;
break; // ignore this packet
}
}
unsigned char* pkt = fInBuf;
if (fIsSSMSource && !packetWasFromOurHost) {
// This packet is assumed to have been received via unicast (because we're a SSM source, and SSM receivers send back RTCP "RR"
// packets via unicast). 'Reflect' the packet by resending it to the multicast group, so that any other receivers can also
// get to see it.
// NOTE: Denial-of-service attacks are possible here.
// Users of this software may wish to add their own,
// application-specific mechanism for 'authenticating' the
// validity of this packet before reflecting it.
// NOTE: The test for "!packetWasFromOurHost" means that we won't reflect RTCP packets that come from other processes on
// the same host as us. The reason for this is that the 'packet size' test above is not 100% reliable; some packets
// that were truly looped back from us might not be detected as such, and this might lead to infinite forwarding/receiving
// of some packets. To avoid this possibility, we only reflect RTCP packets that we know for sure originated elsewhere.
// (Note, though, that if we ever re-enable the code in "Groupsock::multicastSendOnly()", then we could remove the test for
// "!packetWasFromOurHost".)
fRTCPInterface.sendPacket(pkt, packetSize);
fHaveJustSentPacket = True;
fLastPacketSentSize = packetSize;
}
#ifdef DEBUG
fprintf(stderr, "[%p]saw incoming RTCP packet", this);
if (tcpReadStreamSocketNum < 0) {
// Note that "fromAddress" is valid only if we're receiving over UDP (not over TCP):
fprintf(stderr, " (from address %s, port %d)", AddressString(fromAddress).val(), ntohs(fromAddress.sin_port));
}
fprintf(stderr, "\n");
for (unsigned i = 0; i < packetSize; ++i) {
if (i%4 == 0) fprintf(stderr, " ");
fprintf(stderr, "%02x", pkt[i]);
}
fprintf(stderr, "\n");
#endif
int totPacketSize = IP_UDP_HDR_SIZE + packetSize;
// Check the RTCP packet for validity:
// It must at least contain a header (4 bytes), and this header
// must be version=2, with no padding bit, and a payload type of
// SR (200) or RR (201):
if (packetSize < 4) break;
unsigned rtcpHdr = ntohl(*(u_int32_t*)pkt);
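// (The mask 0xE0FE0000 checks the 2-bit version field (must be 2), the
// padding bit (must be 0), and the payload type with its low-order bit
// ignored, so both SR (200) and RR (201) are accepted.)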
if ((rtcpHdr & 0xE0FE0000) != (0x80000000 | (RTCP_PT_SR<<16))) {
#ifdef DEBUG
fprintf(stderr, "rejected bad RTCP packet: header 0x%08x\n", rtcpHdr);
#endif
break;
}
// Process each of the individual RTCP 'subpackets' in (what may be)
// a compound RTCP packet.
int typeOfPacket = PACKET_UNKNOWN_TYPE;
unsigned reportSenderSSRC = 0;
Boolean packetOK = False;
while (1) {
unsigned rc = (rtcpHdr>>24)&0x1F;
unsigned pt = (rtcpHdr>>16)&0xFF;
unsigned length = 4*(rtcpHdr&0xFFFF); // doesn't count hdr
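// (Per RFC 3550, the 16-bit length field counts 32-bit words minus one, so
// 4*length is the number of bytes that follow this 4-byte header.)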
ADVANCE(4); // skip over the header
if (length > packetSize) break;
// Assume that each RTCP subpacket begins with a 4-byte SSRC:
if (length < 4) break; length -= 4;
reportSenderSSRC = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
Boolean subPacketOK = False;
switch (pt) {
case RTCP_PT_SR: {
#ifdef DEBUG
fprintf(stderr, "SR\n");
#endif
if (length < 20) break; length -= 20;
// Extract the NTP timestamp, and note this:
unsigned NTPmsw = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
unsigned NTPlsw = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
unsigned rtpTimestamp = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
if (fSource != NULL) {
RTPReceptionStatsDB& receptionStats
= fSource->receptionStatsDB();
receptionStats.noteIncomingSR(reportSenderSSRC,
NTPmsw, NTPlsw, rtpTimestamp);
}
ADVANCE(8); // skip over packet count, octet count
// If a 'SR handler' was set, call it now:
if (fSRHandlerTask != NULL) (*fSRHandlerTask)(fSRHandlerClientData);
// The rest of the SR is handled like a RR (so, no "break;" here)
}
case RTCP_PT_RR: {
#ifdef DEBUG
fprintf(stderr, "RR\n");
#endif
unsigned reportBlocksSize = rc*(6*4);
if (length < reportBlocksSize) break;
length -= reportBlocksSize;
if (fSink != NULL) {
// Use this information to update stats about our transmissions:
RTPTransmissionStatsDB& transmissionStats = fSink->transmissionStatsDB();
for (unsigned i = 0; i < rc; ++i) {
unsigned senderSSRC = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
// We care only about reports about our own transmission, not others'
if (senderSSRC == fSink->SSRC()) {
unsigned lossStats = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
unsigned highestReceived = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
unsigned jitter = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
unsigned timeLastSR = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
unsigned timeSinceLastSR = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
transmissionStats.noteIncomingRR(reportSenderSSRC, fromAddress,
lossStats,
highestReceived, jitter,
timeLastSR, timeSinceLastSR);
} else {
ADVANCE(4*5);
}
}
} else {
ADVANCE(reportBlocksSize);
}
if (pt == RTCP_PT_RR) { // i.e., we didn't fall through from 'SR'
// If a 'RR handler' was set, call it now:
// Specific RR handler:
if (fSpecificRRHandlerTable != NULL) {
netAddressBits fromAddr;
portNumBits fromPortNum;
if (tcpReadStreamSocketNum < 0) {
// Normal case: We read the RTCP packet over UDP
fromAddr = fromAddress.sin_addr.s_addr;
fromPortNum = ntohs(fromAddress.sin_port);
} else {
// Special case: We read the RTCP packet over TCP (interleaved)
// Hack: Use the TCP socket and channel id to look up the handler
fromAddr = tcpReadStreamSocketNum;
fromPortNum = tcpReadStreamChannelId;
}
Port fromPort(fromPortNum);
RRHandlerRecord* rrHandler
= (RRHandlerRecord*)(fSpecificRRHandlerTable->Lookup(fromAddr, (~0), fromPort));
if (rrHandler != NULL) {
if (rrHandler->rrHandlerTask != NULL) {
(*(rrHandler->rrHandlerTask))(rrHandler->rrHandlerClientData);
}
}
}
// General RR handler:
if (fRRHandlerTask != NULL) (*fRRHandlerTask)(fRRHandlerClientData);
}
subPacketOK = True;
typeOfPacket = PACKET_RTCP_REPORT;
break;
}
case RTCP_PT_BYE: {
#ifdef DEBUG
fprintf(stderr, "BYE\n");
#endif
// If a 'BYE handler' was set, arrange for it to be called at the end of this routine.
// (Note: We don't call it immediately, in case it happens to cause "this" to be deleted.)
if (fByeHandlerTask != NULL
&& (!fByeHandleActiveParticipantsOnly
|| (fSource != NULL
&& fSource->receptionStatsDB().lookup(reportSenderSSRC) != NULL)
|| (fSink != NULL
&& fSink->transmissionStatsDB().lookup(reportSenderSSRC) != NULL))) {
callByeHandler = True;
}
// We should really check for & handle >1 SSRCs being present #####
subPacketOK = True;
typeOfPacket = PACKET_BYE;
break;
}
// Later handle SDES, APP, and compound RTCP packets #####
default:
#ifdef DEBUG
fprintf(stderr, "UNSUPPORTED TYPE(0x%x)\n", pt);
#endif
subPacketOK = True;
break;
}
if (!subPacketOK) break;
// need to check for (& handle) SSRC collision! #####
#ifdef DEBUG
fprintf(stderr, "validated RTCP subpacket (type %d): %d, %d, %d, 0x%08x\n", typeOfPacket, rc, pt, length, reportSenderSSRC);
#endif
// Skip over any remaining bytes in this subpacket:
ADVANCE(length);
// Check whether another RTCP 'subpacket' follows:
if (packetSize == 0) {
packetOK = True;
break;
} else if (packetSize < 4) {
#ifdef DEBUG
fprintf(stderr, "extraneous %d bytes at end of RTCP packet!\n", packetSize);
#endif
break;
}
rtcpHdr = ntohl(*(u_int32_t*)pkt);
if ((rtcpHdr & 0xC0000000) != 0x80000000) {
#ifdef DEBUG
fprintf(stderr, "bad RTCP subpacket: header 0x%08x\n", rtcpHdr);
#endif
break;
}
}
if (!packetOK) {
#ifdef DEBUG
fprintf(stderr, "rejected bad RTCP subpacket: header 0x%08x\n", rtcpHdr);
#endif
break;
} else {
#ifdef DEBUG
fprintf(stderr, "validated entire RTCP packet\n");
#endif
}
onReceive(typeOfPacket, totPacketSize, reportSenderSSRC);
// Finally, if we need to call a "BYE" handler, do so now (in case it causes "this" to get deleted):
if (callByeHandler && fByeHandlerTask != NULL/*sanity check*/) {
TaskFunc* byeHandler = fByeHandlerTask;
fByeHandlerTask = NULL; // because we call the handler only once, by default
(*byeHandler)(fByeHandlerClientData);
}
} while (0);
}
void RTCPInstance::onReceive(int typeOfPacket, int totPacketSize,
unsigned ssrc) {
fTypeOfPacket = typeOfPacket;
fLastReceivedSize = totPacketSize;
fLastReceivedSSRC = ssrc;
int members = (int)numMembers();
int senders = (fSink != NULL) ? 1 : 0;
OnReceive(this, // p
this, // e
&members, // members
&fPrevNumMembers, // pmembers
&senders, // senders
&fAveRTCPSize, // avg_rtcp_size
&fPrevReportTime, // tp
dTimeNow(), // tc
fNextReportTime);
}
void RTCPInstance::sendReport() {
#ifdef DEBUG
fprintf(stderr, "sending REPORT\n");
#endif
// Begin by including a SR and/or RR report:
if (!addReport()) return;
// Then, include a SDES:
addSDES();
// Send the report:
sendBuiltPacket();
// Periodically clean out old members from our SSRC membership database:
const unsigned membershipReapPeriod = 5;
if ((++fOutgoingReportCount) % membershipReapPeriod == 0) {
unsigned threshold = fOutgoingReportCount - membershipReapPeriod;
fKnownMembers->reapOldMembers(threshold);
}
}
void RTCPInstance::sendBYE() {
#ifdef DEBUG
fprintf(stderr, "sending BYE\n");
#endif
// The packet must begin with a SR and/or RR report:
(void)addReport(True);
addBYE();
sendBuiltPacket();
}
void RTCPInstance::sendBuiltPacket() {
#ifdef DEBUG
fprintf(stderr, "sending RTCP packet\n");
unsigned char* p = fOutBuf->packet();
for (unsigned i = 0; i < fOutBuf->curPacketSize(); ++i) {
if (i%4 == 0) fprintf(stderr," ");
fprintf(stderr, "%02x", p[i]);
}
fprintf(stderr, "\n");
#endif
unsigned reportSize = fOutBuf->curPacketSize();
fRTCPInterface.sendPacket(fOutBuf->packet(), reportSize);
fOutBuf->resetOffset();
fLastSentSize = IP_UDP_HDR_SIZE + reportSize;
fHaveJustSentPacket = True;
fLastPacketSentSize = reportSize;
}
int RTCPInstance::checkNewSSRC() {
return fKnownMembers->noteMembership(fLastReceivedSSRC,
fOutgoingReportCount);
}
void RTCPInstance::removeLastReceivedSSRC() {
removeSSRC(fLastReceivedSSRC, False/*keep stats around*/);
}
void RTCPInstance::removeSSRC(u_int32_t ssrc, Boolean alsoRemoveStats) {
fKnownMembers->remove(ssrc);
if (alsoRemoveStats) {
// Also, remove records of this SSRC from any reception or transmission stats
if (fSource != NULL) fSource->receptionStatsDB().removeRecord(ssrc);
if (fSink != NULL) fSink->transmissionStatsDB().removeRecord(ssrc);
}
}
void RTCPInstance::onExpire(RTCPInstance* instance) {
instance->onExpire1();
}
// Member functions to build specific kinds of report:
Boolean RTCPInstance::addReport(Boolean alwaysAdd) {
// Include a SR or a RR, depending on whether we have an associated sink or source:
if (fSink != NULL) {
if (!alwaysAdd) {
if (!fSink->enableRTCPReports()) return False;
// Hack: Don't send a SR during those (brief) times when the timestamp of the
// next outgoing RTP packet has been preset, to ensure that that timestamp gets
// used for that outgoing packet. (David Bertrand, 2006.07.18)
if (fSink->nextTimestampHasBeenPreset()) return False;
}
addSR();
} else if (fSource != NULL) {
if (!alwaysAdd) {
if (!fSource->enableRTCPReports()) return False;
}
addRR();
}
return True;
}
void RTCPInstance::addSR() {
// ASSERT: fSink != NULL
enqueueCommonReportPrefix(RTCP_PT_SR, fSink->SSRC(),
5 /* extra words in a SR */);
// Now, add the 'sender info' for our sink
// Insert the NTP and RTP timestamps for the 'wallclock time':
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
fOutBuf->enqueueWord(timeNow.tv_sec + 0x83AA7E80);
// NTP timestamp most-significant word (1970 epoch -> 1900 epoch)
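// (0x83AA7E80 == 2208988800, the number of seconds between the NTP epoch,
// 1 January 1900, and the Unix epoch, 1 January 1970.)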
double fractionalPart = (timeNow.tv_usec/15625.0)*0x04000000; // 2^32/10^6
fOutBuf->enqueueWord((unsigned)(fractionalPart+0.5));
// NTP timestamp least-significant word
unsigned rtpTimestamp = fSink->convertToRTPTimestamp(timeNow);
fOutBuf->enqueueWord(rtpTimestamp); // RTP ts
// Insert the packet and byte counts:
fOutBuf->enqueueWord(fSink->packetCount());
fOutBuf->enqueueWord(fSink->octetCount());
enqueueCommonReportSuffix();
}
void RTCPInstance::addRR() {
// ASSERT: fSource != NULL
enqueueCommonReportPrefix(RTCP_PT_RR, fSource->SSRC());
enqueueCommonReportSuffix();
}
void RTCPInstance::enqueueCommonReportPrefix(unsigned char packetType,
unsigned SSRC,
unsigned numExtraWords) {
unsigned numReportingSources;
if (fSource == NULL) {
numReportingSources = 0; // we don't receive anything
} else {
RTPReceptionStatsDB& allReceptionStats
= fSource->receptionStatsDB();
numReportingSources = allReceptionStats.numActiveSourcesSinceLastReset();
// This must be <32, to fit in 5 bits:
if (numReportingSources >= 32) { numReportingSources = 31; }
// Later: support adding more reports to handle >32 sources (unlikely)#####
}
unsigned rtcpHdr = 0x80000000; // version 2, no padding
rtcpHdr |= (numReportingSources<<24);
rtcpHdr |= (packetType<<16);
rtcpHdr |= (1 + numExtraWords + 6*numReportingSources);
// each report block is 6 32-bit words long
fOutBuf->enqueueWord(rtcpHdr);
fOutBuf->enqueueWord(SSRC);
}
void RTCPInstance::enqueueCommonReportSuffix() {
// Output the report blocks for each source:
if (fSource != NULL) {
RTPReceptionStatsDB& allReceptionStats
= fSource->receptionStatsDB();
RTPReceptionStatsDB::Iterator iterator(allReceptionStats);
while (1) {
RTPReceptionStats* receptionStats = iterator.next();
if (receptionStats == NULL) break;
enqueueReportBlock(receptionStats);
}
allReceptionStats.reset(); // because we have just generated a report
}
}
void
RTCPInstance::enqueueReportBlock(RTPReceptionStats* stats) {
fOutBuf->enqueueWord(stats->SSRC());
unsigned highestExtSeqNumReceived = stats->highestExtSeqNumReceived();
unsigned totNumExpected
= highestExtSeqNumReceived - stats->baseExtSeqNumReceived();
int totNumLost = totNumExpected - stats->totNumPacketsReceived();
// 'Clamp' this loss number to a 24-bit signed value:
if (totNumLost > 0x007FFFFF) {
totNumLost = 0x007FFFFF;
} else if (totNumLost < 0) {
if (totNumLost < -0x00800000) totNumLost = 0x00800000; // unlikely, but...
totNumLost &= 0x00FFFFFF;
}
unsigned numExpectedSinceLastReset
= highestExtSeqNumReceived - stats->lastResetExtSeqNumReceived();
int numLostSinceLastReset
= numExpectedSinceLastReset - stats->numPacketsReceivedSinceLastReset();
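// (Per RFC 3550, the loss fraction is an 8-bit fixed-point binary fraction:
// packets lost since the last report, times 256, divided by packets expected.)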
unsigned char lossFraction;
if (numExpectedSinceLastReset == 0 || numLostSinceLastReset < 0) {
lossFraction = 0;
} else {
lossFraction = (unsigned char)
((numLostSinceLastReset << 8) / numExpectedSinceLastReset);
}
fOutBuf->enqueueWord((lossFraction<<24) | totNumLost);
fOutBuf->enqueueWord(highestExtSeqNumReceived);
fOutBuf->enqueueWord(stats->jitter());
unsigned NTPmsw = stats->lastReceivedSR_NTPmsw();
unsigned NTPlsw = stats->lastReceivedSR_NTPlsw();
unsigned LSR = ((NTPmsw&0xFFFF)<<16)|(NTPlsw>>16); // middle 32 bits
fOutBuf->enqueueWord(LSR);
// Figure out how long has elapsed since the last SR rcvd from this src:
struct timeval const& LSRtime = stats->lastReceivedSR_time(); // "last SR"
struct timeval timeNow, timeSinceLSR;
gettimeofday(&timeNow, NULL);
if (timeNow.tv_usec < LSRtime.tv_usec) {
timeNow.tv_usec += 1000000;
timeNow.tv_sec -= 1;
}
timeSinceLSR.tv_sec = timeNow.tv_sec - LSRtime.tv_sec;
timeSinceLSR.tv_usec = timeNow.tv_usec - LSRtime.tv_usec;
// The enqueued time is in units of 1/65536 seconds.
// (Note that 65536/1000000 == 1024/15625)
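// ((tv_usec<<11)/31250 computes usec*2048/31250 == usec*65536/1000000, and
// the added 15625 (half of 31250) rounds to the nearest 1/65536 second.)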
unsigned DLSR;
if (LSR == 0) {
DLSR = 0;
} else {
DLSR = (timeSinceLSR.tv_sec<<16)
| ( (((timeSinceLSR.tv_usec<<11)+15625)/31250) & 0xFFFF);
}
fOutBuf->enqueueWord(DLSR);
}
void RTCPInstance::addSDES() {
// For now we support only the CNAME item; later support more #####
// Begin by figuring out the size of the entire SDES report:
unsigned numBytes = 4;
// counts the SSRC, but not the header; it'll get subtracted out
numBytes += fCNAME.totalSize(); // includes id and length
numBytes += 1; // the special END item
unsigned num4ByteWords = (numBytes + 3)/4;
unsigned rtcpHdr = 0x81000000; // version 2, no padding, 1 SSRC chunk
rtcpHdr |= (RTCP_PT_SDES<<16);
rtcpHdr |= num4ByteWords;
fOutBuf->enqueueWord(rtcpHdr);
if (fSource != NULL) {
fOutBuf->enqueueWord(fSource->SSRC());
} else if (fSink != NULL) {
fOutBuf->enqueueWord(fSink->SSRC());
}
// Add the CNAME:
fOutBuf->enqueue(fCNAME.data(), fCNAME.totalSize());
// Add the 'END' item (i.e., a zero byte), plus any more needed to pad:
unsigned numPaddingBytesNeeded = 4 - (fOutBuf->curPacketSize() % 4);
unsigned char const zero = '\0';
while (numPaddingBytesNeeded-- > 0) fOutBuf->enqueue(&zero, 1);
}
void RTCPInstance::addBYE() {
unsigned rtcpHdr = 0x81000000; // version 2, no padding, 1 SSRC
rtcpHdr |= (RTCP_PT_BYE<<16);
rtcpHdr |= 1; // 2 32-bit words total (i.e., with 1 SSRC)
fOutBuf->enqueueWord(rtcpHdr);
if (fSource != NULL) {
fOutBuf->enqueueWord(fSource->SSRC());
} else if (fSink != NULL) {
fOutBuf->enqueueWord(fSink->SSRC());
}
}
void RTCPInstance::schedule(double nextTime) {
fNextReportTime = nextTime;
double secondsToDelay = nextTime - dTimeNow();
if (secondsToDelay < 0) secondsToDelay = 0;
#ifdef DEBUG
fprintf(stderr, "schedule(%f->%f)\n", secondsToDelay, nextTime);
#endif
int64_t usToGo = (int64_t)(secondsToDelay * 1000000);
nextTask() = envir().taskScheduler().scheduleDelayedTask(usToGo,
(TaskFunc*)RTCPInstance::onExpire, this);
}
void RTCPInstance::reschedule(double nextTime) {
envir().taskScheduler().unscheduleDelayedTask(nextTask());
schedule(nextTime);
}
void RTCPInstance::onExpire1() {
// Note: fTotSessionBW is kbits per second
double rtcpBW = 0.05*fTotSessionBW*1024/8; // -> bytes per second
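// (Example: with fTotSessionBW == 500 (kbps), rtcpBW == 0.05*500*1024/8
// == 3200 bytes per second: the standard 5% RTCP share from RFC 3550.)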
OnExpire(this, // event
numMembers(), // members
(fSink != NULL) ? 1 : 0, // senders
rtcpBW, // rtcp_bw
(fSink != NULL) ? 1 : 0, // we_sent
&fAveRTCPSize, // ave_rtcp_size
&fIsInitial, // initial
dTimeNow(), // tc
&fPrevReportTime, // tp
&fPrevNumMembers // pmembers
);
}
////////// SDESItem //////////
SDESItem::SDESItem(unsigned char tag, unsigned char const* value) {
unsigned length = strlen((char const*)value);
if (length > 0xFF) length = 0xFF; // maximum data length for a SDES item
fData[0] = tag;
fData[1] = (unsigned char)length;
memmove(&fData[2], value, length);
}
unsigned SDESItem::totalSize() const {
return 2 + (unsigned)fData[1];
}
////////// Implementation of routines imported by the "rtcp_from_spec" C code
extern "C" void Schedule(double nextTime, event e) {
RTCPInstance* instance = (RTCPInstance*)e;
if (instance == NULL) return;
instance->schedule(nextTime);
}
extern "C" void Reschedule(double nextTime, event e) {
RTCPInstance* instance = (RTCPInstance*)e;
if (instance == NULL) return;
instance->reschedule(nextTime);
}
extern "C" void SendRTCPReport(event e) {
RTCPInstance* instance = (RTCPInstance*)e;
if (instance == NULL) return;
instance->sendReport();
}
extern "C" void SendBYEPacket(event e) {
RTCPInstance* instance = (RTCPInstance*)e;
if (instance == NULL) return;
instance->sendBYE();
}
extern "C" int TypeOfEvent(event e) {
RTCPInstance* instance = (RTCPInstance*)e;
if (instance == NULL) return EVENT_UNKNOWN;
return instance->typeOfEvent();
}
extern "C" int SentPacketSize(event e) {
RTCPInstance* instance = (RTCPInstance*)e;
if (instance == NULL) return 0;
return instance->sentPacketSize();
}
extern "C" int PacketType(packet p) {
RTCPInstance* instance = (RTCPInstance*)p;
if (instance == NULL) return PACKET_UNKNOWN_TYPE;
return instance->packetType();
}
extern "C" int ReceivedPacketSize(packet p) {
RTCPInstance* instance = (RTCPInstance*)p;
if (instance == NULL) return 0;
return instance->receivedPacketSize();
}
extern "C" int NewMember(packet p) {
RTCPInstance* instance = (RTCPInstance*)p;
if (instance == NULL) return 0;
return instance->checkNewSSRC();
}
extern "C" int NewSender(packet /*p*/) {
return 0; // we don't yet recognize senders other than ourselves #####
}
extern "C" void AddMember(packet /*p*/) {
// Do nothing; all of the real work was done when NewMember() was called
}
extern "C" void AddSender(packet /*p*/) {
// we don't yet recognize senders other than ourselves #####
}
extern "C" void RemoveMember(packet p) {
RTCPInstance* instance = (RTCPInstance*)p;
if (instance == NULL) return;
instance->removeLastReceivedSSRC();
}
extern "C" void RemoveSender(packet /*p*/) {
// we don't yet recognize senders other than ourselves #####
}
extern "C" double drand30() {
unsigned tmp = our_random()&0x3FFFFFFF; // a random 30-bit integer
return tmp/(double)(1024*1024*1024);
}
live/liveMedia/EBMLNumber.hh 000444 001751 000000 00000011247 12265042432 016054 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// EBML numbers (ids and sizes)
// C++ header
#ifndef _EBML_NUMBER_HH
#define _EBML_NUMBER_HH
#include "NetCommon.h"
#include "Boolean.hh"
#include <stdio.h>
#define EBML_NUMBER_MAX_LEN 8
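// An EBML number is variable-length, UTF-8 style: the position of the first
// 1 bit in the leading byte gives the total length, up to 8 bytes here.
// For data sizes that marker bit is stripped ("stripLeading1"); EBML ids,
// such as the Matroska ids below, keep it.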
class EBMLNumber {
public:
EBMLNumber(Boolean stripLeading1 = True);
virtual ~EBMLNumber();
u_int64_t val() const;
char* hexString() const; // used for debugging
Boolean operator==(u_int64_t arg2) const { return val() == arg2; }
Boolean operator!=(u_int64_t arg2) const { return !(*this == arg2); }
public:
Boolean stripLeading1;
unsigned len;
u_int8_t data[EBML_NUMBER_MAX_LEN];
};
// Definitions of some Matroska/EBML IDs (including the ones that we check for):
#define MATROSKA_ID_EBML 0x1A45DFA3
#define MATROSKA_ID_VOID 0xEC
#define MATROSKA_ID_CRC_32 0xBF
#define MATROSKA_ID_SEGMENT 0x18538067
#define MATROSKA_ID_SEEK_HEAD 0x114D9B74
#define MATROSKA_ID_SEEK 0x4DBB
#define MATROSKA_ID_SEEK_ID 0x53AB
#define MATROSKA_ID_SEEK_POSITION 0x53AC
#define MATROSKA_ID_INFO 0x1549A966
#define MATROSKA_ID_SEGMENT_UID 0x73A4
#define MATROSKA_ID_TIMECODE_SCALE 0x2AD7B1
#define MATROSKA_ID_DURATION 0x4489
#define MATROSKA_ID_DATE_UTC 0x4461
#define MATROSKA_ID_TITLE 0x7BA9
#define MATROSKA_ID_MUXING_APP 0x4D80
#define MATROSKA_ID_WRITING_APP 0x5741
#define MATROSKA_ID_CLUSTER 0x1F43B675
#define MATROSKA_ID_TIMECODE 0xE7
#define MATROSKA_ID_POSITION 0xA7
#define MATROSKA_ID_PREV_SIZE 0xAB
#define MATROSKA_ID_SIMPLEBLOCK 0xA3
#define MATROSKA_ID_BLOCK_GROUP 0xA0
#define MATROSKA_ID_BLOCK 0xA1
#define MATROSKA_ID_BLOCK_DURATION 0x9B
#define MATROSKA_ID_REFERENCE_BLOCK 0xFB
#define MATROSKA_ID_TRACKS 0x1654AE6B
#define MATROSKA_ID_TRACK_ENTRY 0xAE
#define MATROSKA_ID_TRACK_NUMBER 0xD7
#define MATROSKA_ID_TRACK_UID 0x73C5
#define MATROSKA_ID_TRACK_TYPE 0x83
#define MATROSKA_ID_FLAG_ENABLED 0xB9
#define MATROSKA_ID_FLAG_DEFAULT 0x88
#define MATROSKA_ID_FLAG_FORCED 0x55AA
#define MATROSKA_ID_FLAG_LACING 0x9C
#define MATROSKA_ID_MIN_CACHE 0x6DE7
#define MATROSKA_ID_DEFAULT_DURATION 0x23E383
#define MATROSKA_ID_TRACK_TIMECODE_SCALE 0x23314F
#define MATROSKA_ID_MAX_BLOCK_ADDITION_ID 0x55EE
#define MATROSKA_ID_NAME 0x536E
#define MATROSKA_ID_LANGUAGE 0x22B59C
#define MATROSKA_ID_CODEC 0x86
#define MATROSKA_ID_CODEC_PRIVATE 0x63A2
#define MATROSKA_ID_CODEC_NAME 0x258688
#define MATROSKA_ID_CODEC_DECODE_ALL 0xAA
#define MATROSKA_ID_VIDEO 0xE0
#define MATROSKA_ID_FLAG_INTERLACED 0x9A
#define MATROSKA_ID_PIXEL_WIDTH 0xB0
#define MATROSKA_ID_PIXEL_HEIGHT 0xBA
#define MATROSKA_ID_DISPLAY_WIDTH 0x54B0
#define MATROSKA_ID_DISPLAY_HEIGHT 0x54BA
#define MATROSKA_ID_DISPLAY_UNIT 0x54B2
#define MATROSKA_ID_AUDIO 0xE1
#define MATROSKA_ID_SAMPLING_FREQUENCY 0xB5
#define MATROSKA_ID_OUTPUT_SAMPLING_FREQUENCY 0x78B5
#define MATROSKA_ID_CHANNELS 0x9F
#define MATROSKA_ID_BIT_DEPTH 0x6264
#define MATROSKA_ID_CONTENT_ENCODINGS 0x6D80
#define MATROSKA_ID_CONTENT_ENCODING 0x6240
#define MATROSKA_ID_CONTENT_COMPRESSION 0x5034
#define MATROSKA_ID_CONTENT_COMP_ALGO 0x4254
#define MATROSKA_ID_CONTENT_COMP_SETTINGS 0x4255
#define MATROSKA_ID_CONTENT_ENCRYPTION 0x5035
#define MATROSKA_ID_ATTACHMENTS 0x1941A469
#define MATROSKA_ID_ATTACHED_FILE 0x61A7
#define MATROSKA_ID_FILE_DESCRIPTION 0x467E
#define MATROSKA_ID_FILE_NAME 0x466E
#define MATROSKA_ID_FILE_MIME_TYPE 0x4660
#define MATROSKA_ID_FILE_DATA 0x465C
#define MATROSKA_ID_FILE_UID 0x46AE
#define MATROSKA_ID_CUES 0x1C53BB6B
#define MATROSKA_ID_CUE_POINT 0xBB
#define MATROSKA_ID_CUE_TIME 0xB3
#define MATROSKA_ID_CUE_TRACK_POSITIONS 0xB7
#define MATROSKA_ID_CUE_TRACK 0xF7
#define MATROSKA_ID_CUE_CLUSTER_POSITION 0xF1
#define MATROSKA_ID_CUE_BLOCK_NUMBER 0x5378
#define MATROSKA_ID_TAGS 0x1254C367
class EBMLId: public EBMLNumber {
public:
EBMLId();
virtual ~EBMLId();
char const* stringName() const; // used for debugging
};
class EBMLDataSize: public EBMLNumber {
public:
EBMLDataSize();
virtual ~EBMLDataSize();
};
#endif
live/liveMedia/QuickTimeGenericRTPSource.cpp 000444 001751 000000 00000023044 12265042432 021304 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP Sources containing generic QuickTime stream data, as defined in
// <http://developer.apple.com/quicktime/icefloe/dispatch026.html>
// Implementation
#include "QuickTimeGenericRTPSource.hh"
///// QTGenericBufferedPacket and QTGenericBufferedPacketFactory /////
// A subclass of BufferedPacket, used to separate out
// individual frames (when PCK == 2)
class QTGenericBufferedPacket: public BufferedPacket {
public:
QTGenericBufferedPacket(QuickTimeGenericRTPSource& ourSource);
virtual ~QTGenericBufferedPacket();
private: // redefined virtual functions
virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
unsigned dataSize);
private:
QuickTimeGenericRTPSource& fOurSource;
};
class QTGenericBufferedPacketFactory: public BufferedPacketFactory {
private: // redefined virtual functions
virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
};
////////// QuickTimeGenericRTPSource //////////
QuickTimeGenericRTPSource*
QuickTimeGenericRTPSource::createNew(UsageEnvironment& env,
Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency,
char const* mimeTypeString) {
return new QuickTimeGenericRTPSource(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency,
mimeTypeString);
}
QuickTimeGenericRTPSource
::QuickTimeGenericRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency,
char const* mimeTypeString)
: MultiFramedRTPSource(env, RTPgs,
rtpPayloadFormat, rtpTimestampFrequency,
new QTGenericBufferedPacketFactory),
fMIMEtypeString(strDup(mimeTypeString)) {
qtState.PCK = 0;
qtState.timescale = 0;
qtState.sdAtom = NULL;
qtState.sdAtomSize = qtState.width = qtState.height = 0;
}
QuickTimeGenericRTPSource::~QuickTimeGenericRTPSource() {
delete[] qtState.sdAtom;
delete[] (char*)fMIMEtypeString;
}
Boolean QuickTimeGenericRTPSource
::processSpecialHeader(BufferedPacket* packet,
unsigned& resultSpecialHeaderSize) {
unsigned char* headerStart = packet->data();
unsigned packetSize = packet->dataSize();
// The "QuickTime Header" must be at least 4 bytes in size:
// Extract the known fields from the first 4 bytes:
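// (Bit layout, as parsed below: byte 0 holds VER (4 bits), PCK (2 bits),
// and the S and Q flags; the L flag is the top bit of byte 1; the D flag
// and the 15-bit payload id occupy bytes 2-3.)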
unsigned expectedHeaderSize = 4;
if (packetSize < expectedHeaderSize) return False;
unsigned char VER = (headerStart[0]&0xF0)>>4;
if (VER > 1) return False; // unknown header version
qtState.PCK = (headerStart[0]&0x0C)>>2;
#ifdef DEBUG
Boolean S = (headerStart[0]&0x02) != 0;
#endif
Boolean Q = (headerStart[0]&0x01) != 0;
Boolean L = (headerStart[1]&0x80) != 0;
#ifdef DEBUG
Boolean D = (headerStart[2]&0x80) != 0;
unsigned short payloadId = ((headerStart[2]&0x7F)<<8)|headerStart[3];
#endif
headerStart += 4;
#ifdef DEBUG
fprintf(stderr, "PCK: %d, S: %d, Q: %d, L: %d, D: %d, payloadId: %d\n", qtState.PCK, S, Q, L, D, payloadId);
#endif
if (Q) { // A "QuickTime Payload Description" follows
expectedHeaderSize += 4;
if (packetSize < expectedHeaderSize) return False;
#ifdef DEBUG
Boolean K = (headerStart[0]&0x80) != 0;
Boolean F = (headerStart[0]&0x40) != 0;
Boolean A = (headerStart[0]&0x20) != 0;
Boolean Z = (headerStart[0]&0x10) != 0;
#endif
unsigned payloadDescriptionLength = (headerStart[2]<<8)|headerStart[3];
headerStart += 4;
#ifdef DEBUG
fprintf(stderr, "\tK: %d, F: %d, A: %d, Z: %d, payloadDescriptionLength: %d\n", K, F, A, Z, payloadDescriptionLength);
#endif
// Make sure "payloadDescriptionLength" is valid
if (payloadDescriptionLength < 12) return False;
expectedHeaderSize += (payloadDescriptionLength - 4);
unsigned nonPaddedSize = expectedHeaderSize;
expectedHeaderSize += 3;
expectedHeaderSize -= expectedHeaderSize%4; // adds padding
if (packetSize < expectedHeaderSize) return False;
unsigned char padding = expectedHeaderSize - nonPaddedSize;
#ifdef DEBUG
unsigned mediaType = (headerStart[0]<<24)|(headerStart[1]<<16)
|(headerStart[2]<<8)|headerStart[3];
#endif
qtState.timescale = (headerStart[4]<<24)|(headerStart[5]<<16)
|(headerStart[6]<<8)|headerStart[7];
headerStart += 8;
payloadDescriptionLength -= 12;
#ifdef DEBUG
fprintf(stderr, "\tmediaType: '%c%c%c%c', timescale: %d, %d bytes of TLVs left\n", mediaType>>24, (mediaType&0xFF0000)>>16, (mediaType&0xFF00)>>8, mediaType&0xFF, qtState.timescale, payloadDescriptionLength);
#endif
while (payloadDescriptionLength > 3) {
unsigned short tlvLength = (headerStart[0]<<8)|headerStart[1];
unsigned short tlvType = (headerStart[2]<<8)|headerStart[3];
payloadDescriptionLength -= 4;
if (tlvLength > payloadDescriptionLength) return False; // bad TLV
headerStart += 4;
#ifdef DEBUG
fprintf(stderr, "\t\tTLV '%c%c', length %d, leaving %d remaining bytes\n", tlvType>>8, tlvType&0xFF, tlvLength, payloadDescriptionLength - tlvLength);
for (int i = 0; i < tlvLength; ++i) fprintf(stderr, "%02x:", headerStart[i]); fprintf(stderr, "\n");
#endif
// Check for 'TLV's that we can use for our 'qtState'
switch (tlvType) {
case ('s'<<8|'d'): { // session description atom
// Sanity check: the first 4 bytes of this must equal "tlvLength":
unsigned atomLength = (headerStart[0]<<24)|(headerStart[1]<<16)
|(headerStart[2]<<8)|(headerStart[3]);
if (atomLength != (unsigned)tlvLength) break;
delete[] qtState.sdAtom; qtState.sdAtom = new char[tlvLength];
memmove(qtState.sdAtom, headerStart, tlvLength);
qtState.sdAtomSize = tlvLength;
break;
}
case ('t'<<8|'w'): { // track width
qtState.width = (headerStart[0]<<8)|headerStart[1];
break;
}
case ('t'<<8|'h'): { // track height
qtState.height = (headerStart[0]<<8)|headerStart[1];
break;
}
}
payloadDescriptionLength -= tlvLength;
headerStart += tlvLength;
}
if (payloadDescriptionLength > 0) return False; // malformed TLV data
headerStart += padding;
}
if (L) { // Sample-Specific info follows
expectedHeaderSize += 4;
if (packetSize < expectedHeaderSize) return False;
unsigned ssInfoLength = (headerStart[2]<<8)|headerStart[3];
headerStart += 4;
#ifdef DEBUG
fprintf(stderr, "\tssInfoLength: %d\n", ssInfoLength);
#endif
// Make sure "ssInfoLength" is valid
if (ssInfoLength < 4) return False;
expectedHeaderSize += (ssInfoLength - 4);
unsigned nonPaddedSize = expectedHeaderSize;
expectedHeaderSize += 3;
expectedHeaderSize -= expectedHeaderSize%4; // adds padding
if (packetSize < expectedHeaderSize) return False;
unsigned char padding = expectedHeaderSize - nonPaddedSize;
ssInfoLength -= 4;
while (ssInfoLength > 3) {
unsigned short tlvLength = (headerStart[0]<<8)|headerStart[1];
#ifdef DEBUG
unsigned short tlvType = (headerStart[2]<<8)|headerStart[3];
#endif
ssInfoLength -= 4;
if (tlvLength > ssInfoLength) return False; // bad TLV
#ifdef DEBUG
fprintf(stderr, "\t\tTLV '%c%c', length %d, leaving %d remaining bytes\n", tlvType>>8, tlvType&0xFF, tlvLength, ssInfoLength - tlvLength);
for (int i = 0; i < tlvLength; ++i) fprintf(stderr, "%02x:", headerStart[4+i]); fprintf(stderr, "\n");
#endif
ssInfoLength -= tlvLength;
headerStart += 4 + tlvLength;
}
if (ssInfoLength > 0) return False; // malformed TLV data
headerStart += padding;
}
fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame;
// whether the *previous* packet ended a frame
fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
resultSpecialHeaderSize = expectedHeaderSize;
#ifdef DEBUG
fprintf(stderr, "Result special header size: %d\n", resultSpecialHeaderSize);
#endif
return True;
}
char const* QuickTimeGenericRTPSource::MIMEtype() const {
if (fMIMEtypeString == NULL) return MultiFramedRTPSource::MIMEtype();
return fMIMEtypeString;
}
////////// QTGenericBufferedPacket and QTGenericBufferedPacketFactory impl
QTGenericBufferedPacket
::QTGenericBufferedPacket(QuickTimeGenericRTPSource& ourSource)
: fOurSource(ourSource) {
}
QTGenericBufferedPacket::~QTGenericBufferedPacket() {
}
unsigned QTGenericBufferedPacket::
nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
// We use the entire packet for a frame, unless "PCK" == 2
if (fOurSource.qtState.PCK != 2) return dataSize;
if (dataSize < 8) return 0; // sanity check
unsigned short sampleLength = (framePtr[2]<<8)|framePtr[3];
// later, extract and use the "timestamp" field #####
framePtr += 8;
dataSize -= 8;
return sampleLength < dataSize ? sampleLength : dataSize;
}
BufferedPacket* QTGenericBufferedPacketFactory
::createNewPacket(MultiFramedRTPSource* ourSource) {
return new QTGenericBufferedPacket((QuickTimeGenericRTPSource&)(*ourSource));
}
live/liveMedia/MPEG1or2Demux.cpp 000444 001751 000000 00000062641 12265042432 016612 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Demultiplexer for a MPEG 1 or 2 Program Stream
// Implementation
#include "MPEG1or2Demux.hh"
#include "MPEG1or2DemuxedElementaryStream.hh"
#include "StreamParser.hh"
#include <stdlib.h>
////////// MPEGProgramStreamParser definition //////////
// An enum representing the current state of the parser:
enum MPEGParseState {
PARSING_PACK_HEADER,
PARSING_SYSTEM_HEADER,
PARSING_PES_PACKET
};
class MPEGProgramStreamParser: public StreamParser {
public:
MPEGProgramStreamParser(MPEG1or2Demux* usingDemux, FramedSource* inputSource);
virtual ~MPEGProgramStreamParser();
public:
unsigned char parse();
// returns the stream id of a stream for which a frame was acquired,
// or 0 if no such frame was acquired.
private:
void setParseState(MPEGParseState parseState);
void parsePackHeader();
void parseSystemHeader();
unsigned char parsePESPacket(); // returns as does parse()
Boolean isSpecialStreamId(unsigned char stream_id) const;
// for PES packet header parsing
private:
MPEG1or2Demux* fUsingDemux;
MPEGParseState fCurrentParseState;
};
////////// MPEG1or2Demux::OutputDescriptor::SavedData definition/implementation //////////
class MPEG1or2Demux::OutputDescriptor::SavedData {
public:
SavedData(unsigned char* buf, unsigned size)
: next(NULL), data(buf), dataSize(size), numBytesUsed(0) {
}
virtual ~SavedData() {
delete[] data;
delete next;
}
SavedData* next;
unsigned char* data;
unsigned dataSize, numBytesUsed;
};
////////// MPEG1or2Demux implementation //////////
MPEG1or2Demux
::MPEG1or2Demux(UsageEnvironment& env,
FramedSource* inputSource, Boolean reclaimWhenLastESDies)
: Medium(env),
fInputSource(inputSource), fMPEGversion(0),
fNextAudioStreamNumber(0), fNextVideoStreamNumber(0),
fReclaimWhenLastESDies(reclaimWhenLastESDies), fNumOutstandingESs(0),
fNumPendingReads(0), fHaveUndeliveredData(False) {
fParser = new MPEGProgramStreamParser(this, inputSource);
for (unsigned i = 0; i < 256; ++i) {
fOutput[i].savedDataHead = fOutput[i].savedDataTail = NULL;
fOutput[i].isPotentiallyReadable = False;
fOutput[i].isCurrentlyActive = False;
fOutput[i].isCurrentlyAwaitingData = False;
}
}
MPEG1or2Demux::~MPEG1or2Demux() {
delete fParser;
for (unsigned i = 0; i < 256; ++i) delete fOutput[i].savedDataHead;
Medium::close(fInputSource);
}
MPEG1or2Demux* MPEG1or2Demux
::createNew(UsageEnvironment& env,
FramedSource* inputSource, Boolean reclaimWhenLastESDies) {
// Need to add source type checking here??? #####
return new MPEG1or2Demux(env, inputSource, reclaimWhenLastESDies);
}
MPEG1or2Demux::SCR::SCR()
: highBit(0), remainingBits(0), extension(0), isValid(False) {
}
void MPEG1or2Demux
::noteElementaryStreamDeletion(MPEG1or2DemuxedElementaryStream* /*es*/) {
if (--fNumOutstandingESs == 0 && fReclaimWhenLastESDies) {
Medium::close(this);
}
}
void MPEG1or2Demux::flushInput() {
fParser->flushInput();
}
MPEG1or2DemuxedElementaryStream*
MPEG1or2Demux::newElementaryStream(u_int8_t streamIdTag) {
++fNumOutstandingESs;
fOutput[streamIdTag].isPotentiallyReadable = True;
return new MPEG1or2DemuxedElementaryStream(envir(), streamIdTag, *this);
}
MPEG1or2DemuxedElementaryStream* MPEG1or2Demux::newAudioStream() {
unsigned char newAudioStreamTag = 0xC0 | (fNextAudioStreamNumber++&~0xE0);
// MPEG audio stream tags are 110x xxxx (binary)
return newElementaryStream(newAudioStreamTag);
}
MPEG1or2DemuxedElementaryStream* MPEG1or2Demux::newVideoStream() {
unsigned char newVideoStreamTag = 0xE0 | (fNextVideoStreamNumber++&~0xF0);
// MPEG video stream tags are 1110 xxxx (binary)
return newElementaryStream(newVideoStreamTag);
}
// Appropriate one of the reserved stream id tags to mean: return raw PES packets:
#define RAW_PES 0xFC
MPEG1or2DemuxedElementaryStream* MPEG1or2Demux::newRawPESStream() {
return newElementaryStream(RAW_PES);
}
void MPEG1or2Demux::registerReadInterest(u_int8_t streamIdTag,
unsigned char* to, unsigned maxSize,
FramedSource::afterGettingFunc* afterGettingFunc,
void* afterGettingClientData,
FramedSource::onCloseFunc* onCloseFunc,
void* onCloseClientData) {
struct OutputDescriptor& out = fOutput[streamIdTag];
// Make sure this stream is not already being read:
if (out.isCurrentlyAwaitingData) {
envir() << "MPEG1or2Demux::registerReadInterest(): attempt to read stream id "
<< (void*)streamIdTag << " more than once!\n";
envir().internalError();
}
out.to = to; out.maxSize = maxSize;
out.fAfterGettingFunc = afterGettingFunc;
out.afterGettingClientData = afterGettingClientData;
out.fOnCloseFunc = onCloseFunc;
out.onCloseClientData = onCloseClientData;
out.isCurrentlyActive = True;
out.isCurrentlyAwaitingData = True;
// out.frameSize and out.presentationTime will be set when a frame's read
++fNumPendingReads;
}
Boolean MPEG1or2Demux::useSavedData(u_int8_t streamIdTag,
unsigned char* to, unsigned maxSize,
FramedSource::afterGettingFunc* afterGettingFunc,
void* afterGettingClientData) {
struct OutputDescriptor& out = fOutput[streamIdTag];
if (out.savedDataHead == NULL) return False; // common case
unsigned totNumBytesCopied = 0;
while (maxSize > 0 && out.savedDataHead != NULL) {
OutputDescriptor::SavedData& savedData = *(out.savedDataHead);
unsigned char* from = &savedData.data[savedData.numBytesUsed];
unsigned numBytesToCopy = savedData.dataSize - savedData.numBytesUsed;
if (numBytesToCopy > maxSize) numBytesToCopy = maxSize;
memmove(to, from, numBytesToCopy);
to += numBytesToCopy;
maxSize -= numBytesToCopy;
out.savedDataTotalSize -= numBytesToCopy;
totNumBytesCopied += numBytesToCopy;
savedData.numBytesUsed += numBytesToCopy;
if (savedData.numBytesUsed == savedData.dataSize) {
out.savedDataHead = savedData.next;
if (out.savedDataHead == NULL) out.savedDataTail = NULL;
savedData.next = NULL;
delete &savedData;
}
}
out.isCurrentlyActive = True;
if (afterGettingFunc != NULL) {
struct timeval presentationTime;
presentationTime.tv_sec = 0; presentationTime.tv_usec = 0; // should fix #####
(*afterGettingFunc)(afterGettingClientData, totNumBytesCopied,
0 /* numTruncatedBytes */, presentationTime,
0 /* durationInMicroseconds ?????#####*/);
}
return True;
}
void MPEG1or2Demux
::continueReadProcessing(void* clientData,
unsigned char* /*ptr*/, unsigned /*size*/,
struct timeval /*presentationTime*/) {
MPEG1or2Demux* demux = (MPEG1or2Demux*)clientData;
demux->continueReadProcessing();
}
void MPEG1or2Demux::continueReadProcessing() {
while (fNumPendingReads > 0) {
unsigned char acquiredStreamIdTag = fParser->parse();
if (acquiredStreamIdTag != 0) {
// We were able to acquire a frame from the input.
struct OutputDescriptor& newOut = fOutput[acquiredStreamIdTag];
newOut.isCurrentlyAwaitingData = False;
// indicates that we can be read again
// (This needs to be set before the 'after getting' call below,
// in case it tries to read another frame)
// Call our own 'after getting' function. Because we're not a 'leaf'
// source, we can call this directly, without risking infinite recursion.
if (newOut.fAfterGettingFunc != NULL) {
(*newOut.fAfterGettingFunc)(newOut.afterGettingClientData,
newOut.frameSize, 0 /* numTruncatedBytes */,
newOut.presentationTime,
0 /* durationInMicroseconds ?????#####*/);
--fNumPendingReads;
}
} else {
// We were unable to parse a complete frame from the input, because:
// - we had to read more data from the source stream, or
// - we found a frame for a stream that was being read, but whose
// reader is not ready to get the frame right now, or
// - the source stream has ended.
break;
}
}
}
void MPEG1or2Demux::getNextFrame(u_int8_t streamIdTag,
unsigned char* to, unsigned maxSize,
FramedSource::afterGettingFunc* afterGettingFunc,
void* afterGettingClientData,
FramedSource::onCloseFunc* onCloseFunc,
void* onCloseClientData) {
// First, check whether we have saved data for this stream id:
if (useSavedData(streamIdTag, to, maxSize,
afterGettingFunc, afterGettingClientData)) {
return;
}
// Then save the parameters of the specified stream id:
registerReadInterest(streamIdTag, to, maxSize,
afterGettingFunc, afterGettingClientData,
onCloseFunc, onCloseClientData);
// Next, if we're the only currently pending read, continue looking for data:
if (fNumPendingReads == 1 || fHaveUndeliveredData) {
fHaveUndeliveredData = False;
continueReadProcessing();
} // otherwise the continued read processing has already been taken care of
}
void MPEG1or2Demux::stopGettingFrames(u_int8_t streamIdTag) {
struct OutputDescriptor& out = fOutput[streamIdTag];
if (out.isCurrentlyAwaitingData && fNumPendingReads > 0) --fNumPendingReads;
out.isCurrentlyActive = out.isCurrentlyAwaitingData = False;
}
void MPEG1or2Demux::handleClosure(void* clientData) {
MPEG1or2Demux* demux = (MPEG1or2Demux*)clientData;
demux->fNumPendingReads = 0;
// Tell all pending readers that our source has closed.
// Note that we need to make a copy of our readers' close functions
// (etc.) before we start calling any of them, in case one of them
// ends up deleting this.
struct {
FramedSource::onCloseFunc* fOnCloseFunc;
void* onCloseClientData;
} savedPending[256];
unsigned i, numPending = 0;
for (i = 0; i < 256; ++i) {
struct OutputDescriptor& out = demux->fOutput[i];
if (out.isCurrentlyAwaitingData) {
if (out.fOnCloseFunc != NULL) {
savedPending[numPending].fOnCloseFunc = out.fOnCloseFunc;
savedPending[numPending].onCloseClientData = out.onCloseClientData;
++numPending;
}
}
delete out.savedDataHead; out.savedDataHead = out.savedDataTail = NULL;
out.savedDataTotalSize = 0;
out.isPotentiallyReadable = out.isCurrentlyActive = out.isCurrentlyAwaitingData
= False;
}
for (i = 0; i < numPending; ++i) {
(*savedPending[i].fOnCloseFunc)(savedPending[i].onCloseClientData);
}
}
////////// MPEGProgramStreamParser implementation //////////
#include <string.h>
MPEGProgramStreamParser::MPEGProgramStreamParser(MPEG1or2Demux* usingDemux,
FramedSource* inputSource)
: StreamParser(inputSource, MPEG1or2Demux::handleClosure, usingDemux,
&MPEG1or2Demux::continueReadProcessing, usingDemux),
fUsingDemux(usingDemux), fCurrentParseState(PARSING_PACK_HEADER) {
}
MPEGProgramStreamParser::~MPEGProgramStreamParser() {
}
void MPEGProgramStreamParser::setParseState(MPEGParseState parseState) {
fCurrentParseState = parseState;
saveParserState();
}
unsigned char MPEGProgramStreamParser::parse() {
unsigned char acquiredStreamTagId = 0;
try {
do {
switch (fCurrentParseState) {
case PARSING_PACK_HEADER: {
parsePackHeader();
break;
}
case PARSING_SYSTEM_HEADER: {
parseSystemHeader();
break;
}
case PARSING_PES_PACKET: {
acquiredStreamTagId = parsePESPacket();
break;
}
}
} while(acquiredStreamTagId == 0);
return acquiredStreamTagId;
} catch (int /*e*/) {
#ifdef DEBUG
fprintf(stderr, "MPEGProgramStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
fflush(stderr);
#endif
return 0; // the parsing got interrupted
}
}
#define PACK_START_CODE 0x000001BA
#define SYSTEM_HEADER_START_CODE 0x000001BB
#define PACKET_START_CODE_PREFIX 0x00000100
static inline Boolean isPacketStartCode(unsigned code) {
return (code&0xFFFFFF00) == PACKET_START_CODE_PREFIX
&& code > SYSTEM_HEADER_START_CODE;
}
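// (Note: the start codes of interest here are 0x000001BA (pack header),
// 0x000001BB (system header), and 0x000001BC through 0x000001FF (PES packets);
// the test above accepts exactly the last group.)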
void MPEGProgramStreamParser::parsePackHeader() {
#ifdef DEBUG
fprintf(stderr, "parsing pack header\n"); fflush(stderr);
#endif
unsigned first4Bytes;
while (1) {
first4Bytes = test4Bytes();
// We're supposed to have a pack header here, but check also for
// a system header or a PES packet, just in case:
if (first4Bytes == PACK_START_CODE) {
skipBytes(4);
break;
} else if (first4Bytes == SYSTEM_HEADER_START_CODE) {
#ifdef DEBUG
fprintf(stderr, "found system header instead of pack header\n");
#endif
setParseState(PARSING_SYSTEM_HEADER);
return;
} else if (isPacketStartCode(first4Bytes)) {
#ifdef DEBUG
fprintf(stderr, "found packet start code 0x%02x instead of pack header\n", first4Bytes);
#endif
setParseState(PARSING_PES_PACKET);
return;
}
setParseState(PARSING_PACK_HEADER); // ensures we progress over bad data
if ((first4Bytes&0xFF) > 1) { // a system code definitely doesn't start here
skipBytes(4);
} else {
skipBytes(1);
}
}
// The size of the pack header differs depending on whether it's
// MPEG-1 or MPEG-2. The next byte tells us this:
unsigned char nextByte = get1Byte();
MPEG1or2Demux::SCR& scr = fUsingDemux->fLastSeenSCR; // alias
if ((nextByte&0xF0) == 0x20) { // MPEG-1
fUsingDemux->fMPEGversion = 1;
scr.highBit = (nextByte&0x08)>>3;
scr.remainingBits = (nextByte&0x06)<<29;
unsigned next4Bytes = get4Bytes();
scr.remainingBits |= (next4Bytes&0xFFFE0000)>>2;
scr.remainingBits |= (next4Bytes&0x0000FFFE)>>1;
scr.extension = 0;
scr.isValid = True;
skipBits(24);
#if defined(DEBUG_TIMESTAMPS) || defined(DEBUG_SCR_TIMESTAMPS)
fprintf(stderr, "pack hdr system_clock_reference_base: 0x%x",
scr.highBit);
fprintf(stderr, "%08x\n", scr.remainingBits);
#endif
} else if ((nextByte&0xC0) == 0x40) { // MPEG-2
fUsingDemux->fMPEGversion = 2;
scr.highBit = (nextByte&0x20)>>5;
scr.remainingBits = (nextByte&0x18)<<27;
scr.remainingBits |= (nextByte&0x03)<<28;
unsigned next4Bytes = get4Bytes();
scr.remainingBits |= (next4Bytes&0xFFF80000)>>4;
scr.remainingBits |= (next4Bytes&0x0003FFF8)>>3;
scr.extension = (next4Bytes&0x00000003)<<7;
next4Bytes = get4Bytes();
scr.extension |= (next4Bytes&0xFE000000)>>25;
scr.isValid = True;
skipBits(5);
#if defined(DEBUG_TIMESTAMPS) || defined(DEBUG_SCR_TIMESTAMPS)
fprintf(stderr, "pack hdr system_clock_reference_base: 0x%x",
scr.highBit);
fprintf(stderr, "%08x\n", scr.remainingBits);
fprintf(stderr, "pack hdr system_clock_reference_extension: 0x%03x\n",
scr.extension);
#endif
unsigned char pack_stuffing_length = getBits(3);
skipBytes(pack_stuffing_length);
} else { // unknown
fUsingDemux->envir() << "StreamParser::parsePack() saw strange byte "
<< (void*)nextByte
<< " following pack_start_code\n";
}
// Check for a System Header next:
setParseState(PARSING_SYSTEM_HEADER);
}
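// (Note: the SCR's 33-bit "system_clock_reference_base" - in 90 kHz units -
// is stored above as a 1-bit "highBit" plus 32 "remainingBits"; the MPEG-2
// pack header additionally carries a 9-bit "system_clock_reference_extension",
// counting 27 MHz sub-ticks of the 90 kHz base.)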
void MPEGProgramStreamParser::parseSystemHeader() {
#ifdef DEBUG
fprintf(stderr, "parsing system header\n"); fflush(stderr);
#endif
unsigned next4Bytes = test4Bytes();
if (next4Bytes != SYSTEM_HEADER_START_CODE) {
// The system header was optional. Look for a PES Packet instead:
setParseState(PARSING_PES_PACKET);
return;
}
#ifdef DEBUG
fprintf(stderr, "saw system_header_start_code\n"); fflush(stderr);
#endif
skipBytes(4); // we've already seen the system_header_start_code
unsigned short remaining_header_length = get2Bytes();
// According to the MPEG-1 and MPEG-2 specs, "remaining_header_length" should be
// at least 6 bytes. Check this now:
if (remaining_header_length < 6) {
fUsingDemux->envir() << "StreamParser::parseSystemHeader(): saw strange header_length: "
<< remaining_header_length << " < 6\n";
}
skipBytes(remaining_header_length);
// Check for a PES Packet next:
setParseState(PARSING_PES_PACKET);
}
#define private_stream_1 0xBD
#define private_stream_2 0xBF
// A test for stream ids that are exempt from normal PES packet header parsing
Boolean MPEGProgramStreamParser
::isSpecialStreamId(unsigned char stream_id) const {
if (stream_id == RAW_PES) return True; // hack
if (fUsingDemux->fMPEGversion == 1) {
return stream_id == private_stream_2;
} else { // assume MPEG-2
if (stream_id <= private_stream_2) {
return stream_id != private_stream_1;
} else if ((stream_id&0xF0) == 0xF0) {
unsigned char lower4Bits = stream_id&0x0F;
return lower4Bits <= 2 || lower4Bits == 0x8 || lower4Bits == 0xF;
} else {
return False;
}
}
}
#define READER_NOT_READY 2
unsigned char MPEGProgramStreamParser::parsePESPacket() {
#ifdef DEBUG
fprintf(stderr, "parsing PES packet\n"); fflush(stderr);
#endif
unsigned next4Bytes = test4Bytes();
if (!isPacketStartCode(next4Bytes)) {
// The PES Packet was optional. Look for a Pack Header instead:
setParseState(PARSING_PACK_HEADER);
return 0;
}
#ifdef DEBUG
fprintf(stderr, "saw packet_start_code_prefix\n"); fflush(stderr);
#endif
skipBytes(3); // we've already seen the packet_start_code_prefix
unsigned char stream_id = get1Byte();
#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
unsigned char streamNum = stream_id;
char const* streamTypeStr;
if ((stream_id&0xE0) == 0xC0) {
streamTypeStr = "audio";
streamNum = stream_id&~0xE0;
} else if ((stream_id&0xF0) == 0xE0) {
streamTypeStr = "video";
streamNum = stream_id&~0xF0;
} else if (stream_id == 0xbc) {
streamTypeStr = "reserved";
} else if (stream_id == 0xbd) {
streamTypeStr = "private_1";
} else if (stream_id == 0xbe) {
streamTypeStr = "padding";
} else if (stream_id == 0xbf) {
streamTypeStr = "private_2";
} else {
streamTypeStr = "unknown";
}
#endif
#ifdef DEBUG
static unsigned frameCount = 1;
fprintf(stderr, "%d, saw %s stream: 0x%02x\n", frameCount, streamTypeStr, streamNum); fflush(stderr);
#endif
unsigned short PES_packet_length = get2Bytes();
#ifdef DEBUG
fprintf(stderr, "PES_packet_length: %d\n", PES_packet_length); fflush(stderr);
#endif
// Parse over the rest of the header, until we get to the packet data itself.
// This varies depending upon the MPEG version:
if (fUsingDemux->fOutput[RAW_PES].isPotentiallyReadable) {
// Hack: We've been asked to return raw PES packets, for every stream:
stream_id = RAW_PES;
}
unsigned savedParserOffset = curOffset();
#ifdef DEBUG_TIMESTAMPS
unsigned char pts_highBit = 0;
unsigned pts_remainingBits = 0;
unsigned char dts_highBit = 0;
unsigned dts_remainingBits = 0;
#endif
if (fUsingDemux->fMPEGversion == 1) {
if (!isSpecialStreamId(stream_id)) {
unsigned char nextByte;
while ((nextByte = get1Byte()) == 0xFF) { // stuffing_byte
}
if ((nextByte&0xC0) == 0x40) { // '01'
skipBytes(1);
nextByte = get1Byte();
}
if ((nextByte&0xF0) == 0x20) { // '0010'
#ifdef DEBUG_TIMESTAMPS
pts_highBit = (nextByte&0x08)>>3;
pts_remainingBits = (nextByte&0x06)<<29;
unsigned next4Bytes = get4Bytes();
pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
#else
skipBytes(4);
#endif
} else if ((nextByte&0xF0) == 0x30) { // '0011'
#ifdef DEBUG_TIMESTAMPS
pts_highBit = (nextByte&0x08)>>3;
pts_remainingBits = (nextByte&0x06)<<29;
unsigned next4Bytes = get4Bytes();
pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
nextByte = get1Byte();
dts_highBit = (nextByte&0x08)>>3;
dts_remainingBits = (nextByte&0x06)<<29;
next4Bytes = get4Bytes();
dts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
dts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
#else
skipBytes(9);
#endif
}
}
} else { // assume MPEG-2
if (!isSpecialStreamId(stream_id)) {
// Fields in the next 3 bytes determine the size of the rest:
unsigned next3Bytes = getBits(24);
#ifdef DEBUG_TIMESTAMPS
unsigned char PTS_DTS_flags = (next3Bytes&0x00C000)>>14;
#endif
#ifdef undef
unsigned char ESCR_flag = (next3Bytes&0x002000)>>13;
unsigned char ES_rate_flag = (next3Bytes&0x001000)>>12;
unsigned char DSM_trick_mode_flag = (next3Bytes&0x000800)>>11;
#endif
unsigned char PES_header_data_length = (next3Bytes&0x0000FF);
#ifdef DEBUG
fprintf(stderr, "PES_header_data_length: 0x%02x\n", PES_header_data_length); fflush(stderr);
#endif
#ifdef DEBUG_TIMESTAMPS
if (PTS_DTS_flags == 0x2 && PES_header_data_length >= 5) {
unsigned char nextByte = get1Byte();
pts_highBit = (nextByte&0x08)>>3;
pts_remainingBits = (nextByte&0x06)<<29;
unsigned next4Bytes = get4Bytes();
pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
skipBytes(PES_header_data_length-5);
} else if (PTS_DTS_flags == 0x3 && PES_header_data_length >= 10) {
unsigned char nextByte = get1Byte();
pts_highBit = (nextByte&0x08)>>3;
pts_remainingBits = (nextByte&0x06)<<29;
unsigned next4Bytes = get4Bytes();
pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
nextByte = get1Byte();
dts_highBit = (nextByte&0x08)>>3;
dts_remainingBits = (nextByte&0x06)<<29;
next4Bytes = get4Bytes();
dts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
dts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
skipBytes(PES_header_data_length-10);
}
#else
skipBytes(PES_header_data_length);
#endif
}
}
#ifdef DEBUG_TIMESTAMPS
fprintf(stderr, "%s stream, ", streamTypeStr);
fprintf(stderr, "packet presentation_time_stamp: 0x%x", pts_highBit);
fprintf(stderr, "%08x\n", pts_remainingBits);
fprintf(stderr, "\t\tpacket decoding_time_stamp: 0x%x", dts_highBit);
fprintf(stderr, "%08x\n", dts_remainingBits);
#endif
// The rest of the packet will be the "PES_packet_data_byte"s
// Make sure that "PES_packet_length" was consistent with where we are now:
unsigned char acquiredStreamIdTag = 0;
unsigned currentParserOffset = curOffset();
unsigned bytesSkipped = currentParserOffset - savedParserOffset;
if (stream_id == RAW_PES) {
restoreSavedParserState(); // so we deliver from the beginning of the PES packet
PES_packet_length += 6; // to include the whole of the PES packet
bytesSkipped = 0;
}
if (PES_packet_length < bytesSkipped) {
fUsingDemux->envir() << "StreamParser::parsePESPacket(): saw inconsistent PES_packet_length "
<< PES_packet_length << " < "
<< bytesSkipped << "\n";
} else {
PES_packet_length -= bytesSkipped;
#ifdef DEBUG
unsigned next4Bytes = test4Bytes();
#endif
// Check whether our using source is interested in this stream type.
// If so, deliver the frame to him:
MPEG1or2Demux::OutputDescriptor_t& out = fUsingDemux->fOutput[stream_id];
if (out.isCurrentlyAwaitingData) {
unsigned numBytesToCopy;
if (PES_packet_length > out.maxSize) {
fUsingDemux->envir() << "MPEGProgramStreamParser::parsePESPacket() error: PES_packet_length ("
<< PES_packet_length
<< ") exceeds max frame size asked for ("
<< out.maxSize << ")\n";
numBytesToCopy = out.maxSize;
} else {
numBytesToCopy = PES_packet_length;
}
getBytes(out.to, numBytesToCopy);
out.frameSize = numBytesToCopy;
#ifdef DEBUG
fprintf(stderr, "%d, %d bytes of PES_packet_data (out.maxSize: %d); first 4 bytes: 0x%08x\n", frameCount, numBytesToCopy, out.maxSize, next4Bytes); fflush(stderr);
#endif
// set out.presentationTime later #####
acquiredStreamIdTag = stream_id;
PES_packet_length -= numBytesToCopy;
} else if (out.isCurrentlyActive) {
// Someone has been reading this stream, but isn't right now.
// We can't deliver this frame until he asks for it, so punt for now.
// The next time he asks for a frame, he'll get it.
#ifdef DEBUG
fprintf(stderr, "%d, currently undeliverable PES data; first 4 bytes: 0x%08x - currently undeliverable!\n", frameCount, next4Bytes); fflush(stderr);
#endif
restoreSavedParserState(); // so we read from the beginning next time
fUsingDemux->fHaveUndeliveredData = True;
throw READER_NOT_READY;
} else if (out.isPotentiallyReadable &&
out.savedDataTotalSize + PES_packet_length < 1000000 /*limit*/) {
// Someone is interested in this stream, but hasn't begun reading it yet.
// Save this data, so that the reader will get it when he later asks for it.
unsigned char* buf = new unsigned char[PES_packet_length];
getBytes(buf, PES_packet_length);
MPEG1or2Demux::OutputDescriptor::SavedData* savedData
= new MPEG1or2Demux::OutputDescriptor::SavedData(buf, PES_packet_length);
if (out.savedDataHead == NULL) {
out.savedDataHead = out.savedDataTail = savedData;
} else {
out.savedDataTail->next = savedData;
out.savedDataTail = savedData;
}
out.savedDataTotalSize += PES_packet_length;
PES_packet_length = 0;
}
skipBytes(PES_packet_length);
}
// Check for another PES Packet next:
setParseState(PARSING_PES_PACKET);
#ifdef DEBUG
++frameCount;
#endif
return acquiredStreamIdTag;
}
live/liveMedia/RTPSink.cpp 000444 001751 000000 00000027140 12265042432 015640 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP Sinks
// Implementation
#include "RTPSink.hh"
#include "GroupsockHelper.hh"
////////// RTPSink //////////
Boolean RTPSink::lookupByName(UsageEnvironment& env, char const* sinkName,
RTPSink*& resultSink) {
resultSink = NULL; // unless we succeed
MediaSink* sink;
if (!MediaSink::lookupByName(env, sinkName, sink)) return False;
if (!sink->isRTPSink()) {
env.setResultMsg(sinkName, " is not an RTP sink");
return False;
}
resultSink = (RTPSink*)sink;
return True;
}
Boolean RTPSink::isRTPSink() const {
return True;
}
RTPSink::RTPSink(UsageEnvironment& env,
Groupsock* rtpGS, unsigned char rtpPayloadType,
unsigned rtpTimestampFrequency,
char const* rtpPayloadFormatName,
unsigned numChannels)
: MediaSink(env), fRTPInterface(this, rtpGS),
fRTPPayloadType(rtpPayloadType),
fPacketCount(0), fOctetCount(0), fTotalOctetCount(0),
fTimestampFrequency(rtpTimestampFrequency), fNextTimestampHasBeenPreset(False), fEnableRTCPReports(True),
fNumChannels(numChannels) {
fRTPPayloadFormatName
= strDup(rtpPayloadFormatName == NULL ? "???" : rtpPayloadFormatName);
gettimeofday(&fCreationTime, NULL);
fTotalOctetCountStartTime = fCreationTime;
resetPresentationTimes();
fSeqNo = (u_int16_t)our_random();
fSSRC = our_random32();
fTimestampBase = our_random32();
fTransmissionStatsDB = new RTPTransmissionStatsDB(*this);
}
RTPSink::~RTPSink() {
delete fTransmissionStatsDB;
delete[] (char*)fRTPPayloadFormatName;
}
u_int32_t RTPSink::convertToRTPTimestamp(struct timeval tv) {
// Begin by converting from "struct timeval" units to RTP timestamp units:
u_int32_t timestampIncrement = (fTimestampFrequency*tv.tv_sec);
timestampIncrement += (u_int32_t)(fTimestampFrequency*(tv.tv_usec/1000000.0) + 0.5); // note: rounding
// Then add this to our 'timestamp base':
if (fNextTimestampHasBeenPreset) {
// Make the returned timestamp the same as the current "fTimestampBase",
// so that timestamps begin with the value that was previously preset:
fTimestampBase -= timestampIncrement;
fNextTimestampHasBeenPreset = False;
}
u_int32_t const rtpTimestamp = fTimestampBase + timestampIncrement;
#ifdef DEBUG_TIMESTAMPS
fprintf(stderr, "fTimestampBase: 0x%08x, tv: %lu.%06ld\n\t=> RTP timestamp: 0x%08x\n",
fTimestampBase, tv.tv_sec, tv.tv_usec, rtpTimestamp);
fflush(stderr);
#endif
return rtpTimestamp;
}
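// (Note: as a worked example, with "fTimestampFrequency" 90000 and
// tv = 2.500000 seconds, "timestampIncrement" above is 90000*2 + 45000
// = 225000 ticks, so the result is fTimestampBase + 225000, modulo 2^32.)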
u_int32_t RTPSink::presetNextTimestamp() {
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
u_int32_t tsNow = convertToRTPTimestamp(timeNow);
fTimestampBase = tsNow;
fNextTimestampHasBeenPreset = True;
return tsNow;
}
void RTPSink::getTotalBitrate(unsigned& outNumBytes, double& outElapsedTime) {
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
outNumBytes = fTotalOctetCount;
outElapsedTime = (double)(timeNow.tv_sec-fTotalOctetCountStartTime.tv_sec)
+ (timeNow.tv_usec-fTotalOctetCountStartTime.tv_usec)/1000000.0;
fTotalOctetCount = 0;
fTotalOctetCountStartTime = timeNow;
}
void RTPSink::resetPresentationTimes() {
fInitialPresentationTime.tv_sec = fMostRecentPresentationTime.tv_sec = 0;
fInitialPresentationTime.tv_usec = fMostRecentPresentationTime.tv_usec = 0;
}
char const* RTPSink::sdpMediaType() const {
return "data";
// default SDP media (m=) type, unless redefined by subclasses
}
char* RTPSink::rtpmapLine() const {
if (rtpPayloadType() >= 96) { // the payload format type is dynamic
char* encodingParamsPart;
if (numChannels() != 1) {
encodingParamsPart = new char[1 + 20 /* max int len */];
sprintf(encodingParamsPart, "/%d", numChannels());
} else {
encodingParamsPart = strDup("");
}
char const* const rtpmapFmt = "a=rtpmap:%d %s/%d%s\r\n";
unsigned rtpmapFmtSize = strlen(rtpmapFmt)
+ 3 /* max char len */ + strlen(rtpPayloadFormatName())
+ 20 /* max int len */ + strlen(encodingParamsPart);
char* rtpmapLine = new char[rtpmapFmtSize];
sprintf(rtpmapLine, rtpmapFmt,
rtpPayloadType(), rtpPayloadFormatName(),
rtpTimestampFrequency(), encodingParamsPart);
delete[] encodingParamsPart;
return rtpmapLine;
} else {
// The payload format is static, so there's no "a=rtpmap:" line:
return strDup("");
}
}
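// (Note: as an example, a subclass using dynamic payload type 96, payload
// format name "H264", and timestamp frequency 90000 would produce
// "a=rtpmap:96 H264/90000\r\n"; for an audio sink with two channels,
// "/2" would additionally be appended after the frequency.)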
char const* RTPSink::auxSDPLine() {
return NULL; // by default
}
////////// RTPTransmissionStatsDB //////////
RTPTransmissionStatsDB::RTPTransmissionStatsDB(RTPSink& rtpSink)
: fOurRTPSink(rtpSink),
fTable(HashTable::create(ONE_WORD_HASH_KEYS)) {
fNumReceivers=0;
}
RTPTransmissionStatsDB::~RTPTransmissionStatsDB() {
// First, remove and delete all stats records from the table:
RTPTransmissionStats* stats;
while ((stats = (RTPTransmissionStats*)fTable->RemoveNext()) != NULL) {
delete stats;
}
// Then, delete the table itself:
delete fTable;
}
void RTPTransmissionStatsDB
::noteIncomingRR(u_int32_t SSRC, struct sockaddr_in const& lastFromAddress,
unsigned lossStats, unsigned lastPacketNumReceived,
unsigned jitter, unsigned lastSRTime, unsigned diffSR_RRTime) {
RTPTransmissionStats* stats = lookup(SSRC);
if (stats == NULL) {
// This is the first time we've heard of this SSRC.
// Create a new record for it:
stats = new RTPTransmissionStats(fOurRTPSink, SSRC);
if (stats == NULL) return;
add(SSRC, stats);
#ifdef DEBUG_RR
fprintf(stderr, "Adding new entry for SSRC %x in RTPTransmissionStatsDB\n", SSRC);
#endif
}
stats->noteIncomingRR(lastFromAddress,
lossStats, lastPacketNumReceived, jitter,
lastSRTime, diffSR_RRTime);
}
void RTPTransmissionStatsDB::removeRecord(u_int32_t SSRC) {
RTPTransmissionStats* stats = lookup(SSRC);
if (stats != NULL) {
long SSRC_long = (long)SSRC;
fTable->Remove((char const*)SSRC_long);
--fNumReceivers;
delete stats;
}
}
RTPTransmissionStatsDB::Iterator
::Iterator(RTPTransmissionStatsDB& receptionStatsDB)
: fIter(HashTable::Iterator::create(*(receptionStatsDB.fTable))) {
}
RTPTransmissionStatsDB::Iterator::~Iterator() {
delete fIter;
}
RTPTransmissionStats*
RTPTransmissionStatsDB::Iterator::next() {
char const* key; // dummy
return (RTPTransmissionStats*)(fIter->next(key));
}
RTPTransmissionStats* RTPTransmissionStatsDB::lookup(u_int32_t SSRC) const {
long SSRC_long = (long)SSRC;
return (RTPTransmissionStats*)(fTable->Lookup((char const*)SSRC_long));
}
void RTPTransmissionStatsDB::add(u_int32_t SSRC, RTPTransmissionStats* stats) {
long SSRC_long = (long)SSRC;
fTable->Add((char const*)SSRC_long, stats);
++fNumReceivers;
}
////////// RTPTransmissionStats //////////
RTPTransmissionStats::RTPTransmissionStats(RTPSink& rtpSink, u_int32_t SSRC)
: fOurRTPSink(rtpSink), fSSRC(SSRC), fLastPacketNumReceived(0),
fPacketLossRatio(0), fTotNumPacketsLost(0), fJitter(0),
fLastSRTime(0), fDiffSR_RRTime(0), fAtLeastTwoRRsHaveBeenReceived(False), fFirstPacket(True),
fTotalOctetCount_hi(0), fTotalOctetCount_lo(0),
fTotalPacketCount_hi(0), fTotalPacketCount_lo(0) {
gettimeofday(&fTimeCreated, NULL);
fLastOctetCount = rtpSink.octetCount();
fLastPacketCount = rtpSink.packetCount();
}
RTPTransmissionStats::~RTPTransmissionStats() {}
void RTPTransmissionStats
::noteIncomingRR(struct sockaddr_in const& lastFromAddress,
unsigned lossStats, unsigned lastPacketNumReceived,
unsigned jitter, unsigned lastSRTime,
unsigned diffSR_RRTime) {
if (fFirstPacket) {
fFirstPacket = False;
fFirstPacketNumReported = lastPacketNumReceived;
} else {
fAtLeastTwoRRsHaveBeenReceived = True;
fOldLastPacketNumReceived = fLastPacketNumReceived;
fOldTotNumPacketsLost = fTotNumPacketsLost;
}
gettimeofday(&fTimeReceived, NULL);
fLastFromAddress = lastFromAddress;
fPacketLossRatio = lossStats>>24;
fTotNumPacketsLost = lossStats&0xFFFFFF;
fLastPacketNumReceived = lastPacketNumReceived;
fJitter = jitter;
fLastSRTime = lastSRTime;
fDiffSR_RRTime = diffSR_RRTime;
#ifdef DEBUG_RR
fprintf(stderr, "RTCP RR data (received at %lu.%06ld): lossStats 0x%08x, lastPacketNumReceived 0x%08x, jitter 0x%08x, lastSRTime 0x%08x, diffSR_RRTime 0x%08x\n",
fTimeReceived.tv_sec, fTimeReceived.tv_usec, lossStats, lastPacketNumReceived, jitter, lastSRTime, diffSR_RRTime);
unsigned rtd = roundTripDelay();
fprintf(stderr, "=> round-trip delay: 0x%04x (== %f seconds)\n", rtd, rtd/65536.0);
#endif
// Update our counts of the total number of octets and packets sent towards
// this receiver:
u_int32_t newOctetCount = fOurRTPSink.octetCount();
u_int32_t octetCountDiff = newOctetCount - fLastOctetCount;
fLastOctetCount = newOctetCount;
u_int32_t prevTotalOctetCount_lo = fTotalOctetCount_lo;
fTotalOctetCount_lo += octetCountDiff;
if (fTotalOctetCount_lo < prevTotalOctetCount_lo) { // wrap around
++fTotalOctetCount_hi;
}
u_int32_t newPacketCount = fOurRTPSink.packetCount();
u_int32_t packetCountDiff = newPacketCount - fLastPacketCount;
fLastPacketCount = newPacketCount;
u_int32_t prevTotalPacketCount_lo = fTotalPacketCount_lo;
fTotalPacketCount_lo += packetCountDiff;
if (fTotalPacketCount_lo < prevTotalPacketCount_lo) { // wrap around
++fTotalPacketCount_hi;
}
}
unsigned RTPTransmissionStats::roundTripDelay() const {
// Compute the round-trip delay that was indicated by the most recently-received
// RTCP RR packet. Use the method noted in the RTP/RTCP specification (RFC 3550).
if (fLastSRTime == 0) {
// Either no RTCP RR packet has been received yet, or else the
// reporting receiver has not yet received any RTCP SR packets from us:
return 0;
}
// First, convert the time that we received the last RTCP RR packet to NTP format,
// in units of 1/65536 (2^-16) seconds:
unsigned lastReceivedTimeNTP_high
= fTimeReceived.tv_sec + 0x83AA7E80; // 1970 epoch -> 1900 epoch
double fractionalPart = (fTimeReceived.tv_usec*0x0400)/15625.0; // 2^16/10^6
unsigned lastReceivedTimeNTP
= (unsigned)((lastReceivedTimeNTP_high<<16) + fractionalPart + 0.5);
int rawResult = lastReceivedTimeNTP - fLastSRTime - fDiffSR_RRTime;
if (rawResult < 0) {
// This can happen if there's clock drift between the sender and receiver,
// and if the round-trip time was very small.
rawResult = 0;
}
return (unsigned)rawResult;
}
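// (Note: all three quantities above are in units of 2^-16 seconds - the
// 'middle 32 bits' of an NTP timestamp.  As a worked example, if we receive
// a RR at NTP time 0xA0000000, reporting lastSRTime 0x9FFF0000 and
// diffSR_RRTime 0x00008000, then the round-trip delay is
// 0xA0000000 - 0x9FFF0000 - 0x00008000 = 0x8000, i.e., 0.5 seconds.)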
void RTPTransmissionStats::getTotalOctetCount(u_int32_t& hi, u_int32_t& lo) {
hi = fTotalOctetCount_hi;
lo = fTotalOctetCount_lo;
}
void RTPTransmissionStats::getTotalPacketCount(u_int32_t& hi, u_int32_t& lo) {
hi = fTotalPacketCount_hi;
lo = fTotalPacketCount_lo;
}
unsigned RTPTransmissionStats::packetsReceivedSinceLastRR() const {
if (!fAtLeastTwoRRsHaveBeenReceived) return 0;
return fLastPacketNumReceived-fOldLastPacketNumReceived;
}
int RTPTransmissionStats::packetsLostBetweenRR() const {
if (!fAtLeastTwoRRsHaveBeenReceived) return 0;
return fTotNumPacketsLost - fOldTotNumPacketsLost;
}
live/liveMedia/MP3Internals.cpp 000444 001751 000000 00000063415 12265042432 016632 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// MP3 internal implementation details
// Implementation
#include "MP3InternalsHuffman.hh"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// This is crufty old code that needs to be cleaned up #####
static unsigned const live_tabsel[2][3][16] = {
{ {32,32,64,96,128,160,192,224,256,288,320,352,384,416,448,448},
{32,32,48,56, 64, 80, 96,112,128,160,192,224,256,320,384,384},
{32,32,40,48, 56, 64, 80, 96,112,128,160,192,224,256,320,320} },
{ {32,32,48,56,64,80,96,112,128,144,160,176,192,224,256,256},
{8,8,16,24,32,40,48,56,64,80,96,112,128,144,160,160},
{8,8,16,24,32,40,48,56,64,80,96,112,128,144,160,160} }
};
/* Note: live_tabsel[*][*][0 or 15] shouldn't occur; use dummy values there */
static long const live_freqs[]
= { 44100, 48000, 32000, 22050, 24000, 16000, 11025, 12000, 8000, 0 };
struct bandInfoStruct {
int longIdx[23];
int longDiff[22];
int shortIdx[14];
int shortDiff[13];
};
static struct bandInfoStruct const bandInfo[7] = {
/* MPEG 1.0 */
{ {0,4,8,12,16,20,24,30,36,44,52,62,74, 90,110,134,162,196,238,288,342,418,576},
{4,4,4,4,4,4,6,6,8, 8,10,12,16,20,24,28,34,42,50,54, 76,158},
{0,4*3,8*3,12*3,16*3,22*3,30*3,40*3,52*3,66*3, 84*3,106*3,136*3,192*3},
{4,4,4,4,6,8,10,12,14,18,22,30,56} } ,
{ {0,4,8,12,16,20,24,30,36,42,50,60,72, 88,106,128,156,190,230,276,330,384,576},
{4,4,4,4,4,4,6,6,6, 8,10,12,16,18,22,28,34,40,46,54, 54,192},
{0,4*3,8*3,12*3,16*3,22*3,28*3,38*3,50*3,64*3, 80*3,100*3,126*3,192*3},
{4,4,4,4,6,6,10,12,14,16,20,26,66} } ,
{ {0,4,8,12,16,20,24,30,36,44,54,66,82,102,126,156,194,240,296,364,448,550,576} ,
{4,4,4,4,4,4,6,6,8,10,12,16,20,24,30,38,46,56,68,84,102, 26} ,
{0,4*3,8*3,12*3,16*3,22*3,30*3,42*3,58*3,78*3,104*3,138*3,180*3,192*3} ,
{4,4,4,4,6,8,12,16,20,26,34,42,12} } ,
/* MPEG 2.0 */
{ {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576},
{6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 } ,
{0,4*3,8*3,12*3,18*3,24*3,32*3,42*3,56*3,74*3,100*3,132*3,174*3,192*3} ,
{4,4,4,6,6,8,10,14,18,26,32,42,18 } } ,
{ {0,6,12,18,24,30,36,44,54,66,80,96,114,136,162,194,232,278,330,394,464,540,576},
{6,6,6,6,6,6,8,10,12,14,16,18,22,26,32,38,46,52,64,70,76,36 } ,
{0,4*3,8*3,12*3,18*3,26*3,36*3,48*3,62*3,80*3,104*3,136*3,180*3,192*3} ,
{4,4,4,6,8,10,12,14,18,24,32,44,12 } } ,
{ {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576},
{6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 },
{0,4*3,8*3,12*3,18*3,26*3,36*3,48*3,62*3,80*3,104*3,134*3,174*3,192*3},
{4,4,4,6,8,10,12,14,18,24,30,40,18 } } ,
/* MPEG 2.5: wrong table! (it's just a copy of MPEG 2.0/44.1kHz) */
{ {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576},
{6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 } ,
{0,4*3,8*3,12*3,18*3,24*3,32*3,42*3,56*3,74*3,100*3,132*3,174*3,192*3} ,
{4,4,4,6,6,8,10,14,18,26,32,42,18 } } ,
};
unsigned int n_slen2[512]; /* MPEG 2.0 slen for 'normal' mode */
unsigned int i_slen2[256]; /* MPEG 2.0 slen for intensity stereo */
#define MPG_MD_MONO 3
////////// MP3FrameParams //////////
MP3FrameParams::MP3FrameParams()
: bv(frameBytes, 0, sizeof frameBytes) /* by default */ {
oldHdr = firstHdr = 0;
static Boolean doneInit = False;
if (doneInit) return;
int i,j,k,l;
for (i=0;i<5;i++) {
for (j=0;j<6;j++) {
for (k=0;k<6;k++) {
int n = k + j * 6 + i * 36;
i_slen2[n] = i|(j<<3)|(k<<6)|(3<<12);
}
}
}
for (i=0;i<4;i++) {
for (j=0;j<4;j++) {
for (k=0;k<4;k++) {
int n = k + j * 4 + i * 16;
i_slen2[n+180] = i|(j<<3)|(k<<6)|(4<<12);
}
}
}
for (i=0;i<4;i++) {
for (j=0;j<3;j++) {
int n = j + i * 3;
i_slen2[n+244] = i|(j<<3) | (5<<12);
n_slen2[n+500] = i|(j<<3) | (2<<12) | (1<<15);
}
}
for (i=0;i<5;i++) {
for (j=0;j<5;j++) {
for (k=0;k<4;k++) {
for (l=0;l<4;l++) {
int n = l + k * 4 + j * 16 + i * 80;
n_slen2[n] = i|(j<<3)|(k<<6)|(l<<9)|(0<<12);
}
}
}
}
for (i=0;i<5;i++) {
for (j=0;j<5;j++) {
for (k=0;k<4;k++) {
int n = k + j * 4 + i * 20;
n_slen2[n+400] = i|(j<<3)|(k<<6)|(1<<12);
}
}
}
doneInit = True;
}
MP3FrameParams::~MP3FrameParams() {
}
void MP3FrameParams::setParamsFromHeader() {
if (hdr & (1<<20)) {
isMPEG2 = (hdr & (1<<19)) ? 0x0 : 0x1;
isMPEG2_5 = 0;
}
else {
isMPEG2 = 1;
isMPEG2_5 = 1;
}
layer = 4-((hdr>>17)&3);
if (layer == 4) layer = 3; // layer==4 is not allowed
bitrateIndex = ((hdr>>12)&0xf);
if (isMPEG2_5) {
samplingFreqIndex = ((hdr>>10)&0x3) + 6;
} else {
samplingFreqIndex = ((hdr>>10)&0x3) + (isMPEG2*3);
}
hasCRC = (hdr & 0x10000) == 0;
padding = ((hdr>>9)&0x1);
extension = ((hdr>>8)&0x1);
mode = ((hdr>>6)&0x3);
mode_ext = ((hdr>>4)&0x3);
copyright = ((hdr>>3)&0x1);
original = ((hdr>>2)&0x1);
emphasis = hdr & 0x3;
stereo = (mode == MPG_MD_MONO) ? 1 : 2;
if (((hdr>>10)&0x3) == 0x3) {
#ifdef DEBUG_ERRORS
fprintf(stderr,"Stream error - hdr: 0x%08x\n", hdr);
#endif
}
bitrate = live_tabsel[isMPEG2][layer-1][bitrateIndex];
samplingFreq = live_freqs[samplingFreqIndex];
isStereo = (stereo > 1);
isFreeFormat = (bitrateIndex == 0);
frameSize
= ComputeFrameSize(bitrate, samplingFreq, padding, isMPEG2, layer);
sideInfoSize = computeSideInfoSize();
}
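// (Note: as a worked example, for the common header 0xFFFB9064 the code above
// yields MPEG-1 (bit 19 set), Layer III, bitrateIndex 9 (128 kbps for MPEG-1
// Layer III), samplingFreqIndex 0 (44100 Hz), and hasCRC False - the
// 'protection' bit being *set* means that no CRC is present.)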
unsigned MP3FrameParams::computeSideInfoSize() {
unsigned size;
if (isMPEG2) {
size = isStereo ? 17 : 9;
} else {
size = isStereo ? 32 : 17;
}
if (hasCRC) {
size += 2;
}
return size;
}
unsigned ComputeFrameSize(unsigned bitrate, unsigned samplingFreq,
Boolean usePadding, Boolean isMPEG2,
unsigned char layer) {
if (samplingFreq == 0) return 0;
unsigned const bitrateMultiplier = (layer == 1) ? 12000*4 : 144000;
unsigned framesize;
framesize = bitrate*bitrateMultiplier;
framesize /= samplingFreq<<(isMPEG2 ? 1 : 0);
framesize = framesize + usePadding - 4;
return framesize;
}
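// (Note: as a worked example, MPEG-1 Layer III at 128 kbps and 44100 Hz with
// no padding gives 128*144000/44100 = 417, then 417 + 0 - 4 = 413 bytes
// following the 4-byte header; for MPEG-2 the "<<1" doubles the divisor,
// roughly halving the frame size.)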
#define TRUNC_FAIRLY
static unsigned updateSideInfoSizes(MP3SideInfo& sideInfo, Boolean isMPEG2,
unsigned char const* mainDataPtr,
unsigned allowedNumBits,
unsigned& part23Length0a,
unsigned& part23Length0aTruncation,
unsigned& part23Length0b,
unsigned& part23Length0bTruncation,
unsigned& part23Length1a,
unsigned& part23Length1aTruncation,
unsigned& part23Length1b,
unsigned& part23Length1bTruncation) {
unsigned p23L0, p23L1 = 0, p23L0Trunc = 0, p23L1Trunc = 0;
p23L0 = sideInfo.ch[0].gr[0].part2_3_length;
p23L1 = isMPEG2 ? 0 : sideInfo.ch[0].gr[1].part2_3_length;
#ifdef TRUNC_ONLY0
if (p23L0 < allowedNumBits)
allowedNumBits = p23L0;
#endif
#ifdef TRUNC_ONLY1
if (p23L1 < allowedNumBits)
allowedNumBits = p23L1;
#endif
if (p23L0 + p23L1 > allowedNumBits) {
/* We need to shorten one or both fields */
unsigned truncation = p23L0 + p23L1 - allowedNumBits;
#ifdef TRUNC_FAIRLY
p23L0Trunc = (truncation*p23L0)/(p23L0 + p23L1);
p23L1Trunc = truncation - p23L0Trunc;
#endif
#if defined(TRUNC_FAVOR0) || defined(TRUNC_ONLY0)
p23L1Trunc = (truncation>p23L1) ? p23L1 : truncation;
p23L0Trunc = truncation - p23L1Trunc;
#endif
#if defined(TRUNC_FAVOR1) || defined(TRUNC_ONLY1)
p23L0Trunc = (truncation>p23L0) ? p23L0 : truncation;
p23L1Trunc = truncation - p23L0Trunc;
#endif
}
/* ASSERT: (p23L0Trunc <= p23L0) && (p23L1Trunc <= p23L1) */
p23L0 -= p23L0Trunc; p23L1 -= p23L1Trunc;
#ifdef DEBUG
fprintf(stderr, "updateSideInfoSizes (allowed: %d): %d->%d, %d->%d\n", allowedNumBits, p23L0+p23L0Trunc, p23L0, p23L1+p23L1Trunc, p23L1);
#endif
// The truncations computed above are still estimates. We need to
// adjust them so that the new fields will continue to end on
// Huffman-encoded sample boundaries:
updateSideInfoForHuffman(sideInfo, isMPEG2, mainDataPtr,
p23L0, p23L1,
part23Length0a, part23Length0aTruncation,
part23Length0b, part23Length0bTruncation,
part23Length1a, part23Length1aTruncation,
part23Length1b, part23Length1bTruncation);
p23L0 = part23Length0a + part23Length0b;
p23L1 = part23Length1a + part23Length1b;
sideInfo.ch[0].gr[0].part2_3_length = p23L0;
sideInfo.ch[0].gr[1].part2_3_length = p23L1;
part23Length0bTruncation
+= sideInfo.ch[1].gr[0].part2_3_length; /* allow for stereo */
sideInfo.ch[1].gr[0].part2_3_length = 0; /* output mono */
sideInfo.ch[1].gr[1].part2_3_length = 0; /* output mono */
return p23L0 + p23L1;
}
Boolean GetADUInfoFromMP3Frame(unsigned char const* framePtr,
unsigned totFrameSize,
unsigned& hdr, unsigned& frameSize,
MP3SideInfo& sideInfo, unsigned& sideInfoSize,
unsigned& backpointer, unsigned& aduSize) {
if (totFrameSize < 4) return False; // there's not enough data
MP3FrameParams fr;
fr.hdr = ((unsigned)framePtr[0] << 24) | ((unsigned)framePtr[1] << 16)
| ((unsigned)framePtr[2] << 8) | (unsigned)framePtr[3];
fr.setParamsFromHeader();
fr.setBytePointer(framePtr + 4, totFrameSize - 4); // skip hdr
frameSize = 4 + fr.frameSize;
if (fr.layer != 3) {
// Special case for non-layer III frames
backpointer = 0;
sideInfoSize = 0;
aduSize = fr.frameSize;
return True;
}
sideInfoSize = fr.sideInfoSize;
if (totFrameSize < 4 + sideInfoSize) return False; // not enough data
fr.getSideInfo(sideInfo);
hdr = fr.hdr;
backpointer = sideInfo.main_data_begin;
unsigned numBits = sideInfo.ch[0].gr[0].part2_3_length;
numBits += sideInfo.ch[0].gr[1].part2_3_length;
numBits += sideInfo.ch[1].gr[0].part2_3_length;
numBits += sideInfo.ch[1].gr[1].part2_3_length;
aduSize = (numBits+7)/8;
#ifdef DEBUG
fprintf(stderr, "mp3GetADUInfoFromFrame: hdr: %08x, frameSize: %d, part2_3_lengths: %d,%d,%d,%d, aduSize: %d, backpointer: %d\n", hdr, frameSize, sideInfo.ch[0].gr[0].part2_3_length, sideInfo.ch[0].gr[1].part2_3_length, sideInfo.ch[1].gr[0].part2_3_length, sideInfo.ch[1].gr[1].part2_3_length, aduSize, backpointer);
#endif
return True;
}
static void getSideInfo1(MP3FrameParams& fr, MP3SideInfo& si,
int stereo, int ms_stereo, long sfreq,
int /*single*/) {
int ch, gr;
#if 0
int powdiff = (single == 3) ? 4 : 0;
#endif
/* initialize all four "part2_3_length" fields to zero: */
si.ch[0].gr[0].part2_3_length = 0; si.ch[1].gr[0].part2_3_length = 0;
si.ch[0].gr[1].part2_3_length = 0; si.ch[1].gr[1].part2_3_length = 0;
si.main_data_begin = fr.getBits(9);
if (stereo == 1)
si.private_bits = fr.getBits(5);
else
si.private_bits = fr.getBits(3);
  for (ch=0; ch<stereo; ch++) {
    si.ch[ch].gr[0].scfsi = -1;
    si.ch[ch].gr[1].scfsi = fr.getBits(4);
  }
  for (gr=0; gr<2; gr++) {
    for (ch=0; ch<stereo; ch++) {
      MP3SideInfo::gr_info_s& gr_info = si.ch[ch].gr[gr];
      gr_info.part2_3_length = fr.getBits(12);
      gr_info.big_values = fr.getBits(9);
      gr_info.global_gain = fr.getBits(8);
      gr_info.scalefac_compress = fr.getBits(4);
      /* window-switching flag == 1 for block_type != 0;
         block_type == 0 -> win-sw-flag = 0 */
gr_info.window_switching_flag = fr.get1Bit();
if (gr_info.window_switching_flag) {
int i;
gr_info.block_type = fr.getBits(2);
gr_info.mixed_block_flag = fr.get1Bit();
gr_info.table_select[0] = fr.getBits(5);
gr_info.table_select[1] = fr.getBits(5);
/*
* table_select[2] not needed, because there is no region2,
* but to satisfy some verification tools we set it anyway.
*/
gr_info.table_select[2] = 0;
for (i=0;i<3;i++) {
gr_info.subblock_gain[i] = fr.getBits(3);
gr_info.full_gain[i]
= gr_info.pow2gain + ((gr_info.subblock_gain[i])<<3);
}
#ifdef DEBUG_ERRORS
if (gr_info.block_type == 0) {
fprintf(stderr,"Blocktype == 0 and window-switching == 1 not allowed.\n");
}
#endif
/* region_count/start parameters are implicit in this case. */
gr_info.region1start = 36>>1;
gr_info.region2start = 576>>1;
}
else
{
int i,r0c,r1c;
for (i=0; i<3; i++) {
gr_info.table_select[i] = fr.getBits(5);
}
r0c = gr_info.region0_count = fr.getBits(4);
r1c = gr_info.region1_count = fr.getBits(3);
gr_info.region1start = bandInfo[sfreq].longIdx[r0c+1] >> 1 ;
gr_info.region2start = bandInfo[sfreq].longIdx[r0c+1+r1c+1] >> 1;
gr_info.block_type = 0;
gr_info.mixed_block_flag = 0;
}
gr_info.preflag = fr.get1Bit();
gr_info.scalefac_scale = fr.get1Bit();
gr_info.count1table_select = fr.get1Bit();
}
}
}
static void getSideInfo2(MP3FrameParams& fr, MP3SideInfo& si,
int stereo, int ms_stereo, long sfreq,
int /*single*/) {
int ch;
#if 0
int powdiff = (single == 3) ? 4 : 0;
#endif
/* initialize all four "part2_3_length" fields to zero: */
si.ch[0].gr[0].part2_3_length = 0; si.ch[1].gr[0].part2_3_length = 0;
si.ch[0].gr[1].part2_3_length = 0; si.ch[1].gr[1].part2_3_length = 0;
si.main_data_begin = fr.getBits(8);
if (stereo == 1)
si.private_bits = fr.get1Bit();
else
si.private_bits = fr.getBits(2);
  for (ch=0; ch<stereo; ch++) {
    MP3SideInfo::gr_info_s& gr_info = si.ch[ch].gr[0];
    gr_info.part2_3_length = fr.getBits(12);
    gr_info.big_values = fr.getBits(9);
    gr_info.global_gain = fr.getBits(8);
    gr_info.scalefac_compress = fr.getBits(9);
    /* window-switching flag == 1 for block_type != 0;
       block_type == 0 -> win-sw-flag = 0 */
gr_info.window_switching_flag = fr.get1Bit();
if (gr_info.window_switching_flag) {
int i;
gr_info.block_type = fr.getBits(2);
gr_info.mixed_block_flag = fr.get1Bit();
gr_info.table_select[0] = fr.getBits(5);
gr_info.table_select[1] = fr.getBits(5);
/*
* table_select[2] not needed, because there is no region2,
* but to satisfy some verification tools we set it anyway.
*/
gr_info.table_select[2] = 0;
for (i=0;i<3;i++) {
gr_info.subblock_gain[i] = fr.getBits(3);
gr_info.full_gain[i]
= gr_info.pow2gain + ((gr_info.subblock_gain[i])<<3);
}
#ifdef DEBUG_ERRORS
if (gr_info.block_type == 0) {
fprintf(stderr,"Blocktype == 0 and window-switching == 1 not allowed.\n");
}
#endif
/* region_count/start parameters are implicit in this case. */
/* check this again! */
if (gr_info.block_type == 2)
gr_info.region1start = 36>>1;
else {
gr_info.region1start = 54>>1;
}
gr_info.region2start = 576>>1;
}
else
{
int i,r0c,r1c;
for (i=0; i<3; i++) {
gr_info.table_select[i] = fr.getBits(5);
}
r0c = gr_info.region0_count = fr.getBits(4);
r1c = gr_info.region1_count = fr.getBits(3);
gr_info.region1start = bandInfo[sfreq].longIdx[r0c+1] >> 1 ;
gr_info.region2start = bandInfo[sfreq].longIdx[r0c+1+r1c+1] >> 1;
gr_info.block_type = 0;
gr_info.mixed_block_flag = 0;
}
gr_info.scalefac_scale = fr.get1Bit();
gr_info.count1table_select = fr.get1Bit();
}
}
#define MPG_MD_JOINT_STEREO 1
void MP3FrameParams::getSideInfo(MP3SideInfo& si) {
// First skip over the CRC if present:
if (hasCRC) getBits(16);
int single = -1;
int ms_stereo;
int sfreq = samplingFreqIndex;
if (stereo == 1) {
single = 0;
}
ms_stereo = (mode == MPG_MD_JOINT_STEREO) && (mode_ext & 0x2);
if (isMPEG2) {
getSideInfo2(*this, si, stereo, ms_stereo, sfreq, single);
} else {
getSideInfo1(*this, si, stereo, ms_stereo, sfreq, single);
}
}
static void putSideInfo1(BitVector& bv,
MP3SideInfo const& si, Boolean isStereo) {
int ch, gr, i;
int stereo = isStereo ? 2 : 1;
bv.putBits(si.main_data_begin,9);
if (stereo == 1)
bv.putBits(si.private_bits, 5);
else
bv.putBits(si.private_bits, 3);
  for (ch=0; ch<stereo; ch++)
    bv.putBits(si.ch[ch].gr[1].scfsi, 4);
  for (gr=0; gr<2; gr++) {
    for (ch=0; ch<stereo; ch++) {
      MP3SideInfo::gr_info_s const& gr_info = si.ch[ch].gr[gr];
      bv.putBits(gr_info.part2_3_length, 12);
      bv.putBits(gr_info.big_values, 9);
      bv.putBits(gr_info.global_gain, 8);
      bv.putBits(gr_info.scalefac_compress, 4);
      bv.put1Bit(gr_info.window_switching_flag);
      if (gr_info.window_switching_flag) {
        bv.putBits(gr_info.block_type, 2);
        bv.put1Bit(gr_info.mixed_block_flag);
        for (i=0; i<2; i++)
          bv.putBits(gr_info.table_select[i], 5);
        for (i=0; i<3; i++)
          bv.putBits(gr_info.subblock_gain[i], 3);
      } else {
        for (i=0; i<3; i++)
          bv.putBits(gr_info.table_select[i], 5);
        bv.putBits(gr_info.region0_count, 4);
        bv.putBits(gr_info.region1_count, 3);
      }
      bv.put1Bit(gr_info.preflag);
      bv.put1Bit(gr_info.scalefac_scale);
      bv.put1Bit(gr_info.count1table_select);
    }
  }
}
static void putSideInfo2(BitVector& bv,
                         MP3SideInfo const& si, Boolean isStereo) {
  int ch, i;
  int stereo = isStereo ? 2 : 1;
  bv.putBits(si.main_data_begin, 8);
  if (stereo == 1)
    bv.put1Bit(si.private_bits);
  else
    bv.putBits(si.private_bits, 2);
  for (ch=0; ch<stereo; ch++) {
    MP3SideInfo::gr_info_s const& gr_info = si.ch[ch].gr[0];
    bv.putBits(gr_info.part2_3_length, 12);
    bv.putBits(gr_info.big_values, 9);
    bv.putBits(gr_info.global_gain, 8);
    bv.putBits(gr_info.scalefac_compress, 9);
    bv.put1Bit(gr_info.window_switching_flag);
    if (gr_info.window_switching_flag) {
      bv.putBits(gr_info.block_type, 2);
      bv.put1Bit(gr_info.mixed_block_flag);
      for (i=0; i<2; i++)
        bv.putBits(gr_info.table_select[i], 5);
      for (i=0; i<3; i++)
        bv.putBits(gr_info.subblock_gain[i], 3);
    } else {
      for (i=0; i<3; i++)
        bv.putBits(gr_info.table_select[i], 5);
      bv.putBits(gr_info.region0_count, 4);
      bv.putBits(gr_info.region1_count, 3);
    }
    bv.put1Bit(gr_info.scalefac_scale);
    bv.put1Bit(gr_info.count1table_select);
  }
}
void PutMP3SideInfoIntoFrame(MP3SideInfo const& sideInfo,
                             MP3FrameParams const& fr,
                             unsigned char* framePtr) {
  BitVector bv(framePtr, 0, 8*fr.sideInfoSize);
  if (fr.hasCRC) bv.skipBits(16); // skip over the CRC
  if (fr.isMPEG2) {
    putSideInfo2(bv, sideInfo, fr.isStereo);
  } else {
    putSideInfo1(bv, sideInfo, fr.isStereo);
  }
}
static unsigned MP3BitrateToBitrateIndex(unsigned bitrate /* in kbps */,
                                         Boolean isMPEG2) {
  for (unsigned i = 1; i < 15; ++i) {
    if (live_tabsel[isMPEG2][2][i] >= bitrate)
return i;
}
// "bitrate" was larger than any possible, so return the largest possible:
return 14;
}
static void outputHeader(unsigned char* toPtr, unsigned hdr) {
toPtr[0] = (unsigned char)(hdr>>24);
toPtr[1] = (unsigned char)(hdr>>16);
toPtr[2] = (unsigned char)(hdr>>8);
toPtr[3] = (unsigned char)(hdr);
}
static void assignADUBackpointer(MP3FrameParams const& fr,
unsigned aduSize,
MP3SideInfo& sideInfo,
unsigned& availableBytesForBackpointer) {
// Give the ADU as large a backpointer as possible:
unsigned maxBackpointerSize = fr.isMPEG2 ? 255 : 511;
unsigned backpointerSize = availableBytesForBackpointer;
if (backpointerSize > maxBackpointerSize) {
backpointerSize = maxBackpointerSize;
}
// Store the new backpointer now:
sideInfo.main_data_begin = backpointerSize;
// Figure out how many bytes are available for the *next* ADU's backpointer:
availableBytesForBackpointer
= backpointerSize + fr.frameSize - fr.sideInfoSize;
if (availableBytesForBackpointer < aduSize) {
availableBytesForBackpointer = 0;
} else {
availableBytesForBackpointer -= aduSize;
}
}
unsigned TranscodeMP3ADU(unsigned char const* fromPtr, unsigned fromSize,
unsigned toBitrate,
unsigned char* toPtr, unsigned toMaxSize,
unsigned& availableBytesForBackpointer) {
// Begin by parsing the input ADU's parameters:
unsigned hdr, inFrameSize, inSideInfoSize, backpointer, inAduSize;
MP3SideInfo sideInfo;
if (!GetADUInfoFromMP3Frame(fromPtr, fromSize,
hdr, inFrameSize, sideInfo, inSideInfoSize,
backpointer, inAduSize)) {
return 0;
}
fromPtr += (4+inSideInfoSize); // skip to 'main data'
// Alter the 4-byte MPEG header to reflect the output ADU:
// (different bitrate; mono; no CRC)
Boolean isMPEG2 = ((hdr&0x00080000) == 0);
unsigned toBitrateIndex = MP3BitrateToBitrateIndex(toBitrate, isMPEG2);
hdr &=~ 0xF000; hdr |= (toBitrateIndex<<12); // set bitrate index
hdr |= 0x10200; // turn on !error-prot and padding bits
hdr &=~ 0xC0; hdr |= 0xC0; // set mode to 3 (mono)
// Set up the rest of the parameters of the new ADU:
MP3FrameParams outFr;
outFr.hdr = hdr;
outFr.setParamsFromHeader();
// Figure out how big to make the output ADU:
unsigned inAveAduSize = inFrameSize - inSideInfoSize;
unsigned outAveAduSize = outFr.frameSize - outFr.sideInfoSize;
unsigned desiredOutAduSize /*=inAduSize*outAveAduSize/inAveAduSize*/
= (2*inAduSize*outAveAduSize + inAveAduSize)/(2*inAveAduSize);
// this rounds to the nearest integer
if (toMaxSize < (4 + outFr.sideInfoSize)) return 0;
unsigned maxOutAduSize = toMaxSize - (4 + outFr.sideInfoSize);
if (desiredOutAduSize > maxOutAduSize) {
desiredOutAduSize = maxOutAduSize;
}
// Figure out the new sizes of the various 'part23 lengths',
// and how much they are truncated:
unsigned part23Length0a, part23Length0aTruncation;
unsigned part23Length0b, part23Length0bTruncation;
unsigned part23Length1a, part23Length1aTruncation;
unsigned part23Length1b, part23Length1bTruncation;
unsigned numAduBits
= updateSideInfoSizes(sideInfo, outFr.isMPEG2,
fromPtr, 8*desiredOutAduSize,
part23Length0a, part23Length0aTruncation,
part23Length0b, part23Length0bTruncation,
part23Length1a, part23Length1aTruncation,
part23Length1b, part23Length1bTruncation);
#ifdef DEBUG
fprintf(stderr, "shrinkage %d->%d [(%d,%d),(%d,%d)] (trunc: [(%d,%d),(%d,%d)]) {%d}\n", inAduSize, (numAduBits+7)/8,
part23Length0a, part23Length0b, part23Length1a, part23Length1b,
part23Length0aTruncation, part23Length0bTruncation,
part23Length1aTruncation, part23Length1bTruncation,
maxOutAduSize);
#endif
unsigned actualOutAduSize = (numAduBits+7)/8;
// Give the new ADU an appropriate 'backpointer':
assignADUBackpointer(outFr, actualOutAduSize, sideInfo, availableBytesForBackpointer);
///// Now output the new ADU:
// 4-byte header
outputHeader(toPtr, hdr); toPtr += 4;
// side info
PutMP3SideInfoIntoFrame(sideInfo, outFr, toPtr); toPtr += outFr.sideInfoSize;
// 'main data', using the new lengths
unsigned toBitOffset = 0;
unsigned fromBitOffset = 0;
/* rebuild portion 0a: */
memmove(toPtr, fromPtr, (part23Length0a+7)/8);
toBitOffset += part23Length0a;
fromBitOffset += part23Length0a + part23Length0aTruncation;
/* rebuild portion 0b: */
shiftBits(toPtr, toBitOffset, fromPtr, fromBitOffset, part23Length0b);
toBitOffset += part23Length0b;
fromBitOffset += part23Length0b + part23Length0bTruncation;
/* rebuild portion 1a: */
shiftBits(toPtr, toBitOffset, fromPtr, fromBitOffset, part23Length1a);
toBitOffset += part23Length1a;
fromBitOffset += part23Length1a + part23Length1aTruncation;
/* rebuild portion 1b: */
shiftBits(toPtr, toBitOffset, fromPtr, fromBitOffset, part23Length1b);
toBitOffset += part23Length1b;
/* zero out any remaining bits (probably unnecessary, but...) */
unsigned char const zero = '\0';
shiftBits(toPtr, toBitOffset, &zero, 0,
actualOutAduSize*8 - numAduBits);
return 4 + outFr.sideInfoSize + actualOutAduSize;
}
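// Note: What follows is a minimal, illustrative usage sketch of
// "TranscodeMP3ADU()" (disabled; the names here are hypothetical, not part of
// the library).  "availableBytesForBackpointer" must be carried across calls,
// so that each output ADU's backpointer stays consistent with how much
// bit-reservoir space the previously output frames actually left behind:
#if 0
void transcodeADUSequence(unsigned char const* aduPtrs[],
                          unsigned const aduSizes[], unsigned numADUs,
                          unsigned toBitrate /* in kbps */) {
  unsigned availableBytesForBackpointer = 0; // start with an empty reservoir
  unsigned char outBuf[2000];
  for (unsigned i = 0; i < numADUs; ++i) {
    unsigned outSize
      = TranscodeMP3ADU(aduPtrs[i], aduSizes[i], toBitrate,
                        outBuf, sizeof outBuf, availableBytesForBackpointer);
    if (outSize == 0) break; // the input ADU couldn't be parsed
    // ... emit "outSize" bytes from "outBuf" ...
  }
}
#endif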
live/liveMedia/FramedFileSource.cpp 000444 001751 000000 00000002143 12265042432 017521 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Framed File Sources
// Implementation
#include "FramedFileSource.hh"
////////// FramedFileSource //////////
FramedFileSource::FramedFileSource(UsageEnvironment& env, FILE* fid)
: FramedSource(env), fFid(fid) {
}
FramedFileSource::~FramedFileSource() {
}
live/liveMedia/MP3ADUdescriptor.hh 000444 001751 000000 00000003465 12265042432 017217 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Descriptor preceding frames of 'ADU' MP3 streams (for improved loss-tolerance)
// C++ header
#ifndef _MP3_ADU_DESCRIPTOR_HH
#define _MP3_ADU_DESCRIPTOR_HH
// A class for handling the descriptor that begins each ADU frame:
// (Note: We don't yet implement fragmentation)
class ADUdescriptor {
public:
// Operations for generating a new descriptor
static unsigned computeSize(unsigned remainingFrameSize) {
return remainingFrameSize >= 64 ? 2 : 1;
}
static unsigned generateDescriptor(unsigned char*& toPtr, unsigned remainingFrameSize);
// returns descriptor size; increments "toPtr" afterwards
static void generateTwoByteDescriptor(unsigned char*& toPtr, unsigned remainingFrameSize);
// always generates a 2-byte descriptor, even if "remainingFrameSize" is
// small enough for a 1-byte descriptor
// Operations for reading a descriptor
static unsigned getRemainingFrameSize(unsigned char*& fromPtr);
// increments "fromPtr" afterwards
};
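// (Note: from "computeSize()" above one can infer the wire layout: a 1-byte
// descriptor carries a frame size of up to 63 in its low 6 bits, while the
// 2-byte form - selected by a flag bit - carries a 14-bit size, and is used
// for frames of 64 bytes or more.)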
#endif
live/liveMedia/JPEGVideoRTPSink.cpp 000444 001751 000000 00000011505 12265042432 017273 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for JPEG video (RFC 2435)
// Implementation
#include "JPEGVideoRTPSink.hh"
#include "JPEGVideoSource.hh"
JPEGVideoRTPSink
::JPEGVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs)
: VideoRTPSink(env, RTPgs, 26, 90000, "JPEG") {
}
JPEGVideoRTPSink::~JPEGVideoRTPSink() {
}
JPEGVideoRTPSink*
JPEGVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) {
return new JPEGVideoRTPSink(env, RTPgs);
}
Boolean JPEGVideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
return source.isJPEGVideoSource();
}
Boolean JPEGVideoRTPSink
::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
unsigned /*numBytesInFrame*/) const {
// A packet can contain only one frame
return False;
}
void JPEGVideoRTPSink
::doSpecialFrameHandling(unsigned fragmentationOffset,
unsigned char* /*frameStart*/,
unsigned /*numBytesInFrame*/,
struct timeval framePresentationTime,
unsigned numRemainingBytes) {
// Our source is known to be a JPEGVideoSource
JPEGVideoSource* source = (JPEGVideoSource*)fSource;
if (source == NULL) return; // sanity check
u_int8_t mainJPEGHeader[8]; // the special header
u_int8_t const type = source->type();
mainJPEGHeader[0] = 0; // Type-specific
mainJPEGHeader[1] = fragmentationOffset >> 16;
mainJPEGHeader[2] = fragmentationOffset >> 8;
mainJPEGHeader[3] = fragmentationOffset;
mainJPEGHeader[4] = type;
mainJPEGHeader[5] = source->qFactor();
mainJPEGHeader[6] = source->width();
mainJPEGHeader[7] = source->height();
setSpecialHeaderBytes(mainJPEGHeader, sizeof mainJPEGHeader);
unsigned restartMarkerHeaderSize = 0; // by default
if (type >= 64 && type <= 127) {
// There is also a Restart Marker Header:
restartMarkerHeaderSize = 4;
u_int16_t const restartInterval = source->restartInterval(); // should be non-zero
u_int8_t restartMarkerHeader[4];
restartMarkerHeader[0] = restartInterval>>8;
restartMarkerHeader[1] = restartInterval&0xFF;
restartMarkerHeader[2] = restartMarkerHeader[3] = 0xFF; // F=L=1; Restart Count = 0x3FFF
setSpecialHeaderBytes(restartMarkerHeader, restartMarkerHeaderSize,
sizeof mainJPEGHeader/* start position */);
}
if (fragmentationOffset == 0 && source->qFactor() >= 128) {
// There is also a Quantization Header:
u_int8_t precision;
u_int16_t length;
u_int8_t const* quantizationTables
= source->quantizationTables(precision, length);
unsigned const quantizationHeaderSize = 4 + length;
u_int8_t* quantizationHeader = new u_int8_t[quantizationHeaderSize];
quantizationHeader[0] = 0; // MBZ
quantizationHeader[1] = precision;
quantizationHeader[2] = length >> 8;
quantizationHeader[3] = length&0xFF;
if (quantizationTables != NULL) { // sanity check
for (u_int16_t i = 0; i < length; ++i) {
quantizationHeader[4+i] = quantizationTables[i];
}
}
setSpecialHeaderBytes(quantizationHeader, quantizationHeaderSize,
sizeof mainJPEGHeader + restartMarkerHeaderSize/* start position */);
delete[] quantizationHeader;
}
if (numRemainingBytes == 0) {
// This packet contains the last (or only) fragment of the frame.
// Set the RTP 'M' ('marker') bit:
setMarkerBit();
}
// Also set the RTP timestamp:
setTimestamp(framePresentationTime);
}
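// Illustrative sketch (hypothetical helper, not in the original file): the
// inverse of the packing above, showing how a receiver would unpack the
// 8-byte RFC 2435 main header. Note that RFC 2435 carries the frame width
// and height in units of 8 pixels:
static void decodeJPEGMainHeader(u_int8_t const header[8],
				 unsigned& fragmentationOffset, u_int8_t& type,
				 u_int8_t& qFactor,
				 unsigned& widthInPixels, unsigned& heightInPixels) {
  fragmentationOffset = (header[1]<<16) | (header[2]<<8) | header[3];
  type = header[4];
  qFactor = header[5];
  widthInPixels = 8*header[6]; // the field is in 8-pixel blocks
  heightInPixels = 8*header[7];
}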
unsigned JPEGVideoRTPSink::specialHeaderSize() const {
// Our source is known to be a JPEGVideoSource
JPEGVideoSource* source = (JPEGVideoSource*)fSource;
if (source == NULL) return 0; // sanity check
unsigned headerSize = 8; // by default
u_int8_t const type = source->type();
if (type >= 64 && type <= 127) {
// There is also a Restart Marker Header:
headerSize += 4;
}
if (curFragmentationOffset() == 0 && source->qFactor() >= 128) {
// There is also a Quantization Header:
u_int8_t dummy;
u_int16_t quantizationTablesSize;
(void)(source->quantizationTables(dummy, quantizationTablesSize));
headerSize += 4 + quantizationTablesSize;
}
return headerSize;
}
live/liveMedia/MPEG4LATMAudioRTPSource.cpp 000444 001751 000000 00000017523 12265042432 020375 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// MPEG-4 audio, using LATM multiplexing
// Implementation
#include "MPEG4LATMAudioRTPSource.hh"
////////// LATMBufferedPacket and LATMBufferedPacketFactory //////////
class LATMBufferedPacket: public BufferedPacket {
public:
LATMBufferedPacket(Boolean includeLATMDataLengthField);
virtual ~LATMBufferedPacket();
private: // redefined virtual functions
virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
unsigned dataSize);
private:
Boolean fIncludeLATMDataLengthField;
};
class LATMBufferedPacketFactory: public BufferedPacketFactory {
private: // redefined virtual functions
virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
};
///////// MPEG4LATMAudioRTPSource implementation ////////
MPEG4LATMAudioRTPSource*
MPEG4LATMAudioRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency) {
return new MPEG4LATMAudioRTPSource(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency);
}
MPEG4LATMAudioRTPSource
::MPEG4LATMAudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency)
: MultiFramedRTPSource(env, RTPgs,
rtpPayloadFormat, rtpTimestampFrequency,
new LATMBufferedPacketFactory),
fIncludeLATMDataLengthField(True) {
}
MPEG4LATMAudioRTPSource::~MPEG4LATMAudioRTPSource() {
}
void MPEG4LATMAudioRTPSource::omitLATMDataLengthField() {
fIncludeLATMDataLengthField = False;
}
Boolean MPEG4LATMAudioRTPSource
::processSpecialHeader(BufferedPacket* packet,
unsigned& resultSpecialHeaderSize) {
fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame;
// whether the *previous* packet ended a frame
// The RTP "M" (marker) bit indicates the last fragment of a frame:
fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
// There is no special header
resultSpecialHeaderSize = 0;
return True;
}
char const* MPEG4LATMAudioRTPSource::MIMEtype() const {
return "audio/MP4A-LATM";
}
////////// LATMBufferedPacket and LATMBufferedPacketFactory implementation
LATMBufferedPacket::LATMBufferedPacket(Boolean includeLATMDataLengthField)
: fIncludeLATMDataLengthField(includeLATMDataLengthField) {
}
LATMBufferedPacket::~LATMBufferedPacket() {
}
unsigned LATMBufferedPacket
::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
// Look at the LATM data length byte(s), to determine the size
// of the LATM payload.
unsigned resultFrameSize = 0;
unsigned i;
for (i = 0; i < dataSize; ++i) {
resultFrameSize += framePtr[i];
if (framePtr[i] != 0xFF) break;
}
++i;
if (fIncludeLATMDataLengthField) {
resultFrameSize += i;
} else {
framePtr += i;
dataSize -= i;
}
return (resultFrameSize <= dataSize) ? resultFrameSize : dataSize;
}
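// Illustrative sketch (hypothetical helper, not in the original file): the
// encoding that the loop above inverts. A LATM "PayloadLengthInfo" is a run
// of 0xFF bytes (each contributing 255) ended by one byte < 0xFF:
static unsigned encodeLATMDataLengthField(unsigned frameSize, unsigned char* to) {
  unsigned numBytesWritten = 0;
  while (frameSize >= 255) {
    to[numBytesWritten++] = 0xFF;
    frameSize -= 255;
  }
  to[numBytesWritten++] = (unsigned char)frameSize;
  return numBytesWritten; // the size of the length field itself
}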
BufferedPacket* LATMBufferedPacketFactory
::createNewPacket(MultiFramedRTPSource* ourSource) {
MPEG4LATMAudioRTPSource* source = (MPEG4LATMAudioRTPSource*)ourSource;
return new LATMBufferedPacket(source->returnedFrameIncludesLATMDataLengthField());
}
////////// parseStreamMuxConfigStr() implementation //////////
static Boolean getNibble(char const*& configStr,
unsigned char& resultNibble) {
char c = configStr[0];
if (c == '\0') return False; // we've reached the end
if (c >= '0' && c <= '9') {
resultNibble = c - '0';
} else if (c >= 'A' && c <= 'F') {
resultNibble = 10 + c - 'A';
} else if (c >= 'a' && c <= 'f') {
resultNibble = 10 + c - 'a';
} else {
return False;
}
++configStr; // move to the next nibble
return True;
}
static Boolean getByte(char const*& configStr, unsigned char& resultByte) {
resultByte = 0; // by default, in case parsing fails
unsigned char firstNibble;
if (!getNibble(configStr, firstNibble)) return False;
resultByte = firstNibble<<4;
unsigned char secondNibble = 0;
if (!getNibble(configStr, secondNibble) && configStr[0] != '\0') {
// There's a second nibble, but it's malformed
return False;
}
resultByte |= secondNibble;
return True;
}
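// Illustrative sketch (hypothetical check, not in the original file): a
// minimal use of the hex parsing above. For the configuration string "40",
// getByte() consumes both nibbles and yields the single byte 0x40:
static Boolean exampleGetByte() {
  char const* configStr = "40";
  unsigned char resultByte;
  return getByte(configStr, resultByte) && resultByte == 0x40;
}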
Boolean
parseStreamMuxConfigStr(char const* configStr,
// result parameters:
Boolean& audioMuxVersion,
Boolean& allStreamsSameTimeFraming,
unsigned char& numSubFrames,
unsigned char& numProgram,
unsigned char& numLayer,
unsigned char*& audioSpecificConfig,
unsigned& audioSpecificConfigSize) {
// Set default versions of the result parameters:
audioMuxVersion = False;
allStreamsSameTimeFraming = True;
numSubFrames = numProgram = numLayer = 0;
audioSpecificConfig = NULL;
audioSpecificConfigSize = 0;
do {
if (configStr == NULL) break;
unsigned char nextByte;
if (!getByte(configStr, nextByte)) break;
audioMuxVersion = (nextByte&0x80) != 0;
if (audioMuxVersion) break;
allStreamsSameTimeFraming = ((nextByte&0x40)>>6) != 0;
numSubFrames = (nextByte&0x3F);
if (!getByte(configStr, nextByte)) break;
numProgram = (nextByte&0xF0)>>4;
numLayer = (nextByte&0x0E)>>1;
// The one remaining bit, and the rest of the string,
// are used for "audioSpecificConfig":
unsigned char remainingBit = nextByte&1;
unsigned ascSize = (strlen(configStr)+1)/2 + 1;
audioSpecificConfig = new unsigned char[ascSize];
Boolean parseSuccess;
unsigned i = 0;
do {
nextByte = 0;
parseSuccess = getByte(configStr, nextByte);
audioSpecificConfig[i++] = (remainingBit<<7)|((nextByte&0xFE)>>1);
remainingBit = nextByte&1;
} while (parseSuccess);
if (i != ascSize) break; // part of the remaining string was bad
audioSpecificConfigSize = ascSize;
return True; // parsing succeeded
} while (0);
delete[] audioSpecificConfig;
return False; // parsing failed
}
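// Illustrative sketch (hypothetical usage, not in the original file): typical
// use of the full-form parser above, with a "config" string taken from an SDP
// "a=fmtp:" line. On success, ownership of the returned "AudioSpecificConfig"
// buffer passes to the caller:
static void exampleParseStreamMuxConfig(char const* configStrFromSDP) {
  Boolean audioMuxVersion, allStreamsSameTimeFraming;
  unsigned char numSubFrames, numProgram, numLayer;
  unsigned char* audioSpecificConfig;
  unsigned audioSpecificConfigSize;
  if (parseStreamMuxConfigStr(configStrFromSDP,
			      audioMuxVersion, allStreamsSameTimeFraming,
			      numSubFrames, numProgram, numLayer,
			      audioSpecificConfig, audioSpecificConfigSize)) {
    // ... use audioSpecificConfig[0..audioSpecificConfigSize-1] here ...
    delete[] audioSpecificConfig;
  }
}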
unsigned char* parseStreamMuxConfigStr(char const* configStr,
// result parameter:
unsigned& audioSpecificConfigSize) {
Boolean audioMuxVersion, allStreamsSameTimeFraming;
unsigned char numSubFrames, numProgram, numLayer;
unsigned char* audioSpecificConfig;
if (!parseStreamMuxConfigStr(configStr,
audioMuxVersion, allStreamsSameTimeFraming,
numSubFrames, numProgram, numLayer,
audioSpecificConfig, audioSpecificConfigSize)) {
audioSpecificConfigSize = 0;
return NULL;
}
return audioSpecificConfig;
}
unsigned char* parseGeneralConfigStr(char const* configStr,
// result parameter:
unsigned& configSize) {
unsigned char* config = NULL;
do {
if (configStr == NULL) break;
configSize = (strlen(configStr)+1)/2;
config = new unsigned char[configSize];
if (config == NULL) break;
unsigned i;
for (i = 0; i < configSize; ++i) {
if (!getByte(configStr, config[i])) break;
}
if (i != configSize) break; // part of the string was bad
return config;
} while (0);
configSize = 0;
delete[] config;
return NULL;
}
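// Illustrative sketch (hypothetical usage, not in the original file): the
// general config parser above simply converts a hexadecimal string to bytes.
// E.g. the common AAC-LC configuration string "1210" yields the two bytes
// 0x12,0x10:
static unsigned char* exampleParseGeneralConfig(unsigned& configSize) {
  return parseGeneralConfigStr("1210", configSize); // caller must delete[] the result
}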
live/liveMedia/AC3AudioRTPSink.cpp 000444 001751 000000 00000006752 12265042432 017117 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for AC3 audio
// Implementation
#include "AC3AudioRTPSink.hh"
AC3AudioRTPSink::AC3AudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
u_int8_t rtpPayloadFormat,
u_int32_t rtpTimestampFrequency)
: AudioRTPSink(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency, "AC3"),
fTotNumFragmentsUsed(0) {
}
AC3AudioRTPSink::~AC3AudioRTPSink() {
}
AC3AudioRTPSink*
AC3AudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
u_int8_t rtpPayloadFormat,
u_int32_t rtpTimestampFrequency) {
return new AC3AudioRTPSink(env, RTPgs,
rtpPayloadFormat, rtpTimestampFrequency);
}
Boolean AC3AudioRTPSink
::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
unsigned /*numBytesInFrame*/) const {
// (For now) allow at most 1 frame in a single packet:
return False;
}
void AC3AudioRTPSink
::doSpecialFrameHandling(unsigned fragmentationOffset,
unsigned char* frameStart,
unsigned numBytesInFrame,
struct timeval framePresentationTime,
unsigned numRemainingBytes) {
// Set the 2-byte "payload header", as defined in RFC 4184.
unsigned char headers[2];
Boolean isFragment = numRemainingBytes > 0 || fragmentationOffset > 0;
if (!isFragment) {
headers[0] = 0; // One or more complete frames
headers[1] = 1; // because we (for now) allow at most 1 frame per packet
} else {
if (fragmentationOffset > 0) {
headers[0] = 3; // Fragment of frame other than initial fragment
} else {
// An initial fragment of the frame
unsigned const totalFrameSize = fragmentationOffset + numBytesInFrame + numRemainingBytes;
unsigned const fiveEighthsPoint = totalFrameSize/2 + totalFrameSize/8;
headers[0] = numBytesInFrame >= fiveEighthsPoint ? 1 : 2;
// Because this outgoing packet will be full (because it's an initial fragment), we can compute how many total
// fragments (and thus packets) will make up the complete AC-3 frame:
fTotNumFragmentsUsed = (totalFrameSize + (numBytesInFrame-1))/numBytesInFrame;
}
headers[1] = fTotNumFragmentsUsed;
}
setSpecialHeaderBytes(headers, sizeof headers);
if (numRemainingBytes == 0) {
// This packet contains the last (or only) fragment of the frame.
// Set the RTP 'M' ('marker') bit:
setMarkerBit();
}
// Important: Also call our base class's doSpecialFrameHandling(),
// to set the packet's timestamp:
MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
frameStart, numBytesInFrame,
framePresentationTime,
numRemainingBytes);
}
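// Illustrative sketch (hypothetical helper, not in the original file): the
// RFC 4184 frame-type decision above, isolated. FT 0 means complete frame(s);
// FT 1 an initial fragment carrying at least the first 5/8 of the frame;
// FT 2 a smaller initial fragment; FT 3 any non-initial fragment:
static unsigned char ac3FrameTypeFor(unsigned fragmentationOffset,
				     unsigned numBytesInFrame,
				     unsigned numRemainingBytes) {
  if (fragmentationOffset == 0 && numRemainingBytes == 0) return 0;
  if (fragmentationOffset > 0) return 3;
  unsigned const totalFrameSize = numBytesInFrame + numRemainingBytes;
  unsigned const fiveEighthsPoint = totalFrameSize/2 + totalFrameSize/8;
  return numBytesInFrame >= fiveEighthsPoint ? 1 : 2;
}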
unsigned AC3AudioRTPSink::specialHeaderSize() const {
return 2;
}
live/liveMedia/FramedFilter.cpp 000444 001751 000000 00000003504 12265042432 016710 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Framed Filters
// Implementation
#include "FramedFilter.hh"
////////// FramedFilter //////////
#include <string.h>
void FramedFilter::detachInputSource() {
if (fInputSource != NULL) {
fInputSource->stopGettingFrames();
reassignInputSource(NULL);
}
}
FramedFilter::FramedFilter(UsageEnvironment& env,
FramedSource* inputSource)
: FramedSource(env),
fInputSource(inputSource) {
}
FramedFilter::~FramedFilter() {
Medium::close(fInputSource);
}
// Default implementations of needed virtual functions. These merely
// call the same function in the input source - i.e., act like a 'null filter'.
char const* FramedFilter::MIMEtype() const {
if (fInputSource == NULL) return "";
return fInputSource->MIMEtype();
}
void FramedFilter::getAttributes() const {
if (fInputSource != NULL) fInputSource->getAttributes();
}
void FramedFilter::doStopGettingFrames() {
FramedSource::doStopGettingFrames();
if (fInputSource != NULL) fInputSource->stopGettingFrames();
}
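// Illustrative sketch (hypothetical subclass, not in the original file): the
// usual FramedFilter pattern - doGetNextFrame() reads from fInputSource
// directly into the downstream reader's buffer, and the 'after getting'
// callback copies the frame parameters before re-delivering via afterGetting():
class ExamplePassThroughFilter: public FramedFilter {
public:
  ExamplePassThroughFilter(UsageEnvironment& env, FramedSource* inputSource)
    : FramedFilter(env, inputSource) {}
private:
  virtual void doGetNextFrame() {
    fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this,
			       handleClosure, this);
  }
  static void afterGettingFrame(void* clientData, unsigned frameSize,
				unsigned numTruncatedBytes,
				struct timeval presentationTime,
				unsigned durationInMicroseconds) {
    ExamplePassThroughFilter* filter = (ExamplePassThroughFilter*)clientData;
    filter->fFrameSize = frameSize;
    filter->fNumTruncatedBytes = numTruncatedBytes;
    filter->fPresentationTime = presentationTime;
    filter->fDurationInMicroseconds = durationInMicroseconds;
    FramedSource::afterGetting(filter); // deliver to our downstream reader
  }
};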
live/liveMedia/FramedSource.cpp 000444 001751 000000 00000007773 12265042432 016737 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Framed Sources
// Implementation
#include "FramedSource.hh"
#include <stdlib.h>
////////// FramedSource //////////
FramedSource::FramedSource(UsageEnvironment& env)
: MediaSource(env),
fAfterGettingFunc(NULL), fAfterGettingClientData(NULL),
fOnCloseFunc(NULL), fOnCloseClientData(NULL),
fIsCurrentlyAwaitingData(False) {
fPresentationTime.tv_sec = fPresentationTime.tv_usec = 0; // initially
}
FramedSource::~FramedSource() {
}
Boolean FramedSource::isFramedSource() const {
return True;
}
Boolean FramedSource::lookupByName(UsageEnvironment& env, char const* sourceName,
FramedSource*& resultSource) {
resultSource = NULL; // unless we succeed
MediaSource* source;
if (!MediaSource::lookupByName(env, sourceName, source)) return False;
if (!source->isFramedSource()) {
env.setResultMsg(sourceName, " is not a framed source");
return False;
}
resultSource = (FramedSource*)source;
return True;
}
void FramedSource::getNextFrame(unsigned char* to, unsigned maxSize,
afterGettingFunc* afterGettingFunc,
void* afterGettingClientData,
onCloseFunc* onCloseFunc,
void* onCloseClientData) {
// Make sure we're not already being read:
if (fIsCurrentlyAwaitingData) {
envir() << "FramedSource[" << this << "]::getNextFrame(): attempting to read more than once at the same time!\n";
envir().internalError();
}
fTo = to;
fMaxSize = maxSize;
fNumTruncatedBytes = 0; // by default; could be changed by doGetNextFrame()
fDurationInMicroseconds = 0; // by default; could be changed by doGetNextFrame()
fAfterGettingFunc = afterGettingFunc;
fAfterGettingClientData = afterGettingClientData;
fOnCloseFunc = onCloseFunc;
fOnCloseClientData = onCloseClientData;
fIsCurrentlyAwaitingData = True;
doGetNextFrame();
}
void FramedSource::afterGetting(FramedSource* source) {
source->fIsCurrentlyAwaitingData = False;
// indicates that we can be read again
// Note that this needs to be done here, in case the "fAfterFunc"
// called below tries to read another frame (which it usually will)
if (source->fAfterGettingFunc != NULL) {
(*(source->fAfterGettingFunc))(source->fAfterGettingClientData,
source->fFrameSize, source->fNumTruncatedBytes,
source->fPresentationTime,
source->fDurationInMicroseconds);
}
}
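// Illustrative sketch (hypothetical consumer, not in the original file): the
// callback-driven read cycle that getNextFrame()/afterGetting() implement.
// A consumer re-arms itself from within its 'after getting' callback:
static unsigned char exampleBuffer[100000];
static void exampleAfterGetting(void* clientData, unsigned frameSize,
				unsigned numTruncatedBytes,
				struct timeval presentationTime,
				unsigned durationInMicroseconds);
static void exampleReadLoop(FramedSource* source) {
  source->getNextFrame(exampleBuffer, sizeof exampleBuffer,
		       exampleAfterGetting, source,
		       FramedSource::handleClosure, source);
}
static void exampleAfterGetting(void* clientData, unsigned frameSize,
				unsigned /*numTruncatedBytes*/,
				struct timeval /*presentationTime*/,
				unsigned /*durationInMicroseconds*/) {
  // ... consume "frameSize" bytes from exampleBuffer, then request the next frame:
  exampleReadLoop((FramedSource*)clientData);
}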
void FramedSource::handleClosure(void* clientData) {
FramedSource* source = (FramedSource*)clientData;
source->fIsCurrentlyAwaitingData = False; // because we got a close instead
if (source->fOnCloseFunc != NULL) {
(*(source->fOnCloseFunc))(source->fOnCloseClientData);
}
}
void FramedSource::stopGettingFrames() {
fIsCurrentlyAwaitingData = False; // indicates that we can be read again
fAfterGettingFunc = NULL;
fOnCloseFunc = NULL;
// Perform any specialized action now:
doStopGettingFrames();
}
void FramedSource::doStopGettingFrames() {
// Default implementation: Do nothing except cancel any pending 'delivery' task:
envir().taskScheduler().unscheduleDelayedTask(nextTask());
// Subclasses may wish to redefine this function.
}
unsigned FramedSource::maxFrameSize() const {
// By default, this source has no maximum frame size.
return 0;
}
live/liveMedia/AMRAudioFileSource.cpp 000444 001751 000000 00000013276 12265042432 017735 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A source object for AMR audio files (as defined in RFC 4867, section 5)
// Implementation
#include "AMRAudioFileSource.hh"
#include "InputFile.hh"
#include "GroupsockHelper.hh"
////////// AMRAudioFileSource //////////
AMRAudioFileSource*
AMRAudioFileSource::createNew(UsageEnvironment& env, char const* fileName) {
FILE* fid = NULL;
Boolean magicNumberOK = True;
do {
fid = OpenInputFile(env, fileName);
if (fid == NULL) break;
// Now, having opened the input file, read the first few bytes, to
// check the required 'magic number':
magicNumberOK = False; // until we learn otherwise
Boolean isWideband = False; // by default
unsigned numChannels = 1; // by default
char buf[100];
// Start with the first 6 bytes (the first 5 of which must be "#!AMR"):
if (fread(buf, 1, 6, fid) < 6) break;
if (strncmp(buf, "#!AMR", 5) != 0) break; // bad magic #
unsigned bytesRead = 6;
// The next bytes must be "\n", "-WB\n", "_MC1.0\n", or "-WB_MC1.0\n"
if (buf[5] == '-') {
// The next bytes must be "WB\n" or "WB_MC1.0\n"
if (fread(&buf[bytesRead], 1, 3, fid) < 3) break;
if (strncmp(&buf[bytesRead], "WB", 2) != 0) break; // bad magic #
isWideband = True;
bytesRead += 3;
}
if (buf[bytesRead-1] == '_') {
// The next bytes must be "MC1.0\n"
if (fread(&buf[bytesRead], 1, 6, fid) < 6) break;
if (strncmp(&buf[bytesRead], "MC1.0\n", 6) != 0) break; // bad magic #
bytesRead += 6;
// The next 4 bytes contain the number of channels:
char channelDesc[4];
if (fread(channelDesc, 1, 4, fid) < 4) break;
numChannels = channelDesc[3]&0xF;
} else if (buf[bytesRead-1] != '\n') {
break; // bad magic #
}
// If we get here, the magic number was OK:
magicNumberOK = True;
#ifdef DEBUG
fprintf(stderr, "isWideband: %d, numChannels: %d\n",
isWideband, numChannels);
#endif
return new AMRAudioFileSource(env, fid, isWideband, numChannels);
} while (0);
// An error occurred:
CloseInputFile(fid);
if (!magicNumberOK) {
env.setResultMsg("Bad (or nonexistent) AMR file header");
}
return NULL;
}
AMRAudioFileSource
::AMRAudioFileSource(UsageEnvironment& env, FILE* fid,
Boolean isWideband, unsigned numChannels)
: AMRAudioSource(env, isWideband, numChannels),
fFid(fid) {
}
AMRAudioFileSource::~AMRAudioFileSource() {
CloseInputFile(fFid);
}
// The mapping from the "FT" field to frame size.
// Values of 65535 are invalid.
#define FT_INVALID 65535
static unsigned short const frameSize[16] = {
12, 13, 15, 17,
19, 20, 26, 31,
5, FT_INVALID, FT_INVALID, FT_INVALID,
FT_INVALID, FT_INVALID, FT_INVALID, 0
};
static unsigned short const frameSizeWideband[16] = {
17, 23, 32, 36,
40, 46, 50, 58,
60, 5, FT_INVALID, FT_INVALID,
FT_INVALID, FT_INVALID, 0, 0
};
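// Illustrative sketch (hypothetical helper, not in the original file): the
// 1-byte frame header read below has the RFC 4867 storage-format layout
// P(1) FT(4) Q(1) P(2) - so the padding bits are mask 0x83, and the 4-bit
// frame type ("FT") that indexes the tables above occupies bits 3..6:
static unsigned char ftFromFrameHeader(unsigned char frameHeader) {
  return (frameHeader&0x78)>>3;
}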
// Note: We should change the following to use asynchronous file reading, #####
// as we now do with ByteStreamFileSource. #####
void AMRAudioFileSource::doGetNextFrame() {
if (feof(fFid) || ferror(fFid)) {
handleClosure(this);
return;
}
// Begin by reading the 1-byte frame header (and checking it for validity)
while (1) {
if (fread(&fLastFrameHeader, 1, 1, fFid) < 1) {
handleClosure(this);
return;
}
if ((fLastFrameHeader&0x83) != 0) {
#ifdef DEBUG
fprintf(stderr, "Invalid frame header 0x%02x (padding bits (0x83) are not zero)\n", fLastFrameHeader);
#endif
} else {
unsigned char ft = (fLastFrameHeader&0x78)>>3;
fFrameSize = fIsWideband ? frameSizeWideband[ft] : frameSize[ft];
if (fFrameSize == FT_INVALID) {
#ifdef DEBUG
fprintf(stderr, "Invalid FT field %d (from frame header 0x%02x)\n",
ft, fLastFrameHeader);
#endif
} else {
// The frame header is OK
#ifdef DEBUG
fprintf(stderr, "Valid frame header 0x%02x -> ft %d -> frame size %d\n", fLastFrameHeader, ft, fFrameSize);
#endif
break;
}
}
}
// Next, read the frame-block into the buffer provided:
fFrameSize *= fNumChannels; // because multiple channels make up a frame-block
if (fFrameSize > fMaxSize) {
fNumTruncatedBytes = fFrameSize - fMaxSize;
fFrameSize = fMaxSize;
}
fFrameSize = fread(fTo, 1, fFrameSize, fFid);
// Set the 'presentation time':
if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
// This is the first frame, so use the current time:
gettimeofday(&fPresentationTime, NULL);
} else {
// Increment by the play time of the previous frame (20 ms)
unsigned uSeconds = fPresentationTime.tv_usec + 20000;
fPresentationTime.tv_sec += uSeconds/1000000;
fPresentationTime.tv_usec = uSeconds%1000000;
}
fDurationInMicroseconds = 20000; // each frame is 20 ms
// Switch to another task, and inform the reader that he has data:
nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
(TaskFunc*)FramedSource::afterGetting, this);
}
live/liveMedia/MP3ADUdescriptor.cpp 000444 001751 000000 00000004136 12265042432 017376 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Descriptor preceding frames of 'ADU' MP3 streams (for improved loss-tolerance)
// Implementation
#include "MP3ADUdescriptor.hh"
////////// ADUdescriptor //////////
//##### NOTE: For now, ignore fragmentation. Fix this later! #####
#define TWO_BYTE_DESCR_FLAG 0x40
unsigned ADUdescriptor::generateDescriptor(unsigned char*& toPtr,
unsigned remainingFrameSize) {
unsigned descriptorSize = ADUdescriptor::computeSize(remainingFrameSize);
switch (descriptorSize) {
case 1: {
*toPtr++ = (unsigned char)remainingFrameSize;
break;
}
case 2: {
generateTwoByteDescriptor(toPtr, remainingFrameSize);
break;
}
}
return descriptorSize;
}
void ADUdescriptor::generateTwoByteDescriptor(unsigned char*& toPtr,
unsigned remainingFrameSize) {
*toPtr++ = (TWO_BYTE_DESCR_FLAG|(unsigned char)(remainingFrameSize>>8));
*toPtr++ = (unsigned char)(remainingFrameSize&0xFF);
}
unsigned ADUdescriptor::getRemainingFrameSize(unsigned char*& fromPtr) {
unsigned char firstByte = *fromPtr++;
if (firstByte&TWO_BYTE_DESCR_FLAG) {
// This is a 2-byte descriptor
unsigned char secondByte = *fromPtr++;
return ((firstByte&0x3F)<<8) | secondByte;
} else {
// This is a 1-byte descriptor
return (firstByte&0x3F);
}
}
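// Illustrative sketch (hypothetical check, not in the original file): a
// descriptor round trip. Sizes below 64 fit the 1-byte form; sizes up to
// 0x3FFF take the 2-byte form with TWO_BYTE_DESCR_FLAG set in the first byte:
static bool exampleDescriptorRoundTrip(unsigned remainingFrameSize) {
  unsigned char buf[2];
  unsigned char* writePtr = buf;
  (void)ADUdescriptor::generateDescriptor(writePtr, remainingFrameSize);
  unsigned char* readPtr = buf;
  return ADUdescriptor::getRemainingFrameSize(readPtr) == remainingFrameSize;
}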
live/liveMedia/MP3ADUinterleaving.cpp 000444 001751 000000 00000040723 12265042432 017711 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Interleaving of MP3 ADUs
// Implementation
#include "MP3ADUinterleaving.hh"
#include "MP3ADUdescriptor.hh"
#include <string.h>
#ifdef TEST_LOSS
#include "GroupsockHelper.hh"
#endif
////////// Interleaving //////////
Interleaving::Interleaving(unsigned cycleSize,
unsigned char const* cycleArray)
: fCycleSize(cycleSize) {
for (unsigned i = 0; i < fCycleSize; ++i) {
fInverseCycle[cycleArray[i]] = i;
}
}
Interleaving::~Interleaving() {
}
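// Illustrative sketch (hypothetical usage, not in the original file): for the
// interleave cycle (0,2,1,3), the constructor above records the *inverse*
// mapping - fInverseCycle[0]=0, fInverseCycle[2]=1, fInverseCycle[1]=2,
// fInverseCycle[3]=3 - which lookupInverseCycle() later uses to decide at
// which cycle position an incoming ADU belongs:
//
//   unsigned char const cycleArray[] = {0, 2, 1, 3};
//   Interleaving interleaving(4, cycleArray);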
////////// MP3ADUinterleaverBase //////////
MP3ADUinterleaverBase::MP3ADUinterleaverBase(UsageEnvironment& env,
FramedSource* inputSource)
: FramedFilter(env, inputSource) {
}
MP3ADUinterleaverBase::~MP3ADUinterleaverBase() {
}
FramedSource* MP3ADUinterleaverBase::getInputSource(UsageEnvironment& env,
char const* inputSourceName) {
FramedSource* inputSource;
if (!FramedSource::lookupByName(env, inputSourceName, inputSource))
return NULL;
if (strcmp(inputSource->MIMEtype(), "audio/MPA-ROBUST") != 0) {
env.setResultMsg(inputSourceName, " is not an MP3 ADU source");
return NULL;
}
return inputSource;
}
void MP3ADUinterleaverBase::afterGettingFrame(void* clientData,
unsigned numBytesRead,
unsigned /*numTruncatedBytes*/,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
MP3ADUinterleaverBase* interleaverBase = (MP3ADUinterleaverBase*)clientData;
// Finish up after reading:
interleaverBase->afterGettingFrame(numBytesRead,
presentationTime, durationInMicroseconds);
// Then, continue to deliver an outgoing frame:
interleaverBase->doGetNextFrame();
}
////////// InterleavingFrames (definition) //////////
class InterleavingFrames {
public:
InterleavingFrames(unsigned maxCycleSize);
virtual ~InterleavingFrames();
Boolean haveReleaseableFrame();
void getIncomingFrameParams(unsigned char index,
unsigned char*& dataPtr,
unsigned& bytesAvailable);
void getReleasingFrameParams(unsigned char index,
unsigned char*& dataPtr,
unsigned& bytesInUse,
struct timeval& presentationTime,
unsigned& durationInMicroseconds);
void setFrameParams(unsigned char index,
unsigned char icc, unsigned char ii,
unsigned frameSize, struct timeval presentationTime,
unsigned durationInMicroseconds);
unsigned nextIndexToRelease() {return fNextIndexToRelease;}
void releaseNext();
private:
unsigned fMaxCycleSize;
unsigned fNextIndexToRelease;
class InterleavingFrameDescriptor* fDescriptors;
};
////////// MP3ADUinterleaver //////////
MP3ADUinterleaver::MP3ADUinterleaver(UsageEnvironment& env,
Interleaving const& interleaving,
FramedSource* inputSource)
: MP3ADUinterleaverBase(env, inputSource),
fInterleaving(interleaving),
fFrames(new InterleavingFrames(interleaving.cycleSize())),
fII(0), fICC(0) {
}
MP3ADUinterleaver::~MP3ADUinterleaver() {
delete fFrames;
}
MP3ADUinterleaver* MP3ADUinterleaver::createNew(UsageEnvironment& env,
Interleaving const& interleaving,
FramedSource* inputSource) {
return new MP3ADUinterleaver(env, interleaving, inputSource);
}
void MP3ADUinterleaver::doGetNextFrame() {
// If there's a frame immediately available, deliver it, otherwise get new
// frames from the source until one's available:
if (fFrames->haveReleaseableFrame()) {
releaseOutgoingFrame();
// Call our own 'after getting' function. Because we're not a 'leaf'
// source, we can call this directly, without risking infinite recursion.
afterGetting(this);
} else {
fPositionOfNextIncomingFrame = fInterleaving.lookupInverseCycle(fII);
unsigned char* dataPtr;
unsigned bytesAvailable;
fFrames->getIncomingFrameParams(fPositionOfNextIncomingFrame,
dataPtr, bytesAvailable);
// Read the next incoming frame (asynchronously)
fInputSource->getNextFrame(dataPtr, bytesAvailable,
&MP3ADUinterleaverBase::afterGettingFrame, this,
handleClosure, this);
}
}
void MP3ADUinterleaver::releaseOutgoingFrame() {
unsigned char* fromPtr;
fFrames->getReleasingFrameParams(fFrames->nextIndexToRelease(),
fromPtr, fFrameSize,
fPresentationTime, fDurationInMicroseconds);
if (fFrameSize > fMaxSize) {
fNumTruncatedBytes = fFrameSize - fMaxSize;
fFrameSize = fMaxSize;
}
memmove(fTo, fromPtr, fFrameSize);
fFrames->releaseNext();
}
void MP3ADUinterleaver::afterGettingFrame(unsigned numBytesRead,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
// Set the (icc,ii) and frame size of the newly-read frame:
fFrames->setFrameParams(fPositionOfNextIncomingFrame,
fICC, fII, numBytesRead,
presentationTime, durationInMicroseconds);
// Prepare our counters for the next frame:
if (++fII == fInterleaving.cycleSize()) {
fII = 0;
fICC = (fICC+1)%8;
}
}
////////// DeinterleavingFrames (definition) //////////
class DeinterleavingFrames {
public:
DeinterleavingFrames();
virtual ~DeinterleavingFrames();
Boolean haveReleaseableFrame();
void getIncomingFrameParams(unsigned char*& dataPtr,
unsigned& bytesAvailable);
void getIncomingFrameParamsAfter(unsigned frameSize,
struct timeval presentationTime,
unsigned durationInMicroseconds,
unsigned char& icc, unsigned char& ii);
void getReleasingFrameParams(unsigned char*& dataPtr,
unsigned& bytesInUse,
struct timeval& presentationTime,
unsigned& durationInMicroseconds);
void moveIncomingFrameIntoPlace();
void releaseNext();
void startNewCycle();
private:
unsigned fNextIndexToRelease;
Boolean fHaveEndedCycle;
unsigned fIIlastSeen;
unsigned fMinIndexSeen, fMaxIndexSeen; // actually, max+1
class DeinterleavingFrameDescriptor* fDescriptors;
};
////////// MP3ADUdeinterleaver //////////
MP3ADUdeinterleaver::MP3ADUdeinterleaver(UsageEnvironment& env,
FramedSource* inputSource)
: MP3ADUinterleaverBase(env, inputSource),
fFrames(new DeinterleavingFrames),
fIIlastSeen(~0), fICClastSeen(~0) {
}
MP3ADUdeinterleaver::~MP3ADUdeinterleaver() {
delete fFrames;
}
MP3ADUdeinterleaver* MP3ADUdeinterleaver::createNew(UsageEnvironment& env,
FramedSource* inputSource) {
return new MP3ADUdeinterleaver(env, inputSource);
}
void MP3ADUdeinterleaver::doGetNextFrame() {
// If there's a frame immediately available, deliver it, otherwise get new
// frames from the source until one's available:
if (fFrames->haveReleaseableFrame()) {
releaseOutgoingFrame();
// Call our own 'after getting' function. Because we're not a 'leaf'
// source, we can call this directly, without risking infinite recursion.
afterGetting(this);
} else {
#ifdef TEST_LOSS
NOTE: This code no longer works, because it uses synchronous reads,
which are no longer supported.
static unsigned const framesPerPacket = 3;
static unsigned const frameCount = 0;
static Boolean packetIsLost;
while (1) {
unsigned packetCount = frameCount/framesPerPacket;
if ((frameCount++)%framesPerPacket == 0) {
packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss #####
}
if (packetIsLost) {
// Read and discard the next input frame (that would be part of
// a lost packet):
unsigned char dummyBuf[2000];
unsigned numBytesRead;
struct timeval presentationTime;
// (this works only if the source can be read synchronously)
fInputSource->syncGetNextFrame(dummyBuf, sizeof dummyBuf,
numBytesRead, presentationTime);
} else {
break; // from while (1)
}
}
#endif
unsigned char* dataPtr;
unsigned bytesAvailable;
fFrames->getIncomingFrameParams(dataPtr, bytesAvailable);
// Read the next incoming frame (asynchronously)
fInputSource->getNextFrame(dataPtr, bytesAvailable,
&MP3ADUinterleaverBase::afterGettingFrame, this,
handleClosure, this);
}
}
void MP3ADUdeinterleaver::afterGettingFrame(unsigned numBytesRead,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
// Get the (icc,ii) and set the frame size of the newly-read frame:
unsigned char icc, ii;
fFrames->getIncomingFrameParamsAfter(numBytesRead,
presentationTime, durationInMicroseconds,
icc, ii);
// Compare these to the values we saw last:
if (icc != fICClastSeen || ii == fIIlastSeen) {
// We've started a new interleave cycle
// (or interleaving was not used). Release all
// pending ADU frames to the ADU->MP3 conversion step:
fFrames->startNewCycle();
} else {
// We're still in the same cycle as before.
// Move the newly-read frame into place, so it can be used:
fFrames->moveIncomingFrameIntoPlace();
}
fICClastSeen = icc;
fIIlastSeen = ii;
}
void MP3ADUdeinterleaver::releaseOutgoingFrame() {
unsigned char* fromPtr;
fFrames->getReleasingFrameParams(fromPtr, fFrameSize,
fPresentationTime, fDurationInMicroseconds);
if (fFrameSize > fMaxSize) {
fNumTruncatedBytes = fFrameSize - fMaxSize;
fFrameSize = fMaxSize;
}
memmove(fTo, fromPtr, fFrameSize);
fFrames->releaseNext();
}
////////// InterleavingFrames (implementation) //////////
#define MAX_FRAME_SIZE 2000 /* conservatively high */
class InterleavingFrameDescriptor {
public:
InterleavingFrameDescriptor() {frameDataSize = 0;}
unsigned frameDataSize; // includes ADU descriptor and (modified) MPEG hdr
struct timeval presentationTime;
unsigned durationInMicroseconds;
unsigned char frameData[MAX_FRAME_SIZE]; // ditto
};
InterleavingFrames::InterleavingFrames(unsigned maxCycleSize)
: fMaxCycleSize(maxCycleSize), fNextIndexToRelease(0),
fDescriptors(new InterleavingFrameDescriptor[maxCycleSize]) {
}
InterleavingFrames::~InterleavingFrames() {
delete[] fDescriptors;
}
Boolean InterleavingFrames::haveReleaseableFrame() {
return fDescriptors[fNextIndexToRelease].frameDataSize > 0;
}
void InterleavingFrames::getIncomingFrameParams(unsigned char index,
unsigned char*& dataPtr,
unsigned& bytesAvailable) {
InterleavingFrameDescriptor& desc = fDescriptors[index];
dataPtr = &desc.frameData[0];
bytesAvailable = MAX_FRAME_SIZE;
}
void InterleavingFrames::getReleasingFrameParams(unsigned char index,
unsigned char*& dataPtr,
unsigned& bytesInUse,
struct timeval& presentationTime,
unsigned& durationInMicroseconds) {
InterleavingFrameDescriptor& desc = fDescriptors[index];
dataPtr = &desc.frameData[0];
bytesInUse = desc.frameDataSize;
presentationTime = desc.presentationTime;
durationInMicroseconds = desc.durationInMicroseconds;
}
void InterleavingFrames::setFrameParams(unsigned char index,
unsigned char icc,
unsigned char ii,
unsigned frameSize,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
InterleavingFrameDescriptor& desc = fDescriptors[index];
desc.frameDataSize = frameSize;
desc.presentationTime = presentationTime;
desc.durationInMicroseconds = durationInMicroseconds;
// Advance over the ADU descriptor, to get to the MPEG 'syncword':
unsigned char* ptr = &desc.frameData[0];
(void)ADUdescriptor::getRemainingFrameSize(ptr);
// Replace the next 11 bits with (ii,icc):
*ptr++ = ii;
*ptr &=~ 0xE0;
*ptr |= (icc<<5);
}
void InterleavingFrames::releaseNext() {
fDescriptors[fNextIndexToRelease].frameDataSize = 0;
fNextIndexToRelease = (fNextIndexToRelease+1)%fMaxCycleSize;
}
////////// DeinterleavingFrames (implementation) //////////
class DeinterleavingFrameDescriptor {
public:
DeinterleavingFrameDescriptor() {frameDataSize = 0; frameData = NULL;}
virtual ~DeinterleavingFrameDescriptor() {delete[] frameData;}
unsigned frameDataSize; // includes ADU descriptor and (modified) MPEG hdr
struct timeval presentationTime;
unsigned durationInMicroseconds;
unsigned char* frameData;
};
DeinterleavingFrames::DeinterleavingFrames()
: fNextIndexToRelease(0), fHaveEndedCycle(False),
fMinIndexSeen(MAX_CYCLE_SIZE), fMaxIndexSeen(0),
fDescriptors(new DeinterleavingFrameDescriptor[MAX_CYCLE_SIZE+1]) {
}
DeinterleavingFrames::~DeinterleavingFrames() {
delete[] fDescriptors;
}
Boolean DeinterleavingFrames::haveReleaseableFrame() {
if (!fHaveEndedCycle) {
// Check just the next frame in the sequence
return fDescriptors[fNextIndexToRelease].frameDataSize > 0;
} else {
// We've just ended a cycle, so we can skip over frames that didn't
// get filled in (due to packet loss):
if (fNextIndexToRelease < fMinIndexSeen) {
fNextIndexToRelease = fMinIndexSeen;
}
while (fNextIndexToRelease < fMaxIndexSeen
&& fDescriptors[fNextIndexToRelease].frameDataSize == 0) {
++fNextIndexToRelease;
}
if (fNextIndexToRelease >= fMaxIndexSeen) {
// No more frames are available from the cycle that we just ended, so
// clear out all previously stored frames, then make available
// the last-read frame, and return false for now:
for (unsigned i = fMinIndexSeen; i < fMaxIndexSeen; ++i) {
fDescriptors[i].frameDataSize = 0;
}
fMinIndexSeen = MAX_CYCLE_SIZE; fMaxIndexSeen = 0;
moveIncomingFrameIntoPlace();
fHaveEndedCycle = False;
fNextIndexToRelease = 0;
return False;
}
return True;
}
}
void DeinterleavingFrames::getIncomingFrameParams(unsigned char*& dataPtr,
unsigned& bytesAvailable) {
// Use fDescriptors[MAX_CYCLE_SIZE] to store the incoming frame,
// prior to figuring out its real position:
DeinterleavingFrameDescriptor& desc = fDescriptors[MAX_CYCLE_SIZE];
if (desc.frameData == NULL) {
// There's no buffer yet, so allocate a new one:
desc.frameData = new unsigned char[MAX_FRAME_SIZE];
}
dataPtr = desc.frameData;
bytesAvailable = MAX_FRAME_SIZE;
}
void DeinterleavingFrames
::getIncomingFrameParamsAfter(unsigned frameSize,
struct timeval presentationTime,
unsigned durationInMicroseconds,
unsigned char& icc, unsigned char& ii) {
DeinterleavingFrameDescriptor& desc = fDescriptors[MAX_CYCLE_SIZE];
desc.frameDataSize = frameSize;
desc.presentationTime = presentationTime;
desc.durationInMicroseconds = durationInMicroseconds;
// Advance over the ADU descriptor, to get to the MPEG 'syncword':
unsigned char* ptr = desc.frameData;
(void)ADUdescriptor::getRemainingFrameSize(ptr);
// Read the next 11 bits into (ii,icc), and replace them with all-1s:
fIIlastSeen = ii = *ptr; *ptr++ = 0xFF;
icc = (*ptr&0xE0)>>5; *ptr |= 0xE0;
}
void DeinterleavingFrames::getReleasingFrameParams(unsigned char*& dataPtr,
unsigned& bytesInUse,
struct timeval& presentationTime,
unsigned& durationInMicroseconds) {
DeinterleavingFrameDescriptor& desc = fDescriptors[fNextIndexToRelease];
dataPtr = desc.frameData;
bytesInUse = desc.frameDataSize;
presentationTime = desc.presentationTime;
durationInMicroseconds = desc.durationInMicroseconds;
}
void DeinterleavingFrames::moveIncomingFrameIntoPlace() {
DeinterleavingFrameDescriptor& fromDesc = fDescriptors[MAX_CYCLE_SIZE];
DeinterleavingFrameDescriptor& toDesc = fDescriptors[fIIlastSeen];
toDesc.frameDataSize = fromDesc.frameDataSize;
toDesc.presentationTime = fromDesc.presentationTime;
// Move the data pointer into place by swapping the data pointers:
unsigned char* tmp = toDesc.frameData;
toDesc.frameData = fromDesc.frameData;
fromDesc.frameData = tmp;
if (fIIlastSeen < fMinIndexSeen) {
fMinIndexSeen = fIIlastSeen;
}
if (fIIlastSeen + 1 > fMaxIndexSeen) {
fMaxIndexSeen = fIIlastSeen + 1;
}
}
void DeinterleavingFrames::releaseNext() {
fDescriptors[fNextIndexToRelease].frameDataSize = 0;
fNextIndexToRelease = (fNextIndexToRelease+1)%MAX_CYCLE_SIZE;
}
void DeinterleavingFrames::startNewCycle() {
fHaveEndedCycle = True;
}
live/liveMedia/AMRAudioRTPSink.cpp 000444 001751 000000 00000012134 12265042432 017157 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for AMR audio (RFC 4867)
// Implementation
// NOTE: At present, this is just a limited implementation, supporting:
// octet-alignment only; no interleaving; no frame CRC; no robust-sorting.
#include "AMRAudioRTPSink.hh"
#include "AMRAudioSource.hh"
AMRAudioRTPSink*
AMRAudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
Boolean sourceIsWideband,
unsigned numChannelsInSource) {
return new AMRAudioRTPSink(env, RTPgs, rtpPayloadFormat,
sourceIsWideband, numChannelsInSource);
}
AMRAudioRTPSink
::AMRAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
Boolean sourceIsWideband, unsigned numChannelsInSource)
: AudioRTPSink(env, RTPgs, rtpPayloadFormat,
sourceIsWideband ? 16000 : 8000,
sourceIsWideband ? "AMR-WB": "AMR",
numChannelsInSource),
fSourceIsWideband(sourceIsWideband), fFmtpSDPLine(NULL) {
}
AMRAudioRTPSink::~AMRAudioRTPSink() {
delete[] fFmtpSDPLine;
}
Boolean AMRAudioRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
// Our source must be an AMR audio source:
if (!source.isAMRAudioSource()) return False;
// Also, the source must be wideband iff we asked for this:
AMRAudioSource& amrSource = (AMRAudioSource&)source;
if ((amrSource.isWideband()^fSourceIsWideband) != 0) return False;
// Also, the source must have the same number of channels that we
// specified. (It could, in principle, have more, but we don't
// support that.)
if (amrSource.numChannels() != numChannels()) return False;
// Also, because in our current implementation we output only one
// frame in each RTP packet, this means that for multi-channel audio,
// each 'frame-block' will be split over multiple RTP packets, which
// may violate the spec. Warn about this:
if (amrSource.numChannels() > 1) {
envir() << "AMRAudioRTPSink: Warning: Input source has " << amrSource.numChannels()
<< " audio channels. In the current implementation, the multi-frame frame-block will be split over multiple RTP packets\n";
}
return True;
}
void AMRAudioRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
unsigned char* frameStart,
unsigned numBytesInFrame,
struct timeval framePresentationTime,
unsigned numRemainingBytes) {
// If this is the 1st frame in the 1st packet, set the RTP 'M' (marker)
// bit (because this is considered the start of a talk spurt):
if (isFirstPacket() && isFirstFrameInPacket()) {
setMarkerBit();
}
// If this is the first frame in the packet, set the 1-byte payload
// header (using CMR 15)
if (isFirstFrameInPacket()) {
u_int8_t payloadHeader = 0xF0;
setSpecialHeaderBytes(&payloadHeader, 1, 0);
}
// Set the TOC field for the current frame, based on the "FT" and "Q"
// values from our source:
AMRAudioSource* amrSource = (AMRAudioSource*)fSource;
if (amrSource == NULL) return; // sanity check
u_int8_t toc = amrSource->lastFrameHeader();
// Clear the "F" bit, because we're the last frame in this packet: #####
toc &=~ 0x80;
setSpecialHeaderBytes(&toc, 1, 1+numFramesUsedSoFar());
// Important: Also call our base class's doSpecialFrameHandling(),
// to set the packet's timestamp:
MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
frameStart, numBytesInFrame,
framePresentationTime,
numRemainingBytes);
}
Boolean AMRAudioRTPSink
::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
unsigned /*numBytesInFrame*/) const {
// For now, pack only one AMR frame into each outgoing RTP packet: #####
return False;
}
unsigned AMRAudioRTPSink::specialHeaderSize() const {
// For now, because we're packing only one frame per packet,
// there's just a 1-byte payload header, plus a 1-byte TOC #####
return 2;
}
char const* AMRAudioRTPSink::auxSDPLine() {
if (fFmtpSDPLine == NULL) {
// Generate a "a=fmtp:" line with "octet-align=1"
// (That is the only non-default parameter.)
char buf[100];
sprintf(buf, "a=fmtp:%d octet-align=1\r\n", rtpPayloadType());
delete[] fFmtpSDPLine; fFmtpSDPLine = strDup(buf);
}
return fFmtpSDPLine;
}
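// Illustrative note (not in the original file): for, e.g., a dynamic RTP
// payload type of 96, the SDP line generated above reads exactly:
//
//   a=fmtp:96 octet-align=1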
live/liveMedia/MediaSession.cpp 000444 001751 000000 00000144024 12265042432 016732 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A data structure that represents a session that consists of
// potentially multiple (audio and/or video) sub-sessions
// Implementation
#include "liveMedia.hh"
#include "Locale.hh"
#include "GroupsockHelper.hh"
#include <ctype.h>
////////// MediaSession //////////
MediaSession* MediaSession::createNew(UsageEnvironment& env,
char const* sdpDescription) {
MediaSession* newSession = new MediaSession(env);
if (newSession != NULL) {
if (!newSession->initializeWithSDP(sdpDescription)) {
delete newSession;
return NULL;
}
}
return newSession;
}
Boolean MediaSession::lookupByName(UsageEnvironment& env,
char const* instanceName,
MediaSession*& resultSession) {
resultSession = NULL; // unless we succeed
Medium* medium;
if (!Medium::lookupByName(env, instanceName, medium)) return False;
if (!medium->isMediaSession()) {
env.setResultMsg(instanceName, " is not a 'MediaSession' object");
return False;
}
resultSession = (MediaSession*)medium;
return True;
}
MediaSession::MediaSession(UsageEnvironment& env)
: Medium(env),
fSubsessionsHead(NULL), fSubsessionsTail(NULL),
fConnectionEndpointName(NULL),
fMaxPlayStartTime(0.0f), fMaxPlayEndTime(0.0f), fAbsStartTime(NULL), fAbsEndTime(NULL),
fScale(1.0f), fMediaSessionType(NULL), fSessionName(NULL), fSessionDescription(NULL),
fControlPath(NULL) {
fSourceFilterAddr.s_addr = 0;
// Get our host name, and use this for the RTCP CNAME:
const unsigned maxCNAMElen = 100;
char CNAME[maxCNAMElen+1];
#ifndef CRIS
gethostname((char*)CNAME, maxCNAMElen);
#else
// "gethostname()" isn't defined for this platform
sprintf(CNAME, "unknown host %d", (unsigned)(our_random()*0x7FFFFFFF));
#endif
CNAME[maxCNAMElen] = '\0'; // just in case
fCNAME = strDup(CNAME);
}
MediaSession::~MediaSession() {
delete fSubsessionsHead;
delete[] fCNAME;
delete[] fConnectionEndpointName;
delete[] fAbsStartTime; delete[] fAbsEndTime;
delete[] fMediaSessionType;
delete[] fSessionName;
delete[] fSessionDescription;
delete[] fControlPath;
}
Boolean MediaSession::isMediaSession() const {
return True;
}
MediaSubsession* MediaSession::createNewMediaSubsession() {
// default implementation:
return new MediaSubsession(*this);
}
Boolean MediaSession::initializeWithSDP(char const* sdpDescription) {
if (sdpDescription == NULL) return False;
// Begin by processing all SDP lines until we see the first "m="
char const* sdpLine = sdpDescription;
char const* nextSDPLine;
while (1) {
if (!parseSDPLine(sdpLine, nextSDPLine)) return False;
//##### We should really check for the correct SDP version (v=0)
if (sdpLine[0] == 'm') break;
sdpLine = nextSDPLine;
if (sdpLine == NULL) break; // there are no m= lines at all
// Check for various special SDP lines that we understand:
if (parseSDPLine_s(sdpLine)) continue;
if (parseSDPLine_i(sdpLine)) continue;
if (parseSDPLine_c(sdpLine)) continue;
if (parseSDPAttribute_control(sdpLine)) continue;
if (parseSDPAttribute_range(sdpLine)) continue;
if (parseSDPAttribute_type(sdpLine)) continue;
if (parseSDPAttribute_source_filter(sdpLine)) continue;
}
while (sdpLine != NULL) {
// We have a "m=" line, representing a new sub-session:
MediaSubsession* subsession = createNewMediaSubsession();
if (subsession == NULL) {
envir().setResultMsg("Unable to create new MediaSubsession");
return False;
}
// Parse the line as "m=<medium_name> <client_portNum> RTP/AVP <fmt>"
// or "m=<medium_name> <client_portNum>/<num_ports> RTP/AVP <fmt>"
// (Should we be checking for >1 payload format number here?)#####
char* mediumName = strDupSize(sdpLine); // ensures we have enough space
char const* protocolName = NULL;
unsigned payloadFormat;
if ((sscanf(sdpLine, "m=%s %hu RTP/AVP %u",
mediumName, &subsession->fClientPortNum, &payloadFormat) == 3 ||
sscanf(sdpLine, "m=%s %hu/%*u RTP/AVP %u",
mediumName, &subsession->fClientPortNum, &payloadFormat) == 3)
&& payloadFormat <= 127) {
protocolName = "RTP";
} else if ((sscanf(sdpLine, "m=%s %hu UDP %u",
mediumName, &subsession->fClientPortNum, &payloadFormat) == 3 ||
sscanf(sdpLine, "m=%s %hu udp %u",
mediumName, &subsession->fClientPortNum, &payloadFormat) == 3 ||
sscanf(sdpLine, "m=%s %hu RAW/RAW/UDP %u",
mediumName, &subsession->fClientPortNum, &payloadFormat) == 3)
&& payloadFormat <= 127) {
// This is a RAW UDP source
protocolName = "UDP";
} else {
// This "m=" line is bad; output an error message saying so:
char* sdpLineStr;
if (nextSDPLine == NULL) {
sdpLineStr = (char*)sdpLine;
} else {
sdpLineStr = strDup(sdpLine);
sdpLineStr[nextSDPLine-sdpLine] = '\0';
}
envir() << "Bad SDP \"m=\" line: " << sdpLineStr << "\n";
if (sdpLineStr != (char*)sdpLine) delete[] sdpLineStr;
delete[] mediumName;
delete subsession;
// Skip the following SDP lines, up until the next "m=":
while (1) {
sdpLine = nextSDPLine;
if (sdpLine == NULL) break; // we've reached the end
if (!parseSDPLine(sdpLine, nextSDPLine)) return False;
if (sdpLine[0] == 'm') break; // we've reached the next subsession
}
continue;
}
// Insert this subsession at the end of the list:
if (fSubsessionsTail == NULL) {
fSubsessionsHead = fSubsessionsTail = subsession;
} else {
fSubsessionsTail->setNext(subsession);
fSubsessionsTail = subsession;
}
subsession->serverPortNum = subsession->fClientPortNum; // by default
char const* mStart = sdpLine;
subsession->fSavedSDPLines = strDup(mStart);
subsession->fMediumName = strDup(mediumName);
delete[] mediumName;
subsession->fProtocolName = strDup(protocolName);
subsession->fRTPPayloadFormat = payloadFormat;
// Process the following SDP lines, up until the next "m=":
while (1) {
sdpLine = nextSDPLine;
if (sdpLine == NULL) break; // we've reached the end
if (!parseSDPLine(sdpLine, nextSDPLine)) return False;
if (sdpLine[0] == 'm') break; // we've reached the next subsession
// Check for various special SDP lines that we understand:
if (subsession->parseSDPLine_c(sdpLine)) continue;
if (subsession->parseSDPLine_b(sdpLine)) continue;
if (subsession->parseSDPAttribute_rtpmap(sdpLine)) continue;
if (subsession->parseSDPAttribute_control(sdpLine)) continue;
if (subsession->parseSDPAttribute_range(sdpLine)) continue;
if (subsession->parseSDPAttribute_fmtp(sdpLine)) continue;
if (subsession->parseSDPAttribute_source_filter(sdpLine)) continue;
if (subsession->parseSDPAttribute_x_dimensions(sdpLine)) continue;
if (subsession->parseSDPAttribute_framerate(sdpLine)) continue;
// (Later, check for malformed lines, and other valid SDP lines#####)
}
if (sdpLine != NULL) subsession->fSavedSDPLines[sdpLine-mStart] = '\0';
// If we don't yet know the codec name, try looking it up from the
// list of static payload types:
if (subsession->fCodecName == NULL) {
subsession->fCodecName
= lookupPayloadFormat(subsession->fRTPPayloadFormat,
subsession->fRTPTimestampFrequency,
subsession->fNumChannels);
if (subsession->fCodecName == NULL) {
char typeStr[20];
sprintf(typeStr, "%d", subsession->fRTPPayloadFormat);
envir().setResultMsg("Unknown codec name for RTP payload type ",
typeStr);
return False;
}
}
// If we don't yet know this subsession's RTP timestamp frequency
// (because it uses a dynamic payload type and the corresponding
// SDP "rtpmap" attribute erroneously didn't specify it),
// then guess it now:
if (subsession->fRTPTimestampFrequency == 0) {
subsession->fRTPTimestampFrequency
= guessRTPTimestampFrequency(subsession->fMediumName,
subsession->fCodecName);
}
}
return True;
}
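// Illustrative sketch (hypothetical usage, not in the original file): typical
// client-side use - create a session from an SDP description (e.g. the
// response to a RTSP "DESCRIBE") and iterate over its subsessions:
static void exampleIterateSubsessions(UsageEnvironment& env,
				      char const* sdpDescription) {
  MediaSession* session = MediaSession::createNew(env, sdpDescription);
  if (session == NULL) return;
  MediaSubsessionIterator iter(*session);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    env << subsession->mediumName() << "/" << subsession->codecName() << "\n";
  }
  Medium::close(session);
}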
Boolean MediaSession::parseSDPLine(char const* inputLine,
char const*& nextLine){
// Begin by finding the start of the next line (if any):
nextLine = NULL;
for (char const* ptr = inputLine; *ptr != '\0'; ++ptr) {
if (*ptr == '\r' || *ptr == '\n') {
// We found the end of the line
++ptr;
while (*ptr == '\r' || *ptr == '\n') ++ptr;
nextLine = ptr;
if (nextLine[0] == '\0') nextLine = NULL; // special case for end
break;
}
}
// Then, check that this line is an SDP line of the form <char>=<etc>
// (However, we also accept blank lines in the input.)
if (inputLine[0] == '\r' || inputLine[0] == '\n') return True;
if (strlen(inputLine) < 2 || inputLine[1] != '='
|| inputLine[0] < 'a' || inputLine[0] > 'z') {
envir().setResultMsg("Invalid SDP line: ", inputLine);
return False;
}
return True;
}
static char* parseCLine(char const* sdpLine) {
char* resultStr = NULL;
char* buffer = strDupSize(sdpLine); // ensures we have enough space
if (sscanf(sdpLine, "c=IN IP4 %[^/\r\n]", buffer) == 1) {
// Later, handle the optional /<ttl> and /<numAddresses> #####
resultStr = strDup(buffer);
}
delete[] buffer;
return resultStr;
}
Boolean MediaSession::parseSDPLine_s(char const* sdpLine) {
// Check for "s=" line
char* buffer = strDupSize(sdpLine);
Boolean parseSuccess = False;
if (sscanf(sdpLine, "s=%[^\r\n]", buffer) == 1) {
delete[] fSessionName; fSessionName = strDup(buffer);
parseSuccess = True;
}
delete[] buffer;
return parseSuccess;
}
Boolean MediaSession::parseSDPLine_i(char const* sdpLine) {
// Check for "i=" line
char* buffer = strDupSize(sdpLine);
Boolean parseSuccess = False;
if (sscanf(sdpLine, "i=%[^\r\n]", buffer) == 1) {
delete[] fSessionDescription; fSessionDescription = strDup(buffer);
parseSuccess = True;
}
delete[] buffer;
return parseSuccess;
}
Boolean MediaSession::parseSDPLine_c(char const* sdpLine) {
// Check for "c=IN IP4 <connection-endpoint>"
// or "c=IN IP4 <connection-endpoint>/<ttl+numAddresses>"
// (Later, do something with <ttl+numAddresses> also #####)
char* connectionEndpointName = parseCLine(sdpLine);
if (connectionEndpointName != NULL) {
delete[] fConnectionEndpointName;
fConnectionEndpointName = connectionEndpointName;
return True;
}
return False;
}
Boolean MediaSession::parseSDPAttribute_type(char const* sdpLine) {
// Check for a "a=type:broadcast|meeting|moderated|test|H.332|recvonly" line:
Boolean parseSuccess = False;
char* buffer = strDupSize(sdpLine);
if (sscanf(sdpLine, "a=type: %[^ ]", buffer) == 1) {
delete[] fMediaSessionType;
fMediaSessionType = strDup(buffer);
parseSuccess = True;
}
delete[] buffer;
return parseSuccess;
}
Boolean MediaSession::parseSDPAttribute_control(char const* sdpLine) {
// Check for a "a=control:<control-path>" line:
Boolean parseSuccess = False;
char* controlPath = strDupSize(sdpLine); // ensures we have enough space
if (sscanf(sdpLine, "a=control: %s", controlPath) == 1) {
parseSuccess = True;
delete[] fControlPath; fControlPath = strDup(controlPath);
}
delete[] controlPath;
return parseSuccess;
}
static Boolean parseRangeAttribute(char const* sdpLine, double& startTime, double& endTime) {
return sscanf(sdpLine, "a=range: npt = %lg - %lg", &startTime, &endTime) == 2;
}
static Boolean parseRangeAttribute(char const* sdpLine, char*& absStartTime, char*& absEndTime) {
size_t len = strlen(sdpLine) + 1;
char* as = new char[len];
char* ae = new char[len];
int sscanfResult = sscanf(sdpLine, "a=range: clock = %[^-\r\n]-%[^\r\n]", as, ae);
if (sscanfResult == 2) {
absStartTime = as;
absEndTime = ae;
} else if (sscanfResult == 1) {
absStartTime = as;
delete[] ae;
} else {
delete[] as; delete[] ae;
return False;
}
return True;
}
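// Worked examples (hypothetical SDP lines):
// "a=range:npt=0-30.5" matches the first (numeric) overload, because the
// literal spaces in the sscanf format match zero or more whitespace
// characters; it yields startTime == 0.0 and endTime == 30.5.
// "a=range:clock=20140101T000000Z-20140101T010203Z" matches the second
// overload, yielding newly-allocated "absStartTime" and "absEndTime" strings.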
Boolean MediaSession::parseSDPAttribute_range(char const* sdpLine) {
// Check for a "a=range:npt=-" line:
// (Later handle other kinds of "a=range" attributes also???#####)
Boolean parseSuccess = False;
double playStartTime;
double playEndTime;
if (parseRangeAttribute(sdpLine, playStartTime, playEndTime)) {
parseSuccess = True;
if (playStartTime > fMaxPlayStartTime) {
fMaxPlayStartTime = playStartTime;
}
if (playEndTime > fMaxPlayEndTime) {
fMaxPlayEndTime = playEndTime;
}
} else if (parseRangeAttribute(sdpLine, _absStartTime(), _absEndTime())) {
parseSuccess = True;
}
return parseSuccess;
}
static Boolean parseSourceFilterAttribute(char const* sdpLine,
struct in_addr& sourceAddr) {
// Check for a "a=source-filter:incl IN IP4 " line.
// Note: At present, we don't check that really matches
// one of our multicast addresses. We also don't support more than
// one #####
Boolean result = False; // until we succeed
char* sourceName = strDupSize(sdpLine); // ensures we have enough space
do {
if (sscanf(sdpLine, "a=source-filter: incl IN IP4 %*s %s",
sourceName) != 1) break;
// Now, convert this name to an address, if we can:
NetAddressList addresses(sourceName);
if (addresses.numAddresses() == 0) break;
netAddressBits sourceAddrBits
= *(netAddressBits*)(addresses.firstAddress()->data());
if (sourceAddrBits == 0) break;
sourceAddr.s_addr = sourceAddrBits;
result = True;
} while (0);
delete[] sourceName;
return result;
}
Boolean MediaSession
::parseSDPAttribute_source_filter(char const* sdpLine) {
return parseSourceFilterAttribute(sdpLine, fSourceFilterAddr);
}
char* MediaSession::lookupPayloadFormat(unsigned char rtpPayloadType,
unsigned& freq, unsigned& nCh) {
// Look up the codec name and timestamp frequency for known (static)
// RTP payload formats.
char const* temp = NULL;
switch (rtpPayloadType) {
case 0: {temp = "PCMU"; freq = 8000; nCh = 1; break;}
case 2: {temp = "G726-32"; freq = 8000; nCh = 1; break;}
case 3: {temp = "GSM"; freq = 8000; nCh = 1; break;}
case 4: {temp = "G723"; freq = 8000; nCh = 1; break;}
case 5: {temp = "DVI4"; freq = 8000; nCh = 1; break;}
case 6: {temp = "DVI4"; freq = 16000; nCh = 1; break;}
case 7: {temp = "LPC"; freq = 8000; nCh = 1; break;}
case 8: {temp = "PCMA"; freq = 8000; nCh = 1; break;}
case 9: {temp = "G722"; freq = 8000; nCh = 1; break;}
case 10: {temp = "L16"; freq = 44100; nCh = 2; break;}
case 11: {temp = "L16"; freq = 44100; nCh = 1; break;}
case 12: {temp = "QCELP"; freq = 8000; nCh = 1; break;}
case 14: {temp = "MPA"; freq = 90000; nCh = 1; break;}
// 'number of channels' is actually encoded in the media stream
case 15: {temp = "G728"; freq = 8000; nCh = 1; break;}
case 16: {temp = "DVI4"; freq = 11025; nCh = 1; break;}
case 17: {temp = "DVI4"; freq = 22050; nCh = 1; break;}
case 18: {temp = "G729"; freq = 8000; nCh = 1; break;}
case 25: {temp = "CELB"; freq = 90000; nCh = 1; break;}
case 26: {temp = "JPEG"; freq = 90000; nCh = 1; break;}
case 28: {temp = "NV"; freq = 90000; nCh = 1; break;}
case 31: {temp = "H261"; freq = 90000; nCh = 1; break;}
case 32: {temp = "MPV"; freq = 90000; nCh = 1; break;}
case 33: {temp = "MP2T"; freq = 90000; nCh = 1; break;}
case 34: {temp = "H263"; freq = 90000; nCh = 1; break;}
};
return strDup(temp);
}
unsigned MediaSession::guessRTPTimestampFrequency(char const* mediumName,
char const* codecName) {
// By default, we assume that audio sessions use a frequency of 8000,
// video sessions use a frequency of 90000,
// and text sessions use a frequency of 1000.
// Begin by checking for known exceptions to this rule
// (where the frequency is known unambiguously (e.g., not like "DVI4"))
if (strcmp(codecName, "L16") == 0) return 44100;
if (strcmp(codecName, "MPA") == 0
|| strcmp(codecName, "MPA-ROBUST") == 0
|| strcmp(codecName, "X-MP3-DRAFT-00") == 0) return 90000;
// Now, guess default values:
if (strcmp(mediumName, "video") == 0) return 90000;
else if (strcmp(mediumName, "text") == 0) return 1000;
return 8000; // for "audio", and any other medium
}
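// Worked examples (hypothetical inputs):
// guessRTPTimestampFrequency("audio", "L16") returns 44100 (a known exception);
// guessRTPTimestampFrequency("video", "XYZ") returns 90000 (the video default);
// guessRTPTimestampFrequency("audio", "XYZ") returns 8000 (the audio default).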
char* MediaSession::absStartTime() const {
if (fAbsStartTime != NULL) return fAbsStartTime;
// If a subsession has an 'absolute' start time, then use that:
MediaSubsessionIterator iter(*this);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
if (subsession->_absStartTime() != NULL) return subsession->_absStartTime();
}
return NULL;
}
char* MediaSession::absEndTime() const {
if (fAbsEndTime != NULL) return fAbsEndTime;
// If a subsession has an 'absolute' end time, then use that:
MediaSubsessionIterator iter(*this);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
if (subsession->_absEndTime() != NULL) return subsession->_absEndTime();
}
return NULL;
}
Boolean MediaSession
::initiateByMediaType(char const* mimeType,
MediaSubsession*& resultSubsession,
int useSpecialRTPoffset) {
// Look through this session's subsessions for media that match "mimeType"
resultSubsession = NULL;
MediaSubsessionIterator iter(*this);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
Boolean wasAlreadyInitiated = subsession->readSource() != NULL;
if (!wasAlreadyInitiated) {
// Try to create a source for this subsession:
if (!subsession->initiate(useSpecialRTPoffset)) return False;
}
// Make sure the source's MIME type is one that we handle:
if (strcmp(subsession->readSource()->MIMEtype(), mimeType) != 0) {
if (!wasAlreadyInitiated) subsession->deInitiate();
continue;
}
resultSubsession = subsession;
break; // use this
}
if (resultSubsession == NULL) {
envir().setResultMsg("Session has no usable media subsession");
return False;
}
return True;
}
////////// MediaSubsessionIterator //////////
MediaSubsessionIterator::MediaSubsessionIterator(MediaSession const& session)
: fOurSession(session) {
reset();
}
MediaSubsessionIterator::~MediaSubsessionIterator() {
}
MediaSubsession* MediaSubsessionIterator::next() {
MediaSubsession* result = fNextPtr;
if (fNextPtr != NULL) fNextPtr = fNextPtr->fNext;
return result;
}
void MediaSubsessionIterator::reset() {
fNextPtr = fOurSession.fSubsessionsHead;
}
////////// MediaSubsession //////////
MediaSubsession::MediaSubsession(MediaSession& parent)
: serverPortNum(0), sink(NULL), miscPtr(NULL),
fParent(parent), fNext(NULL),
fConnectionEndpointName(NULL),
fClientPortNum(0), fRTPPayloadFormat(0xFF),
fSavedSDPLines(NULL), fMediumName(NULL), fCodecName(NULL), fProtocolName(NULL),
fRTPTimestampFrequency(0), fControlPath(NULL),
fSourceFilterAddr(parent.sourceFilterAddr()), fBandwidth(0),
fAuxiliarydatasizelength(0), fConstantduration(0), fConstantsize(0),
fCRC(0), fCtsdeltalength(0), fDe_interleavebuffersize(0), fDtsdeltalength(0),
fIndexdeltalength(0), fIndexlength(0), fInterleaving(0), fMaxdisplacement(0),
fObjecttype(0), fOctetalign(0), fProfile_level_id(0), fRobustsorting(0),
fSizelength(0), fStreamstateindication(0), fStreamtype(0),
fCpresent(False), fRandomaccessindication(False),
fConfig(NULL), fMode(NULL), fSpropParameterSets(NULL), fEmphasis(NULL), fChannelOrder(NULL),
fPlayStartTime(0.0), fPlayEndTime(0.0), fAbsStartTime(NULL), fAbsEndTime(NULL),
fVideoWidth(0), fVideoHeight(0), fVideoFPS(0), fNumChannels(1), fScale(1.0f), fNPT_PTS_Offset(0.0f),
fRTPSocket(NULL), fRTCPSocket(NULL),
fRTPSource(NULL), fRTCPInstance(NULL), fReadSource(NULL),
fReceiveRawMP3ADUs(False), fReceiveRawJPEGFrames(False),
fSessionId(NULL) {
rtpInfo.seqNum = 0; rtpInfo.timestamp = 0; rtpInfo.infoIsNew = False;
}
MediaSubsession::~MediaSubsession() {
deInitiate();
delete[] fConnectionEndpointName; delete[] fSavedSDPLines;
delete[] fMediumName; delete[] fCodecName; delete[] fProtocolName;
delete[] fControlPath;
delete[] fConfig; delete[] fMode; delete[] fSpropParameterSets; delete[] fEmphasis; delete[] fChannelOrder;
delete[] fAbsStartTime; delete[] fAbsEndTime;
delete[] fSessionId;
delete fNext;
}
void MediaSubsession::addFilter(FramedFilter* filter){
if (filter == NULL || filter->inputSource() != fReadSource) return; // sanity check
fReadSource = filter;
}
double MediaSubsession::playStartTime() const {
if (fPlayStartTime > 0) return fPlayStartTime;
return fParent.playStartTime();
}
double MediaSubsession::playEndTime() const {
if (fPlayEndTime > 0) return fPlayEndTime;
return fParent.playEndTime();
}
char* MediaSubsession::absStartTime() const {
if (fAbsStartTime != NULL) return fAbsStartTime;
return fParent.absStartTime();
}
char* MediaSubsession::absEndTime() const {
if (fAbsEndTime != NULL) return fAbsEndTime;
return fParent.absEndTime();
}
static Boolean const honorSDPPortChoice
#ifdef IGNORE_UNICAST_SDP_PORTS
= False;
#else
= True;
#endif
Boolean MediaSubsession::initiate(int useSpecialRTPoffset) {
if (fReadSource != NULL) return True; // has already been initiated
do {
if (fCodecName == NULL) {
env().setResultMsg("Codec is unspecified");
break;
}
// Create RTP and RTCP 'Groupsocks' on which to receive incoming data.
// (Groupsocks will work even for unicast addresses)
struct in_addr tempAddr;
tempAddr.s_addr = connectionEndpointAddress();
// This could get changed later, as a result of a RTSP "SETUP"
if (fClientPortNum != 0 && (honorSDPPortChoice || IsMulticastAddress(tempAddr.s_addr))) {
// The sockets' port numbers were specified for us. Use these:
Boolean const protocolIsRTP = strcmp(fProtocolName, "RTP") == 0;
if (protocolIsRTP) {
fClientPortNum = fClientPortNum&~1;
// use an even-numbered port for RTP, and the next (odd-numbered) port for RTCP
}
if (isSSM()) {
fRTPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, fClientPortNum);
} else {
fRTPSocket = new Groupsock(env(), tempAddr, fClientPortNum, 255);
}
if (fRTPSocket == NULL) {
env().setResultMsg("Failed to create RTP socket");
break;
}
if (protocolIsRTP) {
// Set our RTCP port to be the RTP port +1
portNumBits const rtcpPortNum = fClientPortNum|1;
if (isSSM()) {
fRTCPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, rtcpPortNum);
} else {
fRTCPSocket = new Groupsock(env(), tempAddr, rtcpPortNum, 255);
}
}
} else {
// Port numbers were not specified in advance, so we use ephemeral port numbers.
// Create sockets until we get a port-number pair (even: RTP; even+1: RTCP).
// We need to make sure that we don't keep trying to use the same bad port numbers over
// and over again, so we store bad sockets in a table, and delete them all when we're done.
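// For example, in one hypothetical run: the first socket binds to ephemeral
// port 50001 (odd), so it's stashed in the table and we retry; the next binds
// to 50002 (even), and if port 50003 can also be bound for RTCP, we keep the
// 50002/50003 pair, then delete every stashed 'bad' socket below.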
HashTable* socketHashTable = HashTable::create(ONE_WORD_HASH_KEYS);
if (socketHashTable == NULL) break;
Boolean success = False;
NoReuse dummy(env());
// ensures that our new ephemeral port number won't be one that's already in use
while (1) {
// Create a new socket:
if (isSSM()) {
fRTPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, 0);
} else {
fRTPSocket = new Groupsock(env(), tempAddr, 0, 255);
}
if (fRTPSocket == NULL) {
env().setResultMsg("MediaSession::initiate(): unable to create RTP and RTCP sockets");
break;
}
// Get the client port number, and check whether it's even (for RTP):
Port clientPort(0);
if (!getSourcePort(env(), fRTPSocket->socketNum(), clientPort)) {
break;
}
fClientPortNum = ntohs(clientPort.num());
if ((fClientPortNum&1) != 0) { // it's odd
// Record this socket in our table, and keep trying:
unsigned key = (unsigned)fClientPortNum;
Groupsock* existing = (Groupsock*)socketHashTable->Add((char const*)key, fRTPSocket);
delete existing; // in case it wasn't NULL
continue;
}
// Make sure we can use the next (i.e., odd) port number, for RTCP:
portNumBits rtcpPortNum = fClientPortNum|1;
if (isSSM()) {
fRTCPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, rtcpPortNum);
} else {
fRTCPSocket = new Groupsock(env(), tempAddr, rtcpPortNum, 255);
}
if (fRTCPSocket != NULL && fRTCPSocket->socketNum() >= 0) {
// Success! Use these two sockets.
success = True;
break;
} else {
// We couldn't create the RTCP socket (perhaps that port number's already in use elsewhere?).
delete fRTCPSocket; fRTCPSocket = NULL;
// Record the first socket in our table, and keep trying:
unsigned key = (unsigned)fClientPortNum;
Groupsock* existing = (Groupsock*)socketHashTable->Add((char const*)key, fRTPSocket);
delete existing; // in case it wasn't NULL
continue;
}
}
// Clean up the socket hash table (and contents):
Groupsock* oldGS;
while ((oldGS = (Groupsock*)socketHashTable->RemoveNext()) != NULL) {
delete oldGS;
}
delete socketHashTable;
if (!success) break; // a fatal error occurred trying to create the RTP and RTCP sockets; we can't continue
}
// Try to use a big receive buffer for RTP - at least 0.1 second of
// specified bandwidth and at least 50 KB
unsigned rtpBufSize = fBandwidth * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes
if (rtpBufSize < 50 * 1024)
rtpBufSize = 50 * 1024;
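// Worked example (hypothetical): for a "b=AS:8000" session (fBandwidth == 8000
// kbps), rtpBufSize = 8000*25/2 = 100000 bytes (0.1 s at 8 Mbps), which
// exceeds the 50 KB floor, so it's used as-is.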
increaseReceiveBufferTo(env(), fRTPSocket->socketNum(), rtpBufSize);
if (isSSM() && fRTCPSocket != NULL) {
// Special case for RTCP SSM: Send RTCP packets back to the source via unicast:
fRTCPSocket->changeDestinationParameters(fSourceFilterAddr,0,~0);
}
// Create "fRTPSource" and "fReadSource":
if (!createSourceObjects(useSpecialRTPoffset)) break;
if (fReadSource == NULL) {
env().setResultMsg("Failed to create read source");
break;
}
// Finally, create our RTCP instance. (It starts running automatically)
if (fRTPSource != NULL && fRTCPSocket != NULL) {
// If bandwidth is specified, use it and add 5% for RTCP overhead.
// Otherwise make a guess at 500 kbps.
unsigned totSessionBandwidth
= fBandwidth ? fBandwidth + fBandwidth / 20 : 500;
fRTCPInstance = RTCPInstance::createNew(env(), fRTCPSocket,
totSessionBandwidth,
(unsigned char const*)
fParent.CNAME(),
NULL /* we're a client */,
fRTPSource);
if (fRTCPInstance == NULL) {
env().setResultMsg("Failed to create RTCP instance");
break;
}
}
return True;
} while (0);
deInitiate();
fClientPortNum = 0;
return False;
}
void MediaSubsession::deInitiate() {
Medium::close(fRTCPInstance); fRTCPInstance = NULL;
Medium::close(fReadSource); // this is assumed to also close fRTPSource
fReadSource = NULL; fRTPSource = NULL;
delete fRTPSocket; fRTPSocket = NULL;
delete fRTCPSocket; fRTCPSocket = NULL;
}
Boolean MediaSubsession::setClientPortNum(unsigned short portNum) {
if (fReadSource != NULL) {
env().setResultMsg("A read source has already been created");
return False;
}
fClientPortNum = portNum;
return True;
}
netAddressBits MediaSubsession::connectionEndpointAddress() const {
do {
// Get the endpoint name from within us, or from our parent session:
char const* endpointString = connectionEndpointName();
if (endpointString == NULL) {
endpointString = parentSession().connectionEndpointName();
}
if (endpointString == NULL) break;
// Now, convert this name to an address, if we can:
NetAddressList addresses(endpointString);
if (addresses.numAddresses() == 0) break;
return *(netAddressBits*)(addresses.firstAddress()->data());
} while (0);
// No address known:
return 0;
}
void MediaSubsession::setDestinations(netAddressBits defaultDestAddress) {
// Get the destination address from the connection endpoint name
// (This will be 0 if it's not known, in which case we use the default)
netAddressBits destAddress = connectionEndpointAddress();
if (destAddress == 0) destAddress = defaultDestAddress;
struct in_addr destAddr; destAddr.s_addr = destAddress;
// The destination TTL remains unchanged:
int destTTL = ~0; // means: don't change
if (fRTPSocket != NULL) {
Port destPort(serverPortNum);
fRTPSocket->changeDestinationParameters(destAddr, destPort, destTTL);
}
if (fRTCPSocket != NULL && !isSSM()) {
// Note: For SSM sessions, the dest address for RTCP was already set.
Port destPort(serverPortNum+1);
fRTCPSocket->changeDestinationParameters(destAddr, destPort, destTTL);
}
}
void MediaSubsession::setSessionId(char const* sessionId) {
delete[] fSessionId;
fSessionId = strDup(sessionId);
}
double MediaSubsession::getNormalPlayTime(struct timeval const& presentationTime) {
if (rtpSource() == NULL || rtpSource()->timestampFrequency() == 0) return 0.0; // no RTP source, or bad freq!
// First, check whether our "RTPSource" object has already been synchronized using RTCP.
// If it hasn't, then - as a special case - we need to use the RTP timestamp to compute the NPT.
if (!rtpSource()->hasBeenSynchronizedUsingRTCP()) {
if (!rtpInfo.infoIsNew) return 0.0; // the "rtpInfo" structure has not been filled in
u_int32_t timestampOffset = rtpSource()->curPacketRTPTimestamp() - rtpInfo.timestamp;
double nptOffset = (timestampOffset/(double)(rtpSource()->timestampFrequency()))*scale();
double npt = playStartTime() + nptOffset;
return npt;
} else {
// Common case: We have been synchronized using RTCP. This means that the "presentationTime" parameter
// will be accurate, and so we should use this to compute the NPT.
double ptsDouble = (double)(presentationTime.tv_sec + presentationTime.tv_usec/1000000.0);
if (rtpInfo.infoIsNew) {
// This is the first time we've been called with a synchronized presentation time since the "rtpInfo"
// structure was last filled in. Use this "presentationTime" to compute "fNPT_PTS_Offset":
if (seqNumLT(rtpSource()->curPacketRTPSeqNum(), rtpInfo.seqNum)) return -0.1; // sanity check; ignore old packets
u_int32_t timestampOffset = rtpSource()->curPacketRTPTimestamp() - rtpInfo.timestamp;
double nptOffset = (timestampOffset/(double)(rtpSource()->timestampFrequency()))*scale();
double npt = playStartTime() + nptOffset;
fNPT_PTS_Offset = npt - ptsDouble*scale();
rtpInfo.infoIsNew = False; // for next time
return npt;
} else {
// Use the precomputed "fNPT_PTS_Offset" to compute the NPT from the PTS:
if (fNPT_PTS_Offset == 0.0) return 0.0; // error: The "rtpInfo" structure was apparently never filled in
return (double)(ptsDouble*scale() + fNPT_PTS_Offset);
}
}
}
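// Worked example (hypothetical values): for a 90000 Hz video subsession that
// hasn't yet been synchronized via RTCP, with rtpInfo.timestamp == 1000000,
// a current packet RTP timestamp of 1090000, scale() == 1.0, and
// playStartTime() == 0.0:
// timestampOffset = 1090000 - 1000000 = 90000
// nptOffset = (90000/90000.0)*1.0 = 1.0
// npt = 0.0 + 1.0 = 1.0 seconds of normal play time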
Boolean MediaSubsession::parseSDPLine_c(char const* sdpLine) {
// Check for "c=IN IP4 "
// or "c=IN IP4 /"
// (Later, do something with also #####)
char* connectionEndpointName = parseCLine(sdpLine);
if (connectionEndpointName != NULL) {
delete[] fConnectionEndpointName;
fConnectionEndpointName = connectionEndpointName;
return True;
}
return False;
}
Boolean MediaSubsession::parseSDPLine_b(char const* sdpLine) {
// Check for "b=:" line
// RTP applications are expected to use bwtype="AS"
return sscanf(sdpLine, "b=AS:%u", &fBandwidth) == 1;
}
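// E.g., the (hypothetical) line "b=AS:96" sets fBandwidth to 96 (kbps);
// lines with other bwtype values are not parsed here, leaving fBandwidth
// at its default of 0.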
Boolean MediaSubsession::parseSDPAttribute_rtpmap(char const* sdpLine) {
// Check for a "a=rtpmap:/" line:
// (Also check without the "/"; RealNetworks omits this)
// Also check for a trailing "/".
Boolean parseSuccess = False;
unsigned rtpmapPayloadFormat;
char* codecName = strDupSize(sdpLine); // ensures we have enough space
unsigned rtpTimestampFrequency = 0;
unsigned numChannels = 1;
if (sscanf(sdpLine, "a=rtpmap: %u %[^/]/%u/%u",
&rtpmapPayloadFormat, codecName, &rtpTimestampFrequency,
&numChannels) == 4
|| sscanf(sdpLine, "a=rtpmap: %u %[^/]/%u",
&rtpmapPayloadFormat, codecName, &rtpTimestampFrequency) == 3
|| sscanf(sdpLine, "a=rtpmap: %u %s",
&rtpmapPayloadFormat, codecName) == 2) {
parseSuccess = True;
if (rtpmapPayloadFormat == fRTPPayloadFormat) {
// This "rtpmap" matches our payload format, so set our
// codec name and timestamp frequency:
// (First, make sure the codec name is upper case)
{
Locale l("POSIX");
for (char* p = codecName; *p != '\0'; ++p) *p = toupper(*p);
}
delete[] fCodecName; fCodecName = strDup(codecName);
fRTPTimestampFrequency = rtpTimestampFrequency;
fNumChannels = numChannels;
}
}
delete[] codecName;
return parseSuccess;
}
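// Worked examples (hypothetical lines, assuming fRTPPayloadFormat == 96):
// "a=rtpmap:96 H264/90000" sets fCodecName to "H264" and
// fRTPTimestampFrequency to 90000;
// "a=rtpmap:96 L16/44100/2" also sets fNumChannels to 2;
// "a=rtpmap:97 MPA/90000" parses successfully but changes nothing here,
// because its payload format doesn't match ours.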
Boolean MediaSubsession::parseSDPAttribute_control(char const* sdpLine) {
// Check for a "a=control:" line:
Boolean parseSuccess = False;
char* controlPath = strDupSize(sdpLine); // ensures we have enough space
if (sscanf(sdpLine, "a=control: %s", controlPath) == 1) {
parseSuccess = True;
delete[] fControlPath; fControlPath = strDup(controlPath);
}
delete[] controlPath;
return parseSuccess;
}
Boolean MediaSubsession::parseSDPAttribute_range(char const* sdpLine) {
// Check for a "a=range:npt=-" line:
// (Later handle other kinds of "a=range" attributes also???#####)
Boolean parseSuccess = False;
double playStartTime;
double playEndTime;
if (parseRangeAttribute(sdpLine, playStartTime, playEndTime)) {
parseSuccess = True;
if (playStartTime > fPlayStartTime) {
fPlayStartTime = playStartTime;
if (playStartTime > fParent.playStartTime()) {
fParent.playStartTime() = playStartTime;
}
}
if (playEndTime > fPlayEndTime) {
fPlayEndTime = playEndTime;
if (playEndTime > fParent.playEndTime()) {
fParent.playEndTime() = playEndTime;
}
}
} else if (parseRangeAttribute(sdpLine, _absStartTime(), _absEndTime())) {
parseSuccess = True;
}
return parseSuccess;
}
Boolean MediaSubsession::parseSDPAttribute_fmtp(char const* sdpLine) {
// Check for a "a=fmtp:" line:
// TEMP: We check only for a handful of expected parameter names #####
// Later: (i) check that payload format number matches; #####
// (ii) look for other parameters also (generalize?) #####
do {
if (strncmp(sdpLine, "a=fmtp:", 7) != 0) break; sdpLine += 7;
while (isdigit(*sdpLine)) ++sdpLine;
// The remaining "sdpLine" should be a sequence of
// <name>=<value>;
// parameter assignments. Look at each of these.
// First, convert the line to lower-case, to ease comparison:
char* const lineCopy = strDup(sdpLine); char* line = lineCopy;
{
Locale l("POSIX");
for (char* c = line; *c != '\0'; ++c) *c = tolower(*c);
}
while (*line != '\0' && *line != '\r' && *line != '\n') {
unsigned u;
char* valueStr = strDupSize(line);
if (sscanf(line, " auxiliarydatasizelength = %u", &u) == 1) {
fAuxiliarydatasizelength = u;
} else if (sscanf(line, " constantduration = %u", &u) == 1) {
fConstantduration = u;
} else if (sscanf(line, " constantsize; = %u", &u) == 1) {
fConstantsize = u;
} else if (sscanf(line, " crc = %u", &u) == 1) {
fCRC = u;
} else if (sscanf(line, " ctsdeltalength = %u", &u) == 1) {
fCtsdeltalength = u;
} else if (sscanf(line, " de-interleavebuffersize = %u", &u) == 1) {
fDe_interleavebuffersize = u;
} else if (sscanf(line, " dtsdeltalength = %u", &u) == 1) {
fDtsdeltalength = u;
} else if (sscanf(line, " indexdeltalength = %u", &u) == 1) {
fIndexdeltalength = u;
} else if (sscanf(line, " indexlength = %u", &u) == 1) {
fIndexlength = u;
} else if (sscanf(line, " interleaving = %u", &u) == 1) {
fInterleaving = u;
} else if (sscanf(line, " maxdisplacement = %u", &u) == 1) {
fMaxdisplacement = u;
} else if (sscanf(line, " objecttype = %u", &u) == 1) {
fObjecttype = u;
} else if (sscanf(line, " octet-align = %u", &u) == 1) {
fOctetalign = u;
} else if (sscanf(line, " profile-level-id = %x", &u) == 1) {
// Note that the "profile-level-id" parameter is assumed to be hexadecimal
fProfile_level_id = u;
} else if (sscanf(line, " robust-sorting = %u", &u) == 1) {
fRobustsorting = u;
} else if (sscanf(line, " sizelength = %u", &u) == 1) {
fSizelength = u;
} else if (sscanf(line, " streamstateindication = %u", &u) == 1) {
fStreamstateindication = u;
} else if (sscanf(line, " streamtype = %u", &u) == 1) {
fStreamtype = u;
} else if (sscanf(line, " cpresent = %u", &u) == 1) {
fCpresent = u != 0;
} else if (sscanf(line, " randomaccessindication = %u", &u) == 1) {
fRandomaccessindication = u != 0;
} else if (sscanf(sdpLine, " config = %[^; \t\r\n]", valueStr) == 1 ||
sscanf(sdpLine, " configuration = %[^; \t\r\n]", valueStr) == 1) {
// Note: We used "sdpLine" here, because the value may be case-sensitive (if it's Base-64).
delete[] fConfig; fConfig = strDup(valueStr);
} else if (sscanf(line, " mode = %[^; \t\r\n]", valueStr) == 1) {
delete[] fMode; fMode = strDup(valueStr);
} else if (sscanf(sdpLine, " sprop-parameter-sets = %[^; \t\r\n]", valueStr) == 1) {
// Note: We used "sdpLine" here, because the value is case-sensitive.
delete[] fSpropParameterSets; fSpropParameterSets = strDup(valueStr);
} else if (sscanf(line, " emphasis = %[^; \t\r\n]", valueStr) == 1) {
delete[] fEmphasis; fEmphasis = strDup(valueStr);
} else if (sscanf(sdpLine, " channel-order = %[^; \t\r\n]", valueStr) == 1) {
// Note: We used "sdpLine" here, because the value is case-sensitive.
delete[] fChannelOrder; fChannelOrder = strDup(valueStr);
} else if (sscanf(line, " width = %u", &u) == 1) {
// A non-standard parameter, but one that's often used:
fVideoWidth = u;
} else if (sscanf(line, " height = %u", &u) == 1) {
// A non-standard parameter, but one that's often used:
fVideoHeight = u;
} else {
// Some of the above parameters are Boolean. Check whether the parameter
// names appear alone, without a "= 1" at the end:
if (sscanf(line, " %[^; \t\r\n]", valueStr) == 1) {
if (strcmp(valueStr, "octet-align") == 0) {
fOctetalign = 1;
} else if (strcmp(valueStr, "cpresent") == 0) {
fCpresent = True;
} else if (strcmp(valueStr, "crc") == 0) {
fCRC = 1;
} else if (strcmp(valueStr, "robust-sorting") == 0) {
fRobustsorting = 1;
} else if (strcmp(valueStr, "randomaccessindication") == 0) {
fRandomaccessindication = True;
}
}
}
delete[] valueStr;
// Move to the next parameter assignment string:
while (*line != '\0' && *line != '\r' && *line != '\n'
&& *line != ';') ++line;
while (*line == ';') ++line;
// Do the same with sdpLine; needed for finding case sensitive values:
while (*sdpLine != '\0' && *sdpLine != '\r' && *sdpLine != '\n'
&& *sdpLine != ';') ++sdpLine;
while (*sdpLine == ';') ++sdpLine;
}
delete[] lineCopy;
return True;
} while (0);
return False;
}
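// For example, a (hypothetical) MPEG4-GENERIC audio line such as
// "a=fmtp:96 streamtype=5;config=1210;sizelength=13;indexlength=3;indexdeltalength=3"
// would set fStreamtype = 5, fConfig = "1210", fSizelength = 13,
// fIndexlength = 3, and fIndexdeltalength = 3. (Note that "config" is read
// from the original-case "sdpLine", because its value may be case-sensitive.)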
Boolean MediaSubsession
::parseSDPAttribute_source_filter(char const* sdpLine) {
return parseSourceFilterAttribute(sdpLine, fSourceFilterAddr);
}
Boolean MediaSubsession::parseSDPAttribute_x_dimensions(char const* sdpLine) {
// Check for a "a=x-dimensions:," line:
Boolean parseSuccess = False;
int width, height;
if (sscanf(sdpLine, "a=x-dimensions:%d,%d", &width, &height) == 2) {
parseSuccess = True;
fVideoWidth = (unsigned short)width;
fVideoHeight = (unsigned short)height;
}
return parseSuccess;
}
Boolean MediaSubsession::parseSDPAttribute_framerate(char const* sdpLine) {
// Check for a "a=framerate: " or "a=x-framerate: " line:
Boolean parseSuccess = False;
float frate;
int rate;
if (sscanf(sdpLine, "a=framerate: %f", &frate) == 1 || sscanf(sdpLine, "a=framerate:%f", &frate) == 1) {
parseSuccess = True;
fVideoFPS = (unsigned)frate;
} else if (sscanf(sdpLine, "a=x-framerate: %d", &rate) == 1) {
parseSuccess = True;
fVideoFPS = (unsigned)rate;
}
return parseSuccess;
}
Boolean MediaSubsession::createSourceObjects(int useSpecialRTPoffset) {
do {
// First, check "fProtocolName"
if (strcmp(fProtocolName, "UDP") == 0) {
// A UDP-packetized stream (*not* a RTP stream)
fReadSource = BasicUDPSource::createNew(env(), fRTPSocket);
fRTPSource = NULL; // Note!
if (strcmp(fCodecName, "MP2T") == 0) { // MPEG-2 Transport Stream
fReadSource = MPEG2TransportStreamFramer::createNew(env(), fReadSource);
// this sets "durationInMicroseconds" correctly, based on the PCR values
}
} else {
// Check "fCodecName" against the set of codecs that we support,
// and create our RTP source accordingly
// (Later make this code more efficient, as this set grows #####)
// (Also, add more fmts that can be implemented by SimpleRTPSource#####)
Boolean createSimpleRTPSource = False; // by default; can be changed below
Boolean doNormalMBitRule = False; // default behavior if "createSimpleRTPSource" is True
if (strcmp(fCodecName, "QCELP") == 0) { // QCELP audio
fReadSource =
QCELPAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource,
fRTPPayloadFormat,
fRTPTimestampFrequency);
// Note that fReadSource will differ from fRTPSource in this case
} else if (strcmp(fCodecName, "AMR") == 0) { // AMR audio (narrowband)
fReadSource =
AMRAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource,
fRTPPayloadFormat, False /*isWideband*/,
fNumChannels, fOctetalign != 0, fInterleaving,
fRobustsorting != 0, fCRC != 0);
// Note that fReadSource will differ from fRTPSource in this case
} else if (strcmp(fCodecName, "AMR-WB") == 0) { // AMR audio (wideband)
fReadSource =
AMRAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource,
fRTPPayloadFormat, True /*isWideband*/,
fNumChannels, fOctetalign != 0, fInterleaving,
fRobustsorting != 0, fCRC != 0);
// Note that fReadSource will differ from fRTPSource in this case
} else if (strcmp(fCodecName, "MPA") == 0) { // MPEG-1 or 2 audio
fReadSource = fRTPSource
= MPEG1or2AudioRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "MPA-ROBUST") == 0) { // robust MP3 audio
fReadSource = fRTPSource
= MP3ADURTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
fRTPTimestampFrequency);
if (fRTPSource == NULL) break;
if (!fReceiveRawMP3ADUs) {
// Add a filter that deinterleaves the ADUs after depacketizing them:
MP3ADUdeinterleaver* deinterleaver
= MP3ADUdeinterleaver::createNew(env(), fRTPSource);
if (deinterleaver == NULL) break;
// Add another filter that converts these ADUs to MP3 frames:
fReadSource = MP3FromADUSource::createNew(env(), deinterleaver);
}
} else if (strcmp(fCodecName, "X-MP3-DRAFT-00") == 0) {
// a non-standard variant of "MPA-ROBUST" used by RealNetworks
// (one 'ADU'ized MP3 frame per packet; no headers)
fRTPSource
= SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
fRTPTimestampFrequency,
"audio/MPA-ROBUST" /*hack*/);
if (fRTPSource == NULL) break;
// Add a filter that converts these ADUs to MP3 frames:
fReadSource = MP3FromADUSource::createNew(env(), fRTPSource,
False /*no ADU header*/);
} else if (strcmp(fCodecName, "MP4A-LATM") == 0) { // MPEG-4 LATM audio
fReadSource = fRTPSource
= MPEG4LATMAudioRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "VORBIS") == 0) { // Vorbis audio
fReadSource = fRTPSource
= VorbisAudioRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "VP8") == 0) { // VP8 video
fReadSource = fRTPSource
= VP8VideoRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "AC3") == 0 || strcmp(fCodecName, "EAC3") == 0) { // AC3 audio
fReadSource = fRTPSource
= AC3AudioRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "MP4V-ES") == 0) { // MPEG-4 Elementary Stream video
fReadSource = fRTPSource
= MPEG4ESVideoRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "MPEG4-GENERIC") == 0) {
fReadSource = fRTPSource
= MPEG4GenericRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency,
fMediumName, fMode,
fSizelength, fIndexlength,
fIndexdeltalength);
} else if (strcmp(fCodecName, "MPV") == 0) { // MPEG-1 or 2 video
fReadSource = fRTPSource
= MPEG1or2VideoRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "MP2T") == 0) { // MPEG-2 Transport Stream
fRTPSource = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
fRTPTimestampFrequency, "video/MP2T",
0, False);
fReadSource = MPEG2TransportStreamFramer::createNew(env(), fRTPSource);
// this sets "durationInMicroseconds" correctly, based on the PCR values
} else if (strcmp(fCodecName, "H261") == 0) { // H.261
fReadSource = fRTPSource
= H261VideoRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "H263-1998") == 0 ||
strcmp(fCodecName, "H263-2000") == 0) { // H.263+
fReadSource = fRTPSource
= H263plusVideoRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "H264") == 0) {
fReadSource = fRTPSource
= H264VideoRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "DV") == 0) {
fReadSource = fRTPSource
= DVVideoRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency);
} else if (strcmp(fCodecName, "JPEG") == 0) { // motion JPEG
if (fReceiveRawJPEGFrames) {
// Special case (used when proxying JPEG/RTP streams): Receive each JPEG/RTP packet, including the special RTP headers:
fReadSource = fRTPSource
= SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
fRTPTimestampFrequency, "video/JPEG",
0/*special offset*/, False/*doNormalMBitRule => ignore the 'M' bit*/);
} else {
// Normal case: Receive each JPEG frame as a complete, displayable JPEG image:
fReadSource = fRTPSource
= JPEGVideoRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency,
videoWidth(),
videoHeight());
}
} else if (strcmp(fCodecName, "X-QT") == 0
|| strcmp(fCodecName, "X-QUICKTIME") == 0) {
// Generic QuickTime streams, as defined in
// <http://developer.apple.com/quicktime/icefloe/dispatch026.html>
char* mimeType
= new char[strlen(mediumName()) + strlen(codecName()) + 2] ;
sprintf(mimeType, "%s/%s", mediumName(), codecName());
fReadSource = fRTPSource
= QuickTimeGenericRTPSource::createNew(env(), fRTPSocket,
fRTPPayloadFormat,
fRTPTimestampFrequency,
mimeType);
delete[] mimeType;
} else if ( strcmp(fCodecName, "PCMU") == 0 // PCM u-law audio
|| strcmp(fCodecName, "GSM") == 0 // GSM audio
|| strcmp(fCodecName, "DVI4") == 0 // DVI4 (IMA ADPCM) audio
|| strcmp(fCodecName, "PCMA") == 0 // PCM a-law audio
|| strcmp(fCodecName, "MP1S") == 0 // MPEG-1 System Stream
|| strcmp(fCodecName, "MP2P") == 0 // MPEG-2 Program Stream
|| strcmp(fCodecName, "L8") == 0 // 8-bit linear audio
|| strcmp(fCodecName, "L16") == 0 // 16-bit linear audio
|| strcmp(fCodecName, "L20") == 0 // 20-bit linear audio (RFC 3190)
|| strcmp(fCodecName, "L24") == 0 // 24-bit linear audio (RFC 3190)
|| strcmp(fCodecName, "G726-16") == 0 // G.726, 16 kbps
|| strcmp(fCodecName, "G726-24") == 0 // G.726, 24 kbps
|| strcmp(fCodecName, "G726-32") == 0 // G.726, 32 kbps
|| strcmp(fCodecName, "G726-40") == 0 // G.726, 40 kbps
|| strcmp(fCodecName, "SPEEX") == 0 // SPEEX audio
|| strcmp(fCodecName, "ILBC") == 0 // iLBC audio
|| strcmp(fCodecName, "OPUS") == 0 // Opus audio
|| strcmp(fCodecName, "T140") == 0 // T.140 text (RFC 4103)
|| strcmp(fCodecName, "DAT12") == 0 // 12-bit nonlinear audio (RFC 3190)
|| strcmp(fCodecName, "VND.ONVIF.METADATA") == 0 // 'ONVIF' 'metadata' (a XML document)
) {
createSimpleRTPSource = True;
useSpecialRTPoffset = 0;
if (strcmp(fCodecName, "VND.ONVIF.METADATA") == 0) {
// This RTP payload format uses the RTP "M" bit to indicate the end of the content (a XML document):
doNormalMBitRule = True;
}
} else if (useSpecialRTPoffset >= 0) {
// We don't know this RTP payload format, but try to receive
// it using a 'SimpleRTPSource' with the specified header offset:
createSimpleRTPSource = True;
} else {
env().setResultMsg("RTP payload format unknown or not supported");
break;
}
if (createSimpleRTPSource) {
char* mimeType
= new char[strlen(mediumName()) + strlen(codecName()) + 2] ;
sprintf(mimeType, "%s/%s", mediumName(), codecName());
fReadSource = fRTPSource
= SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
fRTPTimestampFrequency, mimeType,
(unsigned)useSpecialRTPoffset,
doNormalMBitRule);
delete[] mimeType;
}
}
return True;
} while (0);
return False; // an error occurred
}
live/liveMedia/MP3Internals.hh 000444 001751 000000 00000010216 12265042432 016436 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// MP3 internal implementation details
// C++ header
#ifndef _MP3_INTERNALS_HH
#define _MP3_INTERNALS_HH
#ifndef _BOOLEAN_HH
#include "Boolean.hh"
#endif
#ifndef _BIT_VECTOR_HH
#include "BitVector.hh"
#endif
typedef struct MP3SideInfo {
unsigned main_data_begin;
unsigned private_bits;
typedef struct gr_info_s {
int scfsi;
unsigned part2_3_length;
unsigned big_values;
unsigned global_gain;
unsigned scalefac_compress;
unsigned window_switching_flag;
unsigned block_type;
unsigned mixed_block_flag;
unsigned table_select[3];
unsigned region0_count;
unsigned region1_count;
unsigned subblock_gain[3];
unsigned maxband[3];
unsigned maxbandl;
unsigned maxb;
unsigned region1start;
unsigned region2start;
unsigned preflag;
unsigned scalefac_scale;
unsigned count1table_select;
double *full_gain[3];
double *pow2gain;
} gr_info_s_t;
struct {
gr_info_s_t gr[2];
} ch[2];
} MP3SideInfo_t;
#define SBLIMIT 32
#define MAX_MP3_FRAME_SIZE 2500 /* also big enough for an 'ADU'ized frame */
class MP3FrameParams {
public:
MP3FrameParams();
~MP3FrameParams();
// 4-byte MPEG header:
unsigned hdr;
// a buffer that can be used to hold the rest of the frame:
unsigned char frameBytes[MAX_MP3_FRAME_SIZE];
// public parameters derived from the header
void setParamsFromHeader(); // this sets them
Boolean isMPEG2;
unsigned layer; // currently only 3 is supported
unsigned bitrate; // in kbps
unsigned samplingFreq;
Boolean isStereo;
Boolean isFreeFormat;
unsigned frameSize; // doesn't include the initial 4-byte header
unsigned sideInfoSize;
Boolean hasCRC;
void setBytePointer(unsigned char const* restOfFrame,
unsigned totNumBytes) {// called during setup
bv.setup((unsigned char*)restOfFrame, 0, 8*totNumBytes);
}
// other, public parameters used when parsing input (perhaps get rid of)
unsigned oldHdr, firstHdr;
// Extract (unpack) the side info from the frame into a struct:
void getSideInfo(MP3SideInfo& si);
// The bit pointer used for reading data from frame data
unsigned getBits(unsigned numBits) { return bv.getBits(numBits); }
unsigned get1Bit() { return bv.get1Bit(); }
private:
BitVector bv;
// other, private parameters derived from the header
unsigned bitrateIndex;
unsigned samplingFreqIndex;
Boolean isMPEG2_5;
Boolean padding;
Boolean extension;
unsigned mode;
unsigned mode_ext;
Boolean copyright;
Boolean original;
unsigned emphasis;
unsigned stereo;
private:
unsigned computeSideInfoSize();
};
unsigned ComputeFrameSize(unsigned bitrate, unsigned samplingFreq,
Boolean usePadding, Boolean isMPEG2,
unsigned char layer);
Boolean GetADUInfoFromMP3Frame(unsigned char const* framePtr,
unsigned totFrameSize,
unsigned& hdr, unsigned& frameSize,
MP3SideInfo& sideInfo, unsigned& sideInfoSize,
unsigned& backpointer, unsigned& aduSize);
Boolean ZeroOutMP3SideInfo(unsigned char* framePtr, unsigned totFrameSize,
unsigned newBackpointer);
unsigned TranscodeMP3ADU(unsigned char const* fromPtr, unsigned fromSize,
unsigned toBitrate,
unsigned char* toPtr, unsigned toMaxSize,
unsigned& availableBytesForBackpointer);
// returns the size of the resulting ADU (0 on failure)
#endif
live/liveMedia/BitVector.cpp 000444 001751 000000 00000011716 12265042432 016251 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Bit Vector data structure
// Implementation
#include "BitVector.hh"
BitVector::BitVector(unsigned char* baseBytePtr,
unsigned baseBitOffset,
unsigned totNumBits) {
setup(baseBytePtr, baseBitOffset, totNumBits);
}
void BitVector::setup(unsigned char* baseBytePtr,
unsigned baseBitOffset,
unsigned totNumBits) {
fBaseBytePtr = baseBytePtr;
fBaseBitOffset = baseBitOffset;
fTotNumBits = totNumBits;
fCurBitIndex = 0;
}
static unsigned char const singleBitMask[8]
= {0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01};
#define MAX_LENGTH 32
void BitVector::putBits(unsigned from, unsigned numBits) {
if (numBits == 0) return;
unsigned char tmpBuf[4];
unsigned overflowingBits = 0;
if (numBits > MAX_LENGTH) {
numBits = MAX_LENGTH;
}
if (numBits > fTotNumBits - fCurBitIndex) {
overflowingBits = numBits - (fTotNumBits - fCurBitIndex);
}
tmpBuf[0] = (unsigned char)(from>>24);
tmpBuf[1] = (unsigned char)(from>>16);
tmpBuf[2] = (unsigned char)(from>>8);
tmpBuf[3] = (unsigned char)from;
shiftBits(fBaseBytePtr, fBaseBitOffset + fCurBitIndex, /* to */
tmpBuf, MAX_LENGTH - numBits, /* from */
numBits - overflowingBits /* num bits */);
fCurBitIndex += numBits - overflowingBits;
}
void BitVector::put1Bit(unsigned bit) {
// The following is equivalent to "putBits(..., 1)", except faster:
if (fCurBitIndex >= fTotNumBits) { /* overflow */
return;
} else {
unsigned totBitOffset = fBaseBitOffset + fCurBitIndex++;
unsigned char mask = singleBitMask[totBitOffset%8];
if (bit) {
fBaseBytePtr[totBitOffset/8] |= mask;
} else {
fBaseBytePtr[totBitOffset/8] &=~ mask;
}
}
}
unsigned BitVector::getBits(unsigned numBits) {
if (numBits == 0) return 0;
unsigned char tmpBuf[4];
unsigned overflowingBits = 0;
if (numBits > MAX_LENGTH) {
numBits = MAX_LENGTH;
}
if (numBits > fTotNumBits - fCurBitIndex) {
overflowingBits = numBits - (fTotNumBits - fCurBitIndex);
}
shiftBits(tmpBuf, 0, /* to */
fBaseBytePtr, fBaseBitOffset + fCurBitIndex, /* from */
numBits - overflowingBits /* num bits */);
fCurBitIndex += numBits - overflowingBits;
unsigned result
= (tmpBuf[0]<<24) | (tmpBuf[1]<<16) | (tmpBuf[2]<<8) | tmpBuf[3];
result >>= (MAX_LENGTH - numBits); // move into low-order part of word
result &= (0xFFFFFFFF << overflowingBits); // so any overflow bits are 0
return result;
}
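// Worked example (hypothetical): with frame data beginning 0xB3 0x51
// (bits 1011 0011 0101 0001) and fCurBitIndex == 0, getBits(4) returns
// 0xB (1011) and advances fCurBitIndex to 4; a subsequent getBits(6)
// returns 0x0D (0011 01).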
unsigned BitVector::get1Bit() {
// The following is equivalent to "getBits(1)", except faster:
if (fCurBitIndex >= fTotNumBits) { /* overflow */
return 0;
} else {
unsigned totBitOffset = fBaseBitOffset + fCurBitIndex++;
unsigned char curFromByte = fBaseBytePtr[totBitOffset/8];
unsigned result = (curFromByte >> (7-(totBitOffset%8))) & 0x01;
return result;
}
}
void BitVector::skipBits(unsigned numBits) {
if (numBits > fTotNumBits - fCurBitIndex) { /* overflow */
fCurBitIndex = fTotNumBits;
} else {
fCurBitIndex += numBits;
}
}
unsigned BitVector::get_expGolomb() {
unsigned numLeadingZeroBits = 0;
unsigned codeStart = 1;
while (get1Bit() == 0 && fCurBitIndex < fTotNumBits) {
++numLeadingZeroBits;
codeStart *= 2;
}
return codeStart - 1 + getBits(numLeadingZeroBits);
}
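// Worked example (hypothetical bitstream beginning 00101): the two leading
// zero bits give numLeadingZeroBits == 2 and codeStart == 4; the '1' stop bit
// is consumed by the loop, and getBits(2) then reads "01" == 1, so the result
// is 4 - 1 + 1 = 4 (standard unsigned Exp-Golomb decoding, as used for
// H.264 ue(v) fields).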
void shiftBits(unsigned char* toBasePtr, unsigned toBitOffset,
unsigned char const* fromBasePtr, unsigned fromBitOffset,
unsigned numBits) {
if (numBits == 0) return;
/* Note that from and to may overlap, if from>to */
unsigned char const* fromBytePtr = fromBasePtr + fromBitOffset/8;
unsigned fromBitRem = fromBitOffset%8;
unsigned char* toBytePtr = toBasePtr + toBitOffset/8;
unsigned toBitRem = toBitOffset%8;
while (numBits-- > 0) {
unsigned char fromBitMask = singleBitMask[fromBitRem];
unsigned char fromBit = (*fromBytePtr)&fromBitMask;
unsigned char toBitMask = singleBitMask[toBitRem];
if (fromBit != 0) {
*toBytePtr |= toBitMask;
} else {
*toBytePtr &=~ toBitMask;
}
if (++fromBitRem == 8) {
++fromBytePtr;
fromBitRem = 0;
}
if (++toBitRem == 8) {
++toBytePtr;
toBitRem = 0;
}
}
}
live/liveMedia/MPEG1or2AudioStreamFramer.cpp 000444 001751 000000 00000015243 12265042432 021076 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A filter that breaks up an MPEG (1,2) audio elementary stream into frames
// Implementation
#include "MPEG1or2AudioStreamFramer.hh"
#include "StreamParser.hh"
#include "MP3Internals.hh"
#include <GroupsockHelper.hh> // for gettimeofday()
////////// MPEG1or2AudioStreamParser definition //////////
class MPEG1or2AudioStreamParser: public StreamParser {
public:
MPEG1or2AudioStreamParser(MPEG1or2AudioStreamFramer* usingSource,
FramedSource* inputSource);
virtual ~MPEG1or2AudioStreamParser();
public:
unsigned parse(unsigned& numTruncatedBytes);
// returns the size of the frame that was acquired, or 0 if none was
void registerReadInterest(unsigned char* to, unsigned maxSize);
MP3FrameParams const& currentFrame() const { return fCurrentFrame; }
private:
unsigned char* fTo;
unsigned fMaxSize;
// Parameters of the most recently read frame:
MP3FrameParams fCurrentFrame; // also works for layer I or II
};
////////// MPEG1or2AudioStreamFramer implementation //////////
MPEG1or2AudioStreamFramer
::MPEG1or2AudioStreamFramer(UsageEnvironment& env, FramedSource* inputSource,
Boolean syncWithInputSource)
: FramedFilter(env, inputSource),
fSyncWithInputSource(syncWithInputSource) {
reset();
fParser = new MPEG1or2AudioStreamParser(this, inputSource);
}
MPEG1or2AudioStreamFramer::~MPEG1or2AudioStreamFramer() {
delete fParser;
}
MPEG1or2AudioStreamFramer*
MPEG1or2AudioStreamFramer::createNew(UsageEnvironment& env,
FramedSource* inputSource,
Boolean syncWithInputSource) {
// Need to add source type checking here??? #####
return new MPEG1or2AudioStreamFramer(env, inputSource, syncWithInputSource);
}
void MPEG1or2AudioStreamFramer::flushInput() {
reset();
fParser->flushInput();
}
void MPEG1or2AudioStreamFramer::reset() {
// Use the current wallclock time as the initial 'presentation time':
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
resetPresentationTime(timeNow);
}
void MPEG1or2AudioStreamFramer
::resetPresentationTime(struct timeval newPresentationTime) {
fNextFramePresentationTime = newPresentationTime;
}
void MPEG1or2AudioStreamFramer::doGetNextFrame() {
fParser->registerReadInterest(fTo, fMaxSize);
continueReadProcessing();
}
#define MILLION 1000000
static unsigned const numSamplesByLayer[4] = {0, 384, 1152, 1152};
struct timeval MPEG1or2AudioStreamFramer::currentFramePlayTime() const {
MP3FrameParams const& fr = fParser->currentFrame();
unsigned const numSamples = numSamplesByLayer[fr.layer];
struct timeval result;
unsigned const freq = fr.samplingFreq*(1 + fr.isMPEG2);
if (freq == 0) {
result.tv_sec = 0;
result.tv_usec = 0;
return result;
}
// result is numSamples/freq
unsigned const uSeconds
= ((numSamples*2*MILLION)/freq + 1)/2; // rounds to nearest integer
result.tv_sec = uSeconds/MILLION;
result.tv_usec = uSeconds%MILLION;
return result;
}
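// Worked example (hypothetical): a layer-3 MPEG-1 frame at 44100 Hz carries
// 1152 samples, so freq == 44100 and
// uSeconds = ((1152*2*1000000)/44100 + 1)/2 = 26122
// i.e., a play time of about 26.1 ms per frame.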
void MPEG1or2AudioStreamFramer
::continueReadProcessing(void* clientData,
unsigned char* /*ptr*/, unsigned /*size*/,
struct timeval presentationTime) {
MPEG1or2AudioStreamFramer* framer = (MPEG1or2AudioStreamFramer*)clientData;
if (framer->fSyncWithInputSource) {
framer->resetPresentationTime(presentationTime);
}
framer->continueReadProcessing();
}
void MPEG1or2AudioStreamFramer::continueReadProcessing() {
unsigned acquiredFrameSize = fParser->parse(fNumTruncatedBytes);
if (acquiredFrameSize > 0) {
// We were able to acquire a frame from the input.
// It has already been copied to the reader's space.
fFrameSize = acquiredFrameSize;
// Also set the presentation time, and increment it for next time,
// based on the length of this frame:
fPresentationTime = fNextFramePresentationTime;
struct timeval framePlayTime = currentFramePlayTime();
fDurationInMicroseconds = framePlayTime.tv_sec*MILLION + framePlayTime.tv_usec;
fNextFramePresentationTime.tv_usec += framePlayTime.tv_usec;
fNextFramePresentationTime.tv_sec
+= framePlayTime.tv_sec + fNextFramePresentationTime.tv_usec/MILLION;
fNextFramePresentationTime.tv_usec %= MILLION;
// Call our own 'after getting' function. Because we're not a 'leaf'
// source, we can call this directly, without risking infinite recursion.
afterGetting(this);
} else {
// We were unable to parse a complete frame from the input, because:
// - we had to read more data from the source stream, or
// - the source stream has ended.
}
}
////////// MPEG1or2AudioStreamParser implementation //////////
MPEG1or2AudioStreamParser
::MPEG1or2AudioStreamParser(MPEG1or2AudioStreamFramer* usingSource,
FramedSource* inputSource)
: StreamParser(inputSource, FramedSource::handleClosure, usingSource,
&MPEG1or2AudioStreamFramer::continueReadProcessing, usingSource) {
}
MPEG1or2AudioStreamParser::~MPEG1or2AudioStreamParser() {
}
void MPEG1or2AudioStreamParser::registerReadInterest(unsigned char* to,
unsigned maxSize) {
fTo = to;
fMaxSize = maxSize;
}
unsigned MPEG1or2AudioStreamParser::parse(unsigned& numTruncatedBytes) {
try {
saveParserState();
// We expect a MPEG audio header (first 11 bits set to 1) at the start:
while (((fCurrentFrame.hdr = test4Bytes())&0xFFE00000) != 0xFFE00000) {
skipBytes(1);
saveParserState();
}
fCurrentFrame.setParamsFromHeader();
// Copy the frame to the requested destination:
unsigned frameSize = fCurrentFrame.frameSize + 4; // include header
if (frameSize > fMaxSize) {
numTruncatedBytes = frameSize - fMaxSize;
frameSize = fMaxSize;
} else {
numTruncatedBytes = 0;
}
getBytes(fTo, frameSize);
skipBytes(numTruncatedBytes);
return frameSize;
} catch (int /*e*/) {
#ifdef DEBUG
fprintf(stderr, "MPEG1or2AudioStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
#endif
return 0; // the parsing got interrupted
}
}
live/liveMedia/MP3ADUTranscoder.cpp 000444 001751 000000 00000006463 12265042432 017331 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Transcoder for ADUized MP3 frames
// Implementation
#include "MP3ADUTranscoder.hh"
#include "MP3Internals.hh"
#include <string.h> // for strcmp()
MP3ADUTranscoder::MP3ADUTranscoder(UsageEnvironment& env,
unsigned outBitrate /* in kbps */,
FramedSource* inputSource)
: FramedFilter(env, inputSource),
fOutBitrate(outBitrate),
fAvailableBytesForBackpointer(0),
fOrigADU(new unsigned char[MAX_MP3_FRAME_SIZE]) {
}
MP3ADUTranscoder::~MP3ADUTranscoder() {
delete[] fOrigADU;
}
MP3ADUTranscoder* MP3ADUTranscoder::createNew(UsageEnvironment& env,
unsigned outBitrate /* in kbps */,
FramedSource* inputSource) {
// The source must be an MP3 ADU source:
if (strcmp(inputSource->MIMEtype(), "audio/MPA-ROBUST") != 0) {
env.setResultMsg(inputSource->name(), " is not an MP3 ADU source");
return NULL;
}
return new MP3ADUTranscoder(env, outBitrate, inputSource);
}
void MP3ADUTranscoder::getAttributes() const {
// Begin by getting the attributes from our input source:
fInputSource->getAttributes();
// Then modify them by appending the corrected bandwidth
char buffer[30];
sprintf(buffer, " bandwidth %d", outBitrate());
envir().appendToResultMsg(buffer);
}
void MP3ADUTranscoder::doGetNextFrame() {
fInputSource->getNextFrame(fOrigADU, MAX_MP3_FRAME_SIZE,
afterGettingFrame, this, handleClosure, this);
}
void MP3ADUTranscoder::afterGettingFrame(void* clientData,
unsigned numBytesRead,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
MP3ADUTranscoder* transcoder = (MP3ADUTranscoder*)clientData;
transcoder->afterGettingFrame1(numBytesRead, numTruncatedBytes,
presentationTime, durationInMicroseconds);
}
void MP3ADUTranscoder::afterGettingFrame1(unsigned numBytesRead,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
fNumTruncatedBytes = numTruncatedBytes; // but can we handle this being >0? #####
fPresentationTime = presentationTime;
fDurationInMicroseconds = durationInMicroseconds;
fFrameSize = TranscodeMP3ADU(fOrigADU, numBytesRead, fOutBitrate,
fTo, fMaxSize, fAvailableBytesForBackpointer);
if (fFrameSize == 0) { // internal error - bad ADU data?
handleClosure(this);
return;
}
// Call our own 'after getting' function. Because we're not a 'leaf'
// source, we can call this directly, without risking infinite recursion.
afterGetting(this);
}
live/liveMedia/MPEG1or2VideoRTPSink.cpp 000444 001751 000000 00000015165 12265042432 020010 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for MPEG video (RFC 2250)
// Implementation
#include "MPEG1or2VideoRTPSink.hh"
#include "MPEG1or2VideoStreamFramer.hh"
MPEG1or2VideoRTPSink::MPEG1or2VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs)
: VideoRTPSink(env, RTPgs, 32, 90000, "MPV") {
fPictureState.temporal_reference = 0;
fPictureState.picture_coding_type = fPictureState.vector_code_bits = 0;
}
MPEG1or2VideoRTPSink::~MPEG1or2VideoRTPSink() {
}
MPEG1or2VideoRTPSink*
MPEG1or2VideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) {
return new MPEG1or2VideoRTPSink(env, RTPgs);
}
Boolean MPEG1or2VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
// Our source must be an appropriate framer:
return source.isMPEG1or2VideoStreamFramer();
}
Boolean MPEG1or2VideoRTPSink::allowFragmentationAfterStart() const {
return True;
}
Boolean MPEG1or2VideoRTPSink
::frameCanAppearAfterPacketStart(unsigned char const* frameStart,
unsigned numBytesInFrame) const {
// A 'frame' (which in this context can mean a header or a slice as well as a
// complete picture) can appear at other than the first position in a packet
// in all situations, EXCEPT when it follows the end of (i.e., the last slice
// of) a picture. I.e., the headers at the beginning of a picture must
// appear at the start of a RTP packet.
if (!fPreviousFrameWasSlice) return True;
// A slice is already packed into this packet. We allow this new 'frame'
// to be packed after it, provided that it is also a slice:
return numBytesInFrame >= 4
&& frameStart[0] == 0 && frameStart[1] == 0 && frameStart[2] == 1
&& frameStart[3] >= 1 && frameStart[3] <= 0xAF;
}
#define VIDEO_SEQUENCE_HEADER_START_CODE 0x000001B3
#define PICTURE_START_CODE 0x00000100
void MPEG1or2VideoRTPSink
::doSpecialFrameHandling(unsigned fragmentationOffset,
unsigned char* frameStart,
unsigned numBytesInFrame,
struct timeval framePresentationTime,
unsigned numRemainingBytes) {
Boolean thisFrameIsASlice = False; // until we learn otherwise
if (isFirstFrameInPacket()) {
fSequenceHeaderPresent = fPacketBeginsSlice = fPacketEndsSlice = False;
}
if (fragmentationOffset == 0) {
// Begin by inspecting the 4-byte code at the start of the frame:
if (numBytesInFrame < 4) return; // shouldn't happen
unsigned startCode = (frameStart[0]<<24) | (frameStart[1]<<16)
| (frameStart[2]<<8) | frameStart[3];
if (startCode == VIDEO_SEQUENCE_HEADER_START_CODE) {
// This is a video sequence header
fSequenceHeaderPresent = True;
} else if (startCode == PICTURE_START_CODE) {
// This is a picture header
// Record the parameters of this picture:
if (numBytesInFrame < 8) return; // shouldn't happen
unsigned next4Bytes = (frameStart[4]<<24) | (frameStart[5]<<16)
| (frameStart[6]<<8) | frameStart[7];
unsigned char byte8 = numBytesInFrame == 8 ? 0 : frameStart[8];
fPictureState.temporal_reference = (next4Bytes&0xFFC00000)>>(32-10);
fPictureState.picture_coding_type = (next4Bytes&0x00380000)>>(32-(10+3));
unsigned char FBV, BFC, FFV, FFC;
FBV = BFC = FFV = FFC = 0;
switch (fPictureState.picture_coding_type) {
case 3:
FBV = (byte8&0x40)>>6;
BFC = (byte8&0x38)>>3;
// fall through to:
case 2:
FFV = (next4Bytes&0x00000004)>>2;
FFC = ((next4Bytes&0x00000003)<<1) | ((byte8&0x80)>>7);
}
fPictureState.vector_code_bits = (FBV<<7) | (BFC<<4) | (FFV<<3) | FFC;
} else if ((startCode&0xFFFFFF00) == 0x00000100) {
unsigned char lastCodeByte = startCode&0xFF;
if (lastCodeByte <= 0xAF) {
// This is (the start of) a slice
thisFrameIsASlice = True;
} else {
// This is probably a GOP header; we don't do anything with this
}
} else {
// The first 4 bytes aren't a code that we recognize.
envir() << "Warning: MPEG1or2VideoRTPSink::doSpecialFrameHandling saw strange first 4 bytes "
<< (void*)startCode << ", but we're not a fragment\n";
}
} else {
// We're a fragment (other than the first) of a slice.
thisFrameIsASlice = True;
}
if (thisFrameIsASlice) {
// This packet begins a slice iff there's no fragmentation offset:
fPacketBeginsSlice = (fragmentationOffset == 0);
// This packet also ends a slice iff there are no fragments remaining:
fPacketEndsSlice = (numRemainingBytes == 0);
}
// Set the video-specific header based on the parameters that we've seen.
// Note that this may get done more than once, if several frames appear
// in the packet. That's OK, because this situation happens infrequently,
// and we want the video-specific header to reflect the most up-to-date
// information (in particular, from a Picture Header) anyway.
unsigned videoSpecificHeader =
// T == 0
(fPictureState.temporal_reference<<16) |
// AN == N == 0
(fSequenceHeaderPresent<<13) |
(fPacketBeginsSlice<<12) |
(fPacketEndsSlice<<11) |
(fPictureState.picture_coding_type<<8) |
fPictureState.vector_code_bits;
setSpecialHeaderWord(videoSpecificHeader);
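// (For reference, this is the RFC 2250 (section 3.4) MPEG video-specific
// header, from the most-significant bit down:
// MBZ(5) | T(1) | TR(10) | AN(1) | N(1) | S(1) | B(1) | E(1) | P(3) |
// FBV(1) | BFC(3) | FFV(1) | FFC(3)
// where S = sequence-header-present, B/E = packet begins/ends a slice,
// P = picture coding type, and FBV/BFC/FFV/FFC are the motion vector
// code bits recorded from the picture header above.)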
// Also set the RTP timestamp. (As above, we do this for each frame
// in the packet.)
setTimestamp(framePresentationTime);
// Set the RTP 'M' (marker) bit iff this frame ends (i.e., is the last
// slice of) a picture (and there are no fragments remaining).
// This relies on the source being a "MPEG1or2VideoStreamFramer".
MPEG1or2VideoStreamFramer* framerSource = (MPEG1or2VideoStreamFramer*)fSource;
if (framerSource != NULL && framerSource->pictureEndMarker()
&& numRemainingBytes == 0) {
setMarkerBit();
framerSource->pictureEndMarker() = False;
}
fPreviousFrameWasSlice = thisFrameIsASlice;
}
unsigned MPEG1or2VideoRTPSink::specialHeaderSize() const {
// There's a 4 byte special video header:
return 4;
}
live/liveMedia/MP3InternalsHuffman.hh 000444 001751 000000 00000005141 12265042432 017744 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// MP3 internal implementation details (Huffman encoding)
// C++ header
#ifndef _MP3_INTERNALS_HUFFMAN_HH
#define _MP3_INTERNALS_HUFFMAN_HH
#ifndef _MP3_INTERNALS_HH
#include "MP3Internals.hh"
#endif
void updateSideInfoForHuffman(MP3SideInfo& sideInfo, Boolean isMPEG2,
unsigned char const* mainDataPtr,
unsigned p23L0, unsigned p23L1,
unsigned& part23Length0a,
unsigned& part23Length0aTruncation,
unsigned& part23Length0b,
unsigned& part23Length0bTruncation,
unsigned& part23Length1a,
unsigned& part23Length1aTruncation,
unsigned& part23Length1b,
unsigned& part23Length1bTruncation);
#define SSLIMIT 18
class MP3HuffmanEncodingInfo {
public:
MP3HuffmanEncodingInfo(Boolean includeDecodedValues = False);
~MP3HuffmanEncodingInfo();
public:
unsigned numSamples;
unsigned allBitOffsets[SBLIMIT*SSLIMIT + 1];
unsigned reg1Start, reg2Start, bigvalStart; /* special bit offsets */
unsigned* decodedValues;
};
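// (When "includeDecodedValues" is set, "decodedValues" stores four entries -
// x, y, v, w - per decoded sample, consecutively; hence the allocation of
// (SBLIMIT*SSLIMIT + 1)*4 entries in the implementation.)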
/* forward */
void MP3HuffmanDecode(MP3SideInfo::gr_info_s_t* gr, Boolean isMPEG2,
unsigned char const* fromBasePtr,
unsigned fromBitOffset, unsigned fromLength,
unsigned& scaleFactorsLength,
MP3HuffmanEncodingInfo& hei);
extern unsigned char huffdec[]; // huffman table data
// The following are used if we process Huffman-decoded values
#ifdef FOUR_BYTE_SAMPLES
#define BYTES_PER_SAMPLE_VALUE 4
#else
#ifdef TWO_BYTE_SAMPLES
#define BYTES_PER_SAMPLE_VALUE 2
#else
// ONE_BYTE_SAMPLES
#define BYTES_PER_SAMPLE_VALUE 1
#endif
#endif
#ifdef DO_HUFFMAN_ENCODING
unsigned MP3HuffmanEncode(MP3SideInfo::gr_info_s_t const* gr,
unsigned char const* fromPtr,
unsigned char* toPtr, unsigned toBitOffset,
unsigned numHuffBits);
#endif
#endif
live/liveMedia/MP3InternalsHuffman.cpp 000444 001751 000000 00000070565 12265042432 020143 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// MP3 internal implementation details (Huffman encoding)
// Implementation
#include "MP3InternalsHuffman.hh"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
MP3HuffmanEncodingInfo
::MP3HuffmanEncodingInfo(Boolean includeDecodedValues) {
if (includeDecodedValues) {
decodedValues = new unsigned[(SBLIMIT*SSLIMIT + 1)*4];
} else {
decodedValues = NULL;
}
}
MP3HuffmanEncodingInfo::~MP3HuffmanEncodingInfo() {
delete[] decodedValues;
}
// This is crufty old code that needs to be cleaned up #####
static unsigned debugCount = 0; /* for debugging */
#define TRUNC_FAVORa
void updateSideInfoForHuffman(MP3SideInfo& sideInfo, Boolean isMPEG2,
unsigned char const* mainDataPtr,
unsigned p23L0, unsigned p23L1,
unsigned& part23Length0a,
unsigned& part23Length0aTruncation,
unsigned& part23Length0b,
unsigned& part23Length0bTruncation,
unsigned& part23Length1a,
unsigned& part23Length1aTruncation,
unsigned& part23Length1b,
unsigned& part23Length1bTruncation) {
int i, j;
unsigned sfLength, origTotABsize, adjustment;
MP3SideInfo::gr_info_s_t* gr;
/* First, Huffman-decode each part of the segment's main data,
to see at which bit-boundaries the samples appear:
*/
MP3HuffmanEncodingInfo hei;
++debugCount;
#ifdef DEBUG
fprintf(stderr, "usifh-start: p23L0: %d, p23L1: %d\n", p23L0, p23L1);
#endif
/* Process granule 0 */
{
gr = &(sideInfo.ch[0].gr[0]);
origTotABsize = gr->part2_3_length;
MP3HuffmanDecode(gr, isMPEG2, mainDataPtr, 0, origTotABsize, sfLength, hei);
/* Begin by computing new sizes for parts a & b (& their truncations) */
#ifdef DEBUG
fprintf(stderr, "usifh-0: %d, %d:%d, %d:%d, %d:%d, %d:%d, %d:%d\n",
hei.numSamples,
sfLength/8, sfLength%8,
hei.reg1Start/8, hei.reg1Start%8,
hei.reg2Start/8, hei.reg2Start%8,
hei.bigvalStart/8, hei.bigvalStart%8,
origTotABsize/8, origTotABsize%8);
#endif
if (p23L0 < sfLength) {
/* We can't use this, so give it all to the next granule: */
p23L1 += p23L0;
p23L0 = 0;
}
part23Length0a = hei.bigvalStart;
part23Length0b = origTotABsize - hei.bigvalStart;
part23Length0aTruncation = part23Length0bTruncation = 0;
if (origTotABsize > p23L0) {
/* We need to shorten one or both of fields a & b */
unsigned truncation = origTotABsize - p23L0;
#ifdef TRUNC_FAIRLY
part23Length0aTruncation = (truncation*(part23Length0a-sfLength))
/(origTotABsize-sfLength);
part23Length0bTruncation = truncation - part23Length0aTruncation;
#endif
#ifdef TRUNC_FAVORa
part23Length0bTruncation
= (truncation > part23Length0b) ? part23Length0b : truncation;
part23Length0aTruncation = truncation - part23Length0bTruncation;
#endif
#ifdef TRUNC_FAVORb
part23Length0aTruncation = (truncation > part23Length0a-sfLength)
? (part23Length0a-sfLength) : truncation;
part23Length0bTruncation = truncation - part23Length0aTruncation;
#endif
}
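/* An illustrative example: if origTotABsize is 1000 bits, p23L0 is 800, and
the 'big values' part ends at bit 900 (part23Length0a == 900,
part23Length0b == 100), then truncation == 200. Under TRUNC_FAVORa, all
100 bits of part b are truncated, and the remaining 100 bits are shaved
from part a. */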
/* ASSERT: part23Length0xTruncation <= part23Length0x */
part23Length0a -= part23Length0aTruncation;
part23Length0b -= part23Length0bTruncation;
#ifdef DEBUG
fprintf(stderr, "usifh-0: interim sizes: %d (%d), %d (%d)\n",
part23Length0a, part23Length0aTruncation,
part23Length0b, part23Length0bTruncation);
#endif
/* Adjust these new lengths so they end on sample bit boundaries: */
for (i = 0; i < (int)hei.numSamples; ++i) {
if (hei.allBitOffsets[i] == part23Length0a) break;
else if (hei.allBitOffsets[i] > part23Length0a) {--i; break;}
}
if (i < 0) { /* should happen only if we couldn't fit sfLength */
i = 0; adjustment = 0;
} else {
adjustment = part23Length0a - hei.allBitOffsets[i];
}
#ifdef DEBUG
fprintf(stderr, "%d usifh-0: adjustment 1: %d\n", debugCount, adjustment);
#endif
part23Length0a -= adjustment;
part23Length0aTruncation += adjustment;
/* Assign the bits we just shaved to field b and granule 1: */
if (part23Length0bTruncation < adjustment) {
p23L1 += (adjustment - part23Length0bTruncation);
adjustment = part23Length0bTruncation;
}
part23Length0b += adjustment;
part23Length0bTruncation -= adjustment;
for (j = i; j < (int)hei.numSamples; ++j) {
if (hei.allBitOffsets[j]
== part23Length0a + part23Length0aTruncation + part23Length0b)
break;
else if (hei.allBitOffsets[j]
> part23Length0a + part23Length0aTruncation + part23Length0b)
{--j; break;}
}
if (j < 0) { /* should happen only if we couldn't fit sfLength */
j = 0; adjustment = 0;
} else {
adjustment = part23Length0a+part23Length0aTruncation+part23Length0b
- hei.allBitOffsets[j];
}
#ifdef DEBUG
fprintf(stderr, "%d usifh-0: adjustment 2: %d\n", debugCount, adjustment);
#endif
if (adjustment > part23Length0b) adjustment = part23Length0b; /*sanity*/
part23Length0b -= adjustment;
part23Length0bTruncation += adjustment;
/* Assign the bits we just shaved to granule 1 */
p23L1 += adjustment;
if (part23Length0aTruncation > 0) {
/* Change the granule's 'big_values' field to reflect the truncation */
gr->big_values = i;
}
}
/* Process granule 1 (MPEG-1 only) */
if (isMPEG2) {
part23Length1a = part23Length1b = 0;
part23Length1aTruncation = part23Length1bTruncation = 0;
} else {
unsigned granule1Offset
= origTotABsize + sideInfo.ch[1].gr[0].part2_3_length;
gr = &(sideInfo.ch[0].gr[1]);
origTotABsize = gr->part2_3_length;
MP3HuffmanDecode(gr, isMPEG2, mainDataPtr, granule1Offset,
origTotABsize, sfLength, hei);
/* Begin by computing new sizes for parts a & b (& their truncations) */
#ifdef DEBUG
fprintf(stderr, "usifh-1: %d, %d:%d, %d:%d, %d:%d, %d:%d, %d:%d\n",
hei.numSamples,
sfLength/8, sfLength%8,
hei.reg1Start/8, hei.reg1Start%8,
hei.reg2Start/8, hei.reg2Start%8,
hei.bigvalStart/8, hei.bigvalStart%8,
origTotABsize/8, origTotABsize%8);
#endif
if (p23L1 < sfLength) {
/* We can't use this, so give up on this granule: */
p23L1 = 0;
}
part23Length1a = hei.bigvalStart;
part23Length1b = origTotABsize - hei.bigvalStart;
part23Length1aTruncation = part23Length1bTruncation = 0;
if (origTotABsize > p23L1) {
/* We need to shorten one or both of fields a & b */
unsigned truncation = origTotABsize - p23L1;
#ifdef TRUNC_FAIRLY
part23Length1aTruncation = (truncation*(part23Length1a-sfLength))
/(origTotABsize-sfLength);
part23Length1bTruncation = truncation - part23Length1aTruncation;
#endif
#ifdef TRUNC_FAVORa
part23Length1bTruncation
= (truncation > part23Length1b) ? part23Length1b : truncation;
part23Length1aTruncation = truncation - part23Length1bTruncation;
#endif
#ifdef TRUNC_FAVORb
part23Length1aTruncation = (truncation > part23Length1a-sfLength)
? (part23Length1a-sfLength) : truncation;
part23Length1bTruncation = truncation - part23Length1aTruncation;
#endif
}
/* ASSERT: part23Length1xTruncation <= part23Length1x */
part23Length1a -= part23Length1aTruncation;
part23Length1b -= part23Length1bTruncation;
#ifdef DEBUG
fprintf(stderr, "usifh-1: interim sizes: %d (%d), %d (%d)\n",
part23Length1a, part23Length1aTruncation,
part23Length1b, part23Length1bTruncation);
#endif
/* Adjust these new lengths so they end on sample bit boundaries: */
for (i = 0; i < (int)hei.numSamples; ++i) {
if (hei.allBitOffsets[i] == part23Length1a) break;
else if (hei.allBitOffsets[i] > part23Length1a) {--i; break;}
}
if (i < 0) { /* should happen only if we couldn't fit sfLength */
i = 0; adjustment = 0;
} else {
adjustment = part23Length1a - hei.allBitOffsets[i];
}
#ifdef DEBUG
fprintf(stderr, "%d usifh-1: adjustment 0: %d\n", debugCount, adjustment);
#endif
part23Length1a -= adjustment;
part23Length1aTruncation += adjustment;
/* Assign the bits we just shaved to field b: */
if (part23Length1bTruncation < adjustment) {
adjustment = part23Length1bTruncation;
}
part23Length1b += adjustment;
part23Length1bTruncation -= adjustment;
for (j = i; j < (int)hei.numSamples; ++j) {
if (hei.allBitOffsets[j]
== part23Length1a + part23Length1aTruncation + part23Length1b)
break;
else if (hei.allBitOffsets[j]
> part23Length1a + part23Length1aTruncation + part23Length1b)
{--j; break;}
}
if (j < 0) { /* should happen only if we couldn't fit sfLength */
j = 0; adjustment = 0;
} else {
adjustment = part23Length1a+part23Length1aTruncation+part23Length1b
- hei.allBitOffsets[j];
}
#ifdef DEBUG
fprintf(stderr, "%d usifh-1: adjustment 1: %d\n", debugCount, adjustment);
#endif
if (adjustment > part23Length1b) adjustment = part23Length1b; /*sanity*/
part23Length1b -= adjustment;
part23Length1bTruncation += adjustment;
if (part23Length1aTruncation > 0) {
/* Change the granule's 'big_values' field to reflect the truncation */
gr->big_values = i;
}
}
#ifdef DEBUG
fprintf(stderr, "usifh-end, new vals: %d (%d), %d (%d), %d (%d), %d (%d)\n",
part23Length0a, part23Length0aTruncation,
part23Length0b, part23Length0bTruncation,
part23Length1a, part23Length1aTruncation,
part23Length1b, part23Length1bTruncation);
#endif
}
static void rsf_getline(char* line, unsigned max, unsigned char**fi) {
unsigned i;
for (i = 0; i < max; ++i) {
line[i] = *(*fi)++;
if (line[i] == '\n') {
line[i++] = '\0';
return;
}
}
line[i] = '\0';
}
static void rsfscanf(unsigned char **fi, unsigned int* v) {
while (sscanf((char*)*fi, "%x", v) == 0) {
/* skip past the next '\0' */
while (*(*fi)++ != '\0') {}
}
/* skip past any white-space before the value: */
while (*(*fi) <= ' ') ++(*fi);
/* skip past the value: */
while (*(*fi) > ' ') ++(*fi);
}
#define HUFFBITS unsigned long int
#define SIZEOF_HUFFBITS 4
#define HTN 34
#define MXOFF 250
struct huffcodetab {
char tablename[3]; /*string, containing table_description */
unsigned int xlen; /*max. x-index+ */
unsigned int ylen; /*max. y-index+ */
unsigned int linbits; /*number of linbits */
unsigned int linmax; /*max number to be stored in linbits */
int ref; /*a positive value indicates a reference*/
HUFFBITS *table; /*pointer to array[xlen][ylen] */
unsigned char *hlen; /*pointer to array[xlen][ylen] */
unsigned char(*val)[2];/*decoder tree */
unsigned int treelen; /*length of decoder tree */
};
static struct huffcodetab rsf_ht[HTN]; // array of all huffcodetable headers
/* 0..31 Huffman code table 0..31 */
/* 32,33 count1-tables */
/* read the huffman decoder table */
static int read_decoder_table(unsigned char* fi) {
int n,i,nn,t;
unsigned int v0,v1;
char command[100],line[100];
for (n=0;n<HTN;n++) {
/* (The body of this loop - which reads each table's name, dimensions,
"linbits" and decoder tree from the "huffdec" data via rsf_getline()
and rsfscanf() - is missing from this copy of the source, as are
"initialize_huffman()" and the "stab" scale-factor table used by
rsf_get_scale_factors_2() below.) */
}
return n;
}
/* Scale-factor bit lengths, indexed by "scalefac_compress" (MPEG-1; ISO 11172-3): */
static unsigned char const slen[2][16] = {
{0, 0, 0, 0, 3, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4},
{0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 2, 3}
};
static unsigned rsf_get_scale_factors_1(MP3SideInfo::gr_info_s_t* gr_info) {
int numbits;
int num0 = slen[0][gr_info->scalefac_compress];
int num1 = slen[1][gr_info->scalefac_compress];
if (gr_info->block_type == 2)
{
numbits = (num0 + num1) * 18;
if (gr_info->mixed_block_flag) {
numbits -= num0; /* num0 * 17 + num1 * 18 */
}
}
else
{
int scfsi = gr_info->scfsi;
if(scfsi < 0) { /* scfsi < 0 => granule == 0 */
numbits = (num0 + num1) * 10 + num0;
}
else {
numbits = 0;
if(!(scfsi & 0x8)) {
numbits += num0 * 6;
}
else {
}
if(!(scfsi & 0x4)) {
numbits += num0 * 5;
}
else {
}
if(!(scfsi & 0x2)) {
numbits += num1 * 5;
}
else {
}
if(!(scfsi & 0x1)) {
numbits += num1 * 5;
}
else {
}
}
}
return numbits;
}
extern unsigned n_slen2[];
extern unsigned i_slen2[];
static unsigned rsf_get_scale_factors_2(MP3SideInfo::gr_info_s_t *gr_info) {
unsigned char const* pnt;
int i;
unsigned int slen;
int n = 0;
int numbits = 0;
slen = n_slen2[gr_info->scalefac_compress];
gr_info->preflag = (slen>>15) & 0x1;
n = 0;
if( gr_info->block_type == 2 ) {
n++;
if(gr_info->mixed_block_flag)
n++;
}
pnt = stab[n][(slen>>12)&0x7];
for(i=0;i<4;i++) {
int num = slen & 0x7;
slen >>= 3;
numbits += pnt[i] * num;
}
return numbits;
}
static unsigned getScaleFactorsLength(MP3SideInfo::gr_info_s_t* gr,
Boolean isMPEG2) {
return isMPEG2 ? rsf_get_scale_factors_2(gr)
: rsf_get_scale_factors_1(gr);
}
static int rsf_huffman_decoder(BitVector& bv,
struct huffcodetab const* h,
int* x, int* y, int* v, int* w); // forward
void MP3HuffmanDecode(MP3SideInfo::gr_info_s_t* gr, Boolean isMPEG2,
unsigned char const* fromBasePtr,
unsigned fromBitOffset, unsigned fromLength,
unsigned& scaleFactorsLength,
MP3HuffmanEncodingInfo& hei) {
unsigned i;
int x, y, v, w;
struct huffcodetab *h;
BitVector bv((unsigned char*)fromBasePtr, fromBitOffset, fromLength);
/* Compute the size of the scale factors (& also advance bv): */
scaleFactorsLength = getScaleFactorsLength(gr, isMPEG2);
bv.skipBits(scaleFactorsLength);
initialize_huffman();
hei.reg1Start = hei.reg2Start = hei.numSamples = 0;
/* Read bigvalues area. */
if (gr->big_values < gr->region1start + gr->region2start) {
gr->big_values = gr->region1start + gr->region2start; /* sanity check */
}
for (i = 0; i < gr->big_values; ++i) {
if (i < gr->region1start) {
/* in region 0 */
h = &rsf_ht[gr->table_select[0]];
} else if (i < gr->region2start) {
/* in region 1 */
h = &rsf_ht[gr->table_select[1]];
if (hei.reg1Start == 0) {
hei.reg1Start = bv.curBitIndex();
}
} else {
/* in region 2 */
h = &rsf_ht[gr->table_select[2]];
if (hei.reg2Start == 0) {
hei.reg2Start = bv.curBitIndex();
}
}
hei.allBitOffsets[i] = bv.curBitIndex();
rsf_huffman_decoder(bv, h, &x, &y, &v, &w);
if (hei.decodedValues != NULL) {
// Record the decoded values:
unsigned* ptr = &hei.decodedValues[4*i];
ptr[0] = x; ptr[1] = y; ptr[2] = v; ptr[3] = w;
}
}
hei.bigvalStart = bv.curBitIndex();
/* Read count1 area. */
h = &rsf_ht[gr->count1table_select+32];
while (bv.curBitIndex() < bv.totNumBits() && i < SSLIMIT*SBLIMIT) {
hei.allBitOffsets[i] = bv.curBitIndex();
rsf_huffman_decoder(bv, h, &x, &y, &v, &w);
if (hei.decodedValues != NULL) {
// Record the decoded values:
unsigned* ptr = &hei.decodedValues[4*i];
ptr[0] = x; ptr[1] = y; ptr[2] = v; ptr[3] = w;
}
++i;
}
hei.allBitOffsets[i] = bv.curBitIndex();
hei.numSamples = i;
}
HUFFBITS dmask = (HUFFBITS)1 << (SIZEOF_HUFFBITS*8-1);
unsigned int hs = SIZEOF_HUFFBITS*8;
/* do the huffman-decoding */
static int rsf_huffman_decoder(BitVector& bv,
struct huffcodetab const* h, // ptr to huffman code record
/* unsigned */ int *x, // returns decoded x value
/* unsigned */ int *y, // returns decoded y value
int* v, int* w) {
HUFFBITS level;
unsigned point = 0;
int error = 1;
level = dmask;
*x = *y = *v = *w = 0;
if (h->val == NULL) return 2;
/* table 0 needs no bits */
if (h->treelen == 0) return 0;
/* Lookup in Huffman table. */
do {
if (h->val[point][0]==0) { /*end of tree*/
*x = h->val[point][1] >> 4;
*y = h->val[point][1] & 0xf;
error = 0;
break;
}
if (bv.get1Bit()) {
while (h->val[point][1] >= MXOFF) point += h->val[point][1];
point += h->val[point][1];
}
else {
while (h->val[point][0] >= MXOFF) point += h->val[point][0];
point += h->val[point][0];
}
level >>= 1;
} while (level || (point < h->treelen) );
///// } while (level || (point < rsf_ht->treelen) );
/* Check for error. */
if (error) { /* set x and y to a medium value as a simple concealment */
printf("Illegal Huffman code in data.\n");
*x = ((h->xlen-1) << 1);
*y = ((h->ylen-1) << 1);
}
/* Process sign encodings for quadruples tables. */
if (h->tablename[0] == '3'
&& (h->tablename[1] == '2' || h->tablename[1] == '3')) {
*v = (*y>>3) & 1;
*w = (*y>>2) & 1;
*x = (*y>>1) & 1;
*y = *y & 1;
if (*v)
if (bv.get1Bit() == 1) *v = -*v;
if (*w)
if (bv.get1Bit() == 1) *w = -*w;
if (*x)
if (bv.get1Bit() == 1) *x = -*x;
if (*y)
if (bv.get1Bit() == 1) *y = -*y;
}
/* Process sign and escape encodings for dual tables. */
else {
if (h->linbits)
if ((h->xlen-1) == (unsigned)*x)
*x += bv.getBits(h->linbits);
if (*x)
if (bv.get1Bit() == 1) *x = -*x;
if (h->linbits)
if ((h->ylen-1) == (unsigned)*y)
*y += bv.getBits(h->linbits);
if (*y)
if (bv.get1Bit() == 1) *y = -*y;
}
return error;
}
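// (In the tree walk above, each input bit selects either val[point][1]
// (bit == 1) or val[point][0] (bit == 0) as the offset to the next node;
// a leaf is reached when val[point][0] == 0, and val[point][1] then packs
// the decoded pair as (x<<4)|y.)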
#ifdef DO_HUFFMAN_ENCODING
inline int getNextSample(unsigned char const*& fromPtr) {
int sample
#ifdef FOUR_BYTE_SAMPLES
= (fromPtr[0]<<24) | (fromPtr[1]<<16) | (fromPtr[2]<<8) | fromPtr[3];
#else
#ifdef TWO_BYTE_SAMPLES
= (fromPtr[0]<<8) | fromPtr[1];
#else
// ONE_BYTE_SAMPLES
= fromPtr[0];
#endif
#endif
fromPtr += BYTES_PER_SAMPLE_VALUE;
return sample;
}
static void rsf_huffman_encoder(BitVector& bv,
struct huffcodetab* h,
int x, int y, int v, int w); // forward
unsigned MP3HuffmanEncode(MP3SideInfo::gr_info_s_t const* gr,
unsigned char const* fromPtr,
unsigned char* toPtr, unsigned toBitOffset,
unsigned numHuffBits) {
unsigned i;
struct huffcodetab *h;
int x, y, v, w;
BitVector bv(toPtr, toBitOffset, numHuffBits);
initialize_huffman();
// Encode big_values area:
unsigned big_values = gr->big_values;
if (big_values < gr->region1start + gr->region2start) {
big_values = gr->region1start + gr->region2start; /* sanity check */
}
for (i = 0; i < big_values; ++i) {
if (i < gr->region1start) {
/* in region 0 */
h = &rsf_ht[gr->table_select[0]];
} else if (i < gr->region2start) {
/* in region 1 */
h = &rsf_ht[gr->table_select[1]];
} else {
/* in region 2 */
h = &rsf_ht[gr->table_select[2]];
}
x = getNextSample(fromPtr);
y = getNextSample(fromPtr);
v = getNextSample(fromPtr);
w = getNextSample(fromPtr);
rsf_huffman_encoder(bv, h, x, y, v, w);
}
// Encode count1 area:
h = &rsf_ht[gr->count1table_select+32];
while (bv.curBitIndex() < bv.totNumBits() && i < SSLIMIT*SBLIMIT) {
x = getNextSample(fromPtr);
y = getNextSample(fromPtr);
v = getNextSample(fromPtr);
w = getNextSample(fromPtr);
rsf_huffman_encoder(bv, h, x, y, v, w);
++i;
}
return i;
}
static Boolean lookupHuffmanTableEntry(struct huffcodetab const* h,
HUFFBITS bits, unsigned bitsLength,
unsigned char& xy) {
unsigned point = 0;
unsigned mask = 1;
unsigned numBitsTestedSoFar = 0;
do {
if (h->val[point][0]==0) { // end of tree
xy = h->val[point][1];
if (h->hlen[xy] == 0) { // this entry hasn't already been used
h->table[xy] = bits;
h->hlen[xy] = bitsLength;
return True;
} else { // this entry has already been seen
return False;
}
}
if (numBitsTestedSoFar++ == bitsLength) {
// We don't yet have enough bits for this prefix
return False;
}
if (bits&mask) {
while (h->val[point][1] >= MXOFF) point += h->val[point][1];
point += h->val[point][1];
} else {
while (h->val[point][0] >= MXOFF) point += h->val[point][0];
point += h->val[point][0];
}
mask <<= 1;
} while (mask || (point < h->treelen));
return False;
}
static void buildHuffmanEncodingTable(struct huffcodetab* h) {
h->table = new unsigned long[256];
h->hlen = new unsigned char[256];
if (h->table == NULL || h->hlen == NULL) { h->table = NULL; return; }
for (unsigned i = 0; i < 256; ++i) {
h->table[i] = 0; h->hlen[i] = 0;
}
// Look up entries for each possible bit sequence length:
unsigned maxNumEntries = h->xlen * h->ylen;
unsigned numEntries = 0;
unsigned powerOf2 = 1;
for (unsigned bitsLength = 1;
bitsLength <= 8*SIZEOF_HUFFBITS; ++bitsLength) {
powerOf2 *= 2;
for (HUFFBITS bits = 0; bits < powerOf2; ++bits) {
// Find the table value - if any - for 'bits' (length 'bitsLength'):
unsigned char xy;
if (lookupHuffmanTableEntry(h, bits, bitsLength, xy)) {
++numEntries;
if (numEntries == maxNumEntries) return; // we're done
}
}
}
#ifdef DEBUG
fprintf(stderr, "Didn't find enough entries!\n"); // shouldn't happen
#endif
}
static void lookupXYandPutBits(BitVector& bv, struct huffcodetab const* h,
unsigned char xy) {
HUFFBITS bits = h->table[xy];
unsigned bitsLength = h->hlen[xy];
// Note that "bits" is in reverse order, so read them from right-to-left:
while (bitsLength-- > 0) {
bv.put1Bit(bits&0x00000001);
bits >>= 1;
}
}
static void putLinbits(BitVector& bv, struct huffcodetab const* h,
HUFFBITS bits) {
bv.putBits(bits, h->linbits);
}
static void rsf_huffman_encoder(BitVector& bv,
struct huffcodetab* h,
int x, int y, int v, int w) {
if (h->val == NULL) return;
/* table 0 produces no bits */
if (h->treelen == 0) return;
if (h->table == NULL) {
// We haven't yet built the encoding array for this table; do it now:
buildHuffmanEncodingTable(h);
if (h->table == NULL) return;
}
Boolean xIsNeg = False, yIsNeg = False, vIsNeg = False, wIsNeg = False;
unsigned char xy;
#ifdef FOUR_BYTE_SAMPLES
#else
#ifdef TWO_BYTE_SAMPLES
// Convert 2-byte negative numbers to their 4-byte equivalents:
if (x&0x8000) x |= 0xFFFF0000;
if (y&0x8000) y |= 0xFFFF0000;
if (v&0x8000) v |= 0xFFFF0000;
if (w&0x8000) w |= 0xFFFF0000;
#else
// ONE_BYTE_SAMPLES
// Convert 1-byte negative numbers to their 4-byte equivalents:
if (x&0x80) x |= 0xFFFFFF00;
if (y&0x80) y |= 0xFFFFFF00;
if (v&0x80) v |= 0xFFFFFF00;
if (w&0x80) w |= 0xFFFFFF00;
#endif
#endif
if (h->tablename[0] == '3'
&& (h->tablename[1] == '2' || h->tablename[1] == '3')) {// quad tables
if (x < 0) { xIsNeg = True; x = -x; }
if (y < 0) { yIsNeg = True; y = -y; }
if (v < 0) { vIsNeg = True; v = -v; }
if (w < 0) { wIsNeg = True; w = -w; }
// Sanity check: x,y,v,w must all be 0 or 1:
if (x>1 || y>1 || v>1 || w>1) {
#ifdef DEBUG
fprintf(stderr, "rsf_huffman_encoder quad sanity check fails: %x,%x,%x,%x\n", x, y, v, w);
#endif
}
xy = (v<<3)|(w<<2)|(x<<1)|y;
lookupXYandPutBits(bv, h, xy);
if (v) bv.put1Bit(vIsNeg);
if (w) bv.put1Bit(wIsNeg);
if (x) bv.put1Bit(xIsNeg);
if (y) bv.put1Bit(yIsNeg);
} else { // dual tables
// Sanity check: v and w must be 0:
if (v != 0 || w != 0) {
#ifdef DEBUG
fprintf(stderr, "rsf_huffman_encoder dual sanity check 1 fails: %x,%x,%x,%x\n", x, y, v, w);
#endif
}
if (x < 0) { xIsNeg = True; x = -x; }
if (y < 0) { yIsNeg = True; y = -y; }
// Sanity check: x and y must be <= 255:
if (x > 255 || y > 255) {
#ifdef DEBUG
fprintf(stderr, "rsf_huffman_encoder dual sanity check 2 fails: %x,%x,%x,%x\n", x, y, v, w);
#endif
}
int xl1 = h->xlen-1;
int yl1 = h->ylen-1;
unsigned linbitsX = 0; unsigned linbitsY = 0;
if (((x < xl1) || (xl1 == 0)) && (y < yl1)) {
// normal case
xy = (x<<4)|y;
lookupXYandPutBits(bv, h, xy);
if (x) bv.put1Bit(xIsNeg);
if (y) bv.put1Bit(yIsNeg);
} else if (x >= xl1) {
linbitsX = (unsigned)(x - xl1);
if (linbitsX > h->linmax) {
#ifdef DEBUG
fprintf(stderr,"warning: Huffman X table overflow\n");
#endif
linbitsX = h->linmax;
};
if (y >= yl1) {
xy = (xl1<<4)|yl1;
lookupXYandPutBits(bv, h, xy);
linbitsY = (unsigned)(y - yl1);
if (linbitsY > h->linmax) {
#ifdef DEBUG
fprintf(stderr,"warning: Huffman Y table overflow\n");
#endif
linbitsY = h->linmax;
};
if (h->linbits) putLinbits(bv, h, linbitsX);
if (x) bv.put1Bit(xIsNeg);
if (h->linbits) putLinbits(bv, h, linbitsY);
if (y) bv.put1Bit(yIsNeg);
} else { /* x >= h->xlen, y < h->ylen */
xy = (xl1<<4)|y;
lookupXYandPutBits(bv, h, xy);
if (h->linbits) putLinbits(bv, h, linbitsX);
if (x) bv.put1Bit(xIsNeg);
if (y) bv.put1Bit(yIsNeg);
}
} else { /* ((x < h->xlen) && (y >= h->ylen)) */
xy = (x<<4)|yl1;
lookupXYandPutBits(bv, h, xy);
linbitsY = y-yl1;
if (linbitsY > h->linmax) {
#ifdef DEBUG
fprintf(stderr,"warning: Huffman Y table overflow\n");
#endif
linbitsY = h->linmax;
};
if (x) bv.put1Bit(xIsNeg);
if (h->linbits) putLinbits(bv, h, linbitsY);
if (y) bv.put1Bit(yIsNeg);
}
}
}
#endif
live/liveMedia/MP3ADURTPSink.cpp 000444 001751 000000 00000010313 12265042432 016504 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for 'ADUized' MP3 frames ("mpa-robust")
// Implementation
#include "MP3ADURTPSink.hh"
MP3ADURTPSink::MP3ADURTPSink(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char RTPPayloadType)
: AudioRTPSink(env, RTPgs, RTPPayloadType, 90000, "MPA-ROBUST") {
}
MP3ADURTPSink::~MP3ADURTPSink() {
}
MP3ADURTPSink*
MP3ADURTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char RTPPayloadType) {
return new MP3ADURTPSink(env, RTPgs, RTPPayloadType);
}
static void badDataSize(UsageEnvironment& env, unsigned numBytesInFrame) {
env << "MP3ADURTPSink::doSpecialFrameHandling(): invalid size ("
<< numBytesInFrame << ") of non-fragmented input ADU!\n";
}
void MP3ADURTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
unsigned char* frameStart,
unsigned numBytesInFrame,
struct timeval framePresentationTime,
unsigned numRemainingBytes) {
// If this is the first (or only) fragment of an ADU, then
// check the "ADU descriptor" (that should be at the front) for validity:
if (fragmentationOffset == 0) {
unsigned aduDescriptorSize;
if (numBytesInFrame < 1) {
badDataSize(envir(), numBytesInFrame);
return;
}
if (frameStart[0]&0x40) {
// We have a 2-byte ADU descriptor
aduDescriptorSize = 2;
if (numBytesInFrame < 2) {
badDataSize(envir(), numBytesInFrame);
return;
}
fCurADUSize = ((frameStart[0]&~0xC0)<<8) | frameStart[1];
} else {
// We have a 1-byte ADU descriptor
aduDescriptorSize = 1;
fCurADUSize = frameStart[0]&~0x80;
}
if (frameStart[0]&0x80) {
envir() << "Unexpected \"C\" bit seen on non-fragment input ADU!\n";
return;
}
// Now, check whether the ADU size in the ADU descriptor is consistent
// with the total data size of (all fragments of) the input frame:
unsigned expectedADUSize =
fragmentationOffset + numBytesInFrame + numRemainingBytes
- aduDescriptorSize;
if (fCurADUSize != expectedADUSize) {
envir() << "MP3ADURTPSink::doSpecialFrameHandling(): Warning: Input ADU size "
<< expectedADUSize << " (=" << fragmentationOffset
<< "+" << numBytesInFrame << "+" << numRemainingBytes
<< "-" << aduDescriptorSize
<< ") did not match the value (" << fCurADUSize
<< ") in the ADU descriptor!\n";
fCurADUSize = expectedADUSize;
}
} else {
// This is the second (or subsequent) fragment.
// Insert a new ADU descriptor:
unsigned char aduDescriptor[2];
aduDescriptor[0] = 0xC0|(fCurADUSize>>8);
aduDescriptor[1] = fCurADUSize&0xFF;
setSpecialHeaderBytes(aduDescriptor, 2);
}
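// (Illustrative example: for an ADU of total size 500 bytes, the descriptor
// inserted above is 0xC1 0xF4 - the "C" (continuation) bit, the
// 2-byte-descriptor bit, and the 14-bit size 500 (0x1F4).)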
// Important: Also call our base class's doSpecialFrameHandling(),
// to set the packet's timestamp:
MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
frameStart, numBytesInFrame,
framePresentationTime,
numRemainingBytes);
}
unsigned MP3ADURTPSink::specialHeaderSize() const {
// Normally there's no special header.
// (The "ADU descriptor" is already present in the data.)
unsigned specialHeaderSize = 0;
// However, if we're about to output the second (or subsequent) fragment
// of a fragmented ADU, then we need to insert a new ADU descriptor at
// the front of the packet:
if (curFragmentationOffset() > 0) {
specialHeaderSize = 2;
}
return specialHeaderSize;
}
live/liveMedia/JPEGVideoRTPSource.cpp 000444 001751 000000 00000040465 12265042432 017636 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// JPEG Video (RFC 2435) RTP Sources
// Implementation
#include "JPEGVideoRTPSource.hh"
////////// JPEGBufferedPacket and JPEGBufferedPacketFactory //////////
class JPEGBufferedPacket: public BufferedPacket {
public:
Boolean completesFrame;
private:
// Redefined virtual functions:
virtual void reset();
virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
unsigned dataSize);
};
class JPEGBufferedPacketFactory: public BufferedPacketFactory {
private: // redefined virtual functions
virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
};
////////// JPEGVideoRTPSource implementation //////////
#define BYTE unsigned char
#define WORD unsigned
#define DWORD unsigned long
JPEGVideoRTPSource*
JPEGVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency,
unsigned defaultWidth, unsigned defaultHeight) {
return new JPEGVideoRTPSource(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency, defaultWidth, defaultHeight);
}
JPEGVideoRTPSource::JPEGVideoRTPSource(UsageEnvironment& env,
Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency,
unsigned defaultWidth, unsigned defaultHeight)
: MultiFramedRTPSource(env, RTPgs,
rtpPayloadFormat, rtpTimestampFrequency,
new JPEGBufferedPacketFactory),
fDefaultWidth(defaultWidth), fDefaultHeight(defaultHeight) {
}
JPEGVideoRTPSource::~JPEGVideoRTPSource() {
}
enum {
MARKER_SOF0 = 0xc0, // start-of-frame, baseline scan
MARKER_SOI = 0xd8, // start of image
MARKER_EOI = 0xd9, // end of image
MARKER_SOS = 0xda, // start of scan
MARKER_DRI = 0xdd, // restart interval
MARKER_DQT = 0xdb, // define quantization tables
MARKER_DHT = 0xc4, // huffman tables
MARKER_APP_FIRST = 0xe0,
MARKER_APP_LAST = 0xef,
MARKER_COMMENT = 0xfe,
};
static unsigned char const lum_dc_codelens[] = {
0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
};
static unsigned char const lum_dc_symbols[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
};
static unsigned char const lum_ac_codelens[] = {
0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d,
};
static unsigned char const lum_ac_symbols[] = {
0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa,
};
static unsigned char const chm_dc_codelens[] = {
0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
};
static unsigned char const chm_dc_symbols[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
};
static unsigned char const chm_ac_codelens[] = {
0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77,
};
static unsigned char const chm_ac_symbols[] = {
0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa,
};
static void createHuffmanHeader(unsigned char*& p,
unsigned char const* codelens,
int ncodes,
unsigned char const* symbols,
int nsymbols,
int tableNo, int tableClass) {
*p++ = 0xff; *p++ = MARKER_DHT;
*p++ = 0; /* length msb */
*p++ = 3 + ncodes + nsymbols; /* length lsb */
*p++ = (tableClass << 4) | tableNo;
memcpy(p, codelens, ncodes);
p += ncodes;
memcpy(p, symbols, nsymbols);
p += nsymbols;
}
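// (Illustrative example: for the luminance DC table above, ncodes == 16 and
// nsymbols == 12, so the emitted DHT segment's length field is
// 3 + 16 + 12 = 31 bytes.)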
static unsigned computeJPEGHeaderSize(unsigned qtlen, unsigned dri) {
unsigned qtlen_half = qtlen/2; // in case qtlen is odd; shouldn't happen
qtlen = qtlen_half*2;
unsigned numQtables = qtlen > 64 ? 2 : 1;
return 485 + numQtables*5 + qtlen + (dri > 0 ? 6 : 0);
}
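// (E.g., with two 64-byte quantizer tables (qtlen == 128) and no restart
// interval, this is 485 + 2*5 + 128 + 0 = 623 bytes.)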
static void createJPEGHeader(unsigned char* buf, unsigned type,
unsigned w, unsigned h,
unsigned char const* qtables, unsigned qtlen,
unsigned dri) {
unsigned char *ptr = buf;
unsigned numQtables = qtlen > 64 ? 2 : 1;
// MARKER_SOI:
*ptr++ = 0xFF; *ptr++ = MARKER_SOI;
// MARKER_APP_FIRST:
*ptr++ = 0xFF; *ptr++ = MARKER_APP_FIRST;
*ptr++ = 0x00; *ptr++ = 0x10; // size of chunk
*ptr++ = 'J'; *ptr++ = 'F'; *ptr++ = 'I'; *ptr++ = 'F'; *ptr++ = 0x00;
*ptr++ = 0x01; *ptr++ = 0x01; // JFIF format version (1.1)
*ptr++ = 0x00; // no units
*ptr++ = 0x00; *ptr++ = 0x01; // Horizontal pixel aspect ratio
*ptr++ = 0x00; *ptr++ = 0x01; // Vertical pixel aspect ratio
*ptr++ = 0x00; *ptr++ = 0x00; // no thumbnail
// MARKER_DRI:
if (dri > 0) {
*ptr++ = 0xFF; *ptr++ = MARKER_DRI;
*ptr++ = 0x00; *ptr++ = 0x04; // size of chunk
*ptr++ = (BYTE)(dri >> 8); *ptr++ = (BYTE)(dri); // restart interval
}
// MARKER_DQT (luma):
unsigned tableSize = numQtables == 1 ? qtlen : qtlen/2;
*ptr++ = 0xFF; *ptr++ = MARKER_DQT;
*ptr++ = 0x00; *ptr++ = tableSize + 3; // size of chunk
*ptr++ = 0x00; // precision(0), table id(0)
memcpy(ptr, qtables, tableSize);
qtables += tableSize;
ptr += tableSize;
if (numQtables > 1) {
unsigned tableSize = qtlen - qtlen/2;
// MARKER_DQT (chroma):
*ptr++ = 0xFF; *ptr++ = MARKER_DQT;
*ptr++ = 0x00; *ptr++ = tableSize + 3; // size of chunk
*ptr++ = 0x01; // precision(0), table id(1)
memcpy(ptr, qtables, tableSize);
qtables += tableSize;
ptr += tableSize;
}
// MARKER_SOF0:
*ptr++ = 0xFF; *ptr++ = MARKER_SOF0;
*ptr++ = 0x00; *ptr++ = 0x11; // size of chunk
*ptr++ = 0x08; // sample precision
*ptr++ = (BYTE)(h >> 8);
*ptr++ = (BYTE)(h); // number of lines (must be a multiple of 8)
*ptr++ = (BYTE)(w >> 8);
*ptr++ = (BYTE)(w); // number of columns (must be a multiple of 8)
*ptr++ = 0x03; // number of components
*ptr++ = 0x01; // id of component
*ptr++ = type ? 0x22 : 0x21; // sampling ratio (h,v)
*ptr++ = 0x00; // quant table id
*ptr++ = 0x02; // id of component
*ptr++ = 0x11; // sampling ratio (h,v)
*ptr++ = numQtables == 1 ? 0x00 : 0x01; // quant table id
*ptr++ = 0x03; // id of component
*ptr++ = 0x11; // sampling ratio (h,v)
*ptr++ = numQtables == 1 ? 0x00 : 0x01; // quant table id
createHuffmanHeader(ptr, lum_dc_codelens, sizeof lum_dc_codelens,
lum_dc_symbols, sizeof lum_dc_symbols, 0, 0);
createHuffmanHeader(ptr, lum_ac_codelens, sizeof lum_ac_codelens,
lum_ac_symbols, sizeof lum_ac_symbols, 0, 1);
createHuffmanHeader(ptr, chm_dc_codelens, sizeof chm_dc_codelens,
chm_dc_symbols, sizeof chm_dc_symbols, 1, 0);
createHuffmanHeader(ptr, chm_ac_codelens, sizeof chm_ac_codelens,
chm_ac_symbols, sizeof chm_ac_symbols, 1, 1);
// MARKER_SOS:
*ptr++ = 0xFF; *ptr++ = MARKER_SOS;
*ptr++ = 0x00; *ptr++ = 0x0C; // size of chunk
*ptr++ = 0x03; // number of components
*ptr++ = 0x01; // id of component
*ptr++ = 0x00; // huffman table id (DC, AC)
*ptr++ = 0x02; // id of component
*ptr++ = 0x11; // huffman table id (DC, AC)
*ptr++ = 0x03; // id of component
*ptr++ = 0x11; // huffman table id (DC, AC)
*ptr++ = 0x00; // start of spectral
*ptr++ = 0x3F; // end of spectral
*ptr++ = 0x00; // successive approximation bit position (high, low)
}
// The default 'luma' and 'chroma' quantizer tables, in zigzag order:
static unsigned char const defaultQuantizers[128] = {
// luma table:
16, 11, 12, 14, 12, 10, 16, 14,
13, 14, 18, 17, 16, 19, 24, 40,
26, 24, 22, 22, 24, 49, 35, 37,
29, 40, 58, 51, 61, 60, 57, 51,
56, 55, 64, 72, 92, 78, 64, 68,
87, 69, 55, 56, 80, 109, 81, 87,
95, 98, 103, 104, 103, 62, 77, 113,
121, 112, 100, 120, 92, 101, 103, 99,
// chroma table:
17, 18, 18, 24, 21, 24, 47, 26,
26, 47, 99, 66, 56, 66, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99
};
static void makeDefaultQtables(unsigned char* resultTables, unsigned Q) {
int factor = Q;
int q;
if (Q < 1) factor = 1;
else if (Q > 99) factor = 99;
if (Q < 50) {
q = 5000 / factor;
} else {
q = 200 - factor*2;
}
for (int i = 0; i < 128; ++i) {
int newVal = (defaultQuantizers[i]*q + 50)/100;
if (newVal < 1) newVal = 1;
else if (newVal > 255) newVal = 255;
resultTables[i] = newVal;
}
}
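// (E.g., Q == 50 gives q == 100, leaving the default tables essentially
// unchanged; Q == 25 gives q == 200, roughly doubling each entry; Q == 75
// gives q == 50, roughly halving each entry - always clamped to [1,255].)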
Boolean JPEGVideoRTPSource
::processSpecialHeader(BufferedPacket* packet,
unsigned& resultSpecialHeaderSize) {
unsigned char* headerStart = packet->data();
unsigned packetSize = packet->dataSize();
unsigned char* qtables = NULL;
unsigned qtlen = 0;
unsigned dri = 0;
// There's at least an 8-byte video-specific header:
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type-specific | Fragment Offset |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Q | Width | Height |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
if (packetSize < 8) return False;
resultSpecialHeaderSize = 8;
unsigned Offset = (unsigned)((DWORD)headerStart[1] << 16 | (DWORD)headerStart[2] << 8 | (DWORD)headerStart[3]);
unsigned Type = (unsigned)headerStart[4];
unsigned type = Type & 1;
unsigned Q = (unsigned)headerStart[5];
unsigned width = (unsigned)headerStart[6] * 8;
unsigned height = (unsigned)headerStart[7] * 8;
if ((width == 0 || height == 0) && fDefaultWidth != 0 && fDefaultHeight != 0) {
// Use the default width and height parameters instead:
width = fDefaultWidth;
height = fDefaultHeight;
}
if (width == 0) width = 256*8; // special case
if (height == 0) height = 256*8; // special case
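// (The Width and Height fields are in units of 8 pixels - e.g., a Width
// byte of 80 means 640 pixels - and a value of 0, otherwise unusable,
// is taken to mean the maximum, 2048.)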
if (Type > 63) {
// Restart Marker header present
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Restart Interval |F|L| Restart Count |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
if (packetSize < resultSpecialHeaderSize + 4) return False;
unsigned RestartInterval = (unsigned)((WORD)headerStart[resultSpecialHeaderSize] << 8 | (WORD)headerStart[resultSpecialHeaderSize + 1]);
dri = RestartInterval;
resultSpecialHeaderSize += 4;
}
if (Offset == 0) {
if (Q > 127) {
// Quantization Table header present
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| MBZ | Precision | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Quantization Table Data |
| ... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
if (packetSize < resultSpecialHeaderSize + 4) return False;
unsigned MBZ = (unsigned)headerStart[resultSpecialHeaderSize];
if (MBZ == 0) {
// unsigned Precision = (unsigned)headerStart[resultSpecialHeaderSize + 1];
unsigned Length = (unsigned)((WORD)headerStart[resultSpecialHeaderSize + 2] << 8 | (WORD)headerStart[resultSpecialHeaderSize + 3]);
//ASSERT(Length == 128);
resultSpecialHeaderSize += 4;
if (packetSize < resultSpecialHeaderSize + Length) return False;
qtlen = Length;
qtables = &headerStart[resultSpecialHeaderSize];
resultSpecialHeaderSize += Length;
}
}
}
// If this is the first (or only) fragment of a JPEG frame, then we need
// to synthesize a JPEG header, and prepend it to the incoming data.
// Hack: We can do this because we allowed space for it in
// our special "JPEGBufferedPacket" subclass. We also adjust
// "resultSpecialHeaderSize" to compensate for this, by subtracting
// the size of the synthesized header. Note that this will cause
// "resultSpecialHeaderSize" to become negative, but the code that called
// us (in "MultiFramedRTPSource") will handle this properly.
if (Offset == 0) {
unsigned char newQtables[128];
if (qtlen == 0) {
// A quantization table was not present in the RTP JPEG header,
// so use the default tables, scaled according to the "Q" factor:
makeDefaultQtables(newQtables, Q);
qtables = newQtables;
qtlen = sizeof newQtables;
}
unsigned hdrlen = computeJPEGHeaderSize(qtlen, dri);
resultSpecialHeaderSize -= hdrlen; // goes negative
headerStart += (int)resultSpecialHeaderSize; // goes backward
createJPEGHeader(headerStart, type, width, height, qtables, qtlen, dri);
}
fCurrentPacketBeginsFrame = (Offset == 0);
// The RTP "M" (marker) bit indicates the last fragment of a frame:
((JPEGBufferedPacket*)packet)->completesFrame
= fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
return True;
}
char const* JPEGVideoRTPSource::MIMEtype() const {
return "video/JPEG";
}
////////// JPEGBufferedPacket and JPEGBufferedPacketFactory implementation
void JPEGBufferedPacket::reset() {
BufferedPacket::reset();
// Move our "fHead" and "fTail" forward, to allow space for a synthesized
// JPEG header to precede the RTP data that comes in over the network.
unsigned offset = MAX_JPEG_HEADER_SIZE;
if (offset > fPacketSize) offset = fPacketSize; // shouldn't happen
fHead = fTail = offset;
}
unsigned JPEGBufferedPacket
::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
// Normally, the enclosed frame size is just "dataSize". If, however,
// the frame does not end with the "EOI" marker, then add this now:
if (completesFrame && dataSize >= 2 &&
!(framePtr[dataSize-2] == 0xFF && framePtr[dataSize-1] == MARKER_EOI)) {
framePtr[dataSize++] = 0xFF;
framePtr[dataSize++] = MARKER_EOI;
}
return dataSize;
}
BufferedPacket* JPEGBufferedPacketFactory
::createNewPacket(MultiFramedRTPSource* /*ourSource*/) {
return new JPEGBufferedPacket;
}
live/liveMedia/AudioInputDevice.cpp 000444 001751 000000 00000003165 12265042432 017550 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// Copyright (c) 2001-2003 Live Networks, Inc. All rights reserved.
// Generic audio input device (such as a microphone, or an input sound card)
// Implementation
#include "AudioInputDevice.hh"
AudioInputDevice
::AudioInputDevice(UsageEnvironment& env, unsigned char bitsPerSample,
unsigned char numChannels,
unsigned samplingFrequency, unsigned granularityInMS)
: FramedSource(env), fBitsPerSample(bitsPerSample),
fNumChannels(numChannels), fSamplingFrequency(samplingFrequency),
fGranularityInMS(granularityInMS) {
}
AudioInputDevice::~AudioInputDevice() {
}
char** AudioInputDevice::allowedDeviceNames = NULL;
////////// AudioPortNames implementation //////////
AudioPortNames::AudioPortNames()
: numPorts(0), portName(NULL) {
}
AudioPortNames::~AudioPortNames() {
for (unsigned i = 0; i < numPorts; ++i) delete[] portName[i];
delete[] portName;
}
live/liveMedia/StreamParser.hh 000444 001751 000000 00000011757 12265042432 016602 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Abstract class for parsing a byte stream
// C++ header
#ifndef _STREAM_PARSER_HH
#define _STREAM_PARSER_HH
#ifndef _FRAMED_SOURCE_HH
#include "FramedSource.hh"
#endif
class StreamParser {
public:
virtual void flushInput();
protected: // we're a virtual base class
typedef void (clientContinueFunc)(void* clientData,
unsigned char* ptr, unsigned size,
struct timeval presentationTime);
StreamParser(FramedSource* inputSource,
FramedSource::onCloseFunc* onInputCloseFunc,
void* onInputCloseClientData,
clientContinueFunc* clientContinueFunc,
void* clientContinueClientData);
virtual ~StreamParser();
void saveParserState();
virtual void restoreSavedParserState();
u_int32_t get4Bytes() { // byte-aligned; returned in big-endian order
u_int32_t result = test4Bytes();
fCurParserIndex += 4;
fRemainingUnparsedBits = 0;
return result;
}
u_int32_t test4Bytes() { // as above, but doesn't advance ptr
ensureValidBytes(4);
unsigned char const* ptr = nextToParse();
return (ptr[0]<<24)|(ptr[1]<<16)|(ptr[2]<<8)|ptr[3];
}
u_int16_t get2Bytes() {
ensureValidBytes(2);
unsigned char const* ptr = nextToParse();
u_int16_t result = (ptr[0]<<8)|ptr[1];
fCurParserIndex += 2;
fRemainingUnparsedBits = 0;
return result;
}
u_int8_t get1Byte() { // byte-aligned
ensureValidBytes(1);
fRemainingUnparsedBits = 0;
return curBank()[fCurParserIndex++];
}
u_int8_t test1Byte(unsigned numBytes) { // as above, but doesn't advance ptr
ensureValidBytes(1);
return nextToParse()[0];
}
void getBytes(u_int8_t* to, unsigned numBytes) {
testBytes(to, numBytes);
fCurParserIndex += numBytes;
fRemainingUnparsedBits = 0;
}
void testBytes(u_int8_t* to, unsigned numBytes) { // as above, but doesn't advance ptr
ensureValidBytes(numBytes);
memmove(to, nextToParse(), numBytes);
}
void skipBytes(unsigned numBytes) {
ensureValidBytes(numBytes);
fCurParserIndex += numBytes;
}
void skipBits(unsigned numBits);
unsigned getBits(unsigned numBits);
// numBits <= 32; returns data into low-order bits of result
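// (Bits are consumed most-significant-first within each byte: e.g., if the
// next byte is 0xC3 (binary 11000011), getBits(3) returns 0x6, and a
// following getBits(5) returns 0x03.)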
unsigned curOffset() const { return fCurParserIndex; }
unsigned& totNumValidBytes() { return fTotNumValidBytes; }
Boolean haveSeenEOF() const { return fHaveSeenEOF; }
unsigned bankSize() const;
private:
unsigned char* curBank() { return fCurBank; }
unsigned char* nextToParse() { return &curBank()[fCurParserIndex]; }
unsigned char* lastParsed() { return &curBank()[fCurParserIndex-1]; }
// makes sure that at least "numBytes" valid bytes remain:
void ensureValidBytes(unsigned numBytesNeeded) {
// common case: inlined:
if (fCurParserIndex + numBytesNeeded <= fTotNumValidBytes) return;
ensureValidBytes1(numBytesNeeded);
}
void ensureValidBytes1(unsigned numBytesNeeded);
static void afterGettingBytes(void* clientData, unsigned numBytesRead,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds);
void afterGettingBytes1(unsigned numBytesRead, struct timeval presentationTime);
static void onInputClosure(void* clientData);
void onInputClosure1();
private:
FramedSource* fInputSource; // should be a byte-stream source??
FramedSource::onCloseFunc* fClientOnInputCloseFunc;
void* fClientOnInputCloseClientData;
clientContinueFunc* fClientContinueFunc;
void* fClientContinueClientData;
// Use a pair of 'banks', and swap between them as they fill up:
unsigned char* fBank[2];
unsigned char fCurBankNum;
unsigned char* fCurBank;
// The most recent 'saved' parse position:
unsigned fSavedParserIndex; // <= fCurParserIndex
unsigned char fSavedRemainingUnparsedBits;
// The current position of the parser within the current bank:
unsigned fCurParserIndex; // <= fTotNumValidBytes
unsigned char fRemainingUnparsedBits; // in previous byte: [0,7]
// The total number of valid bytes stored in the current bank:
unsigned fTotNumValidBytes; // <= BANK_SIZE
// Whether we have seen EOF on the input source:
Boolean fHaveSeenEOF;
struct timeval fLastSeenPresentationTime; // hack used for EOF handling
};
#endif
live/liveMedia/StreamParser.cpp 000444 001751 000000 00000016653 12265042432 016765 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// Abstract class for parsing a byte stream
// Implementation
#include "StreamParser.hh"
#include <string.h>
#include <stdlib.h>
#define BANK_SIZE 150000
void StreamParser::flushInput() {
fCurParserIndex = fSavedParserIndex = 0;
fSavedRemainingUnparsedBits = fRemainingUnparsedBits = 0;
fTotNumValidBytes = 0;
}
StreamParser::StreamParser(FramedSource* inputSource,
FramedSource::onCloseFunc* onInputCloseFunc,
void* onInputCloseClientData,
clientContinueFunc* clientContinueFunc,
void* clientContinueClientData)
: fInputSource(inputSource), fClientOnInputCloseFunc(onInputCloseFunc),
fClientOnInputCloseClientData(onInputCloseClientData),
fClientContinueFunc(clientContinueFunc),
fClientContinueClientData(clientContinueClientData),
fSavedParserIndex(0), fSavedRemainingUnparsedBits(0),
fCurParserIndex(0), fRemainingUnparsedBits(0),
fTotNumValidBytes(0), fHaveSeenEOF(False) {
fBank[0] = new unsigned char[BANK_SIZE];
fBank[1] = new unsigned char[BANK_SIZE];
fCurBankNum = 0;
fCurBank = fBank[fCurBankNum];
fLastSeenPresentationTime.tv_sec = 0; fLastSeenPresentationTime.tv_usec = 0;
}
StreamParser::~StreamParser() {
delete[] fBank[0]; delete[] fBank[1];
}
void StreamParser::saveParserState() {
fSavedParserIndex = fCurParserIndex;
fSavedRemainingUnparsedBits = fRemainingUnparsedBits;
}
void StreamParser::restoreSavedParserState() {
fCurParserIndex = fSavedParserIndex;
fRemainingUnparsedBits = fSavedRemainingUnparsedBits;
}
void StreamParser::skipBits(unsigned numBits) {
if (numBits <= fRemainingUnparsedBits) {
fRemainingUnparsedBits -= numBits;
} else {
numBits -= fRemainingUnparsedBits;
unsigned numBytesToExamine = (numBits+7)/8; // round up
ensureValidBytes(numBytesToExamine);
fCurParserIndex += numBytesToExamine;
fRemainingUnparsedBits = 8*numBytesToExamine - numBits;
}
}
unsigned StreamParser::getBits(unsigned numBits) {
if (numBits <= fRemainingUnparsedBits) {
unsigned char lastByte = *lastParsed();
lastByte >>= (fRemainingUnparsedBits - numBits);
fRemainingUnparsedBits -= numBits;
return (unsigned)lastByte &~ ((~0)<<numBits);
} else {
unsigned char lastByte;
if (fRemainingUnparsedBits > 0) {
lastByte = *lastParsed();
} else {
lastByte = 0;
}
unsigned remainingBits = numBits - fRemainingUnparsedBits; // > 0
// For simplicity, read the next 4 bytes, even though we might not
// need all of them here:
unsigned result = test4Bytes();
result >>= (32 - remainingBits);
result |= (lastByte << remainingBits);
if (numBits < 32) result &=~ ((~0)<<numBits);
fRemainingUnparsedBits = 32 - remainingBits;
fCurParserIndex += 4;
return result;
}
}
void StreamParser::ensureValidBytes1(unsigned numBytesNeeded) {
// We need to read some more bytes from the input source.
// First, clarify how much data to ask for:
unsigned maxInputFrameSize = fInputSource->maxFrameSize();
if (maxInputFrameSize > numBytesNeeded) numBytesNeeded = maxInputFrameSize;
// First, check whether these new bytes would overflow the current
// bank. If so, start using a new bank now.
if (fCurParserIndex + numBytesNeeded > BANK_SIZE) {
// Swap banks, but save any still-needed bytes from the old bank:
unsigned numBytesToSave = fTotNumValidBytes - fSavedParserIndex;
unsigned char const* from = &curBank()[fSavedParserIndex];
fCurBankNum = (fCurBankNum + 1)%2;
fCurBank = fBank[fCurBankNum];
memmove(curBank(), from, numBytesToSave);
fCurParserIndex = fCurParserIndex - fSavedParserIndex;
fSavedParserIndex = 0;
fTotNumValidBytes = numBytesToSave;
}
// ASSERT: fCurParserIndex + numBytesNeeded > fTotNumValidBytes
// && fCurParserIndex + numBytesNeeded <= BANK_SIZE
if (fCurParserIndex + numBytesNeeded > BANK_SIZE) {
// If this happens, it means that we have too much saved parser state.
// To fix this, increase BANK_SIZE as appropriate.
fInputSource->envir() << "StreamParser internal error ("
<< fCurParserIndex << " + "
<< numBytesNeeded << " > "
<< BANK_SIZE << ")\n";
fInputSource->envir().internalError();
}
// Try to read as many new bytes as will fit in the current bank:
unsigned maxNumBytesToRead = BANK_SIZE - fTotNumValidBytes;
fInputSource->getNextFrame(&curBank()[fTotNumValidBytes],
maxNumBytesToRead,
afterGettingBytes, this,
onInputClosure, this);
throw NO_MORE_BUFFERED_INPUT;
}
void StreamParser::afterGettingBytes(void* clientData,
unsigned numBytesRead,
unsigned /*numTruncatedBytes*/,
struct timeval presentationTime,
unsigned /*durationInMicroseconds*/){
StreamParser* parser = (StreamParser*)clientData;
if (parser != NULL) parser->afterGettingBytes1(numBytesRead, presentationTime);
}
void StreamParser::afterGettingBytes1(unsigned numBytesRead, struct timeval presentationTime) {
// Sanity check: Make sure we didn't get too many bytes for our bank:
if (fTotNumValidBytes + numBytesRead > BANK_SIZE) {
fInputSource->envir()
<< "StreamParser::afterGettingBytes() warning: read "
<< numBytesRead << " bytes; expected no more than "
<< BANK_SIZE - fTotNumValidBytes << "\n";
}
fLastSeenPresentationTime = presentationTime;
unsigned char* ptr = &curBank()[fTotNumValidBytes];
fTotNumValidBytes += numBytesRead;
// Continue our original calling source where it left off:
restoreSavedParserState();
// Sigh... this is a crock; things would have been a lot simpler
// here if we were using threads, with synchronous I/O...
fClientContinueFunc(fClientContinueClientData, ptr, numBytesRead, presentationTime);
}
void StreamParser::onInputClosure(void* clientData) {
StreamParser* parser = (StreamParser*)clientData;
if (parser != NULL) parser->onInputClosure1();
}
void StreamParser::onInputClosure1() {
if (!fHaveSeenEOF) {
// We're hitting EOF for the first time. Set our 'EOF' flag, and continue parsing, as if we'd just read 0 bytes of data.
// This allows the parser to re-parse any remaining unparsed data (perhaps while testing for EOF at the end):
fHaveSeenEOF = True;
afterGettingBytes1(0, fLastSeenPresentationTime);
} else {
// We're hitting EOF for the second time. Now, we handle the source input closure:
fHaveSeenEOF = False;
if (fClientOnInputCloseFunc != NULL) (*fClientOnInputCloseFunc)(fClientOnInputCloseClientData);
}
}
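The two-bank scheme above is worth a standalone illustration. The following is a minimal sketch (the struct and names are mine, not the library's) of the invariant that "ensureValidBytes1()" maintains: before new input is appended, the bytes from the saved parse position onward are copied to the front of the other bank, so a partially parsed frame always remains contiguous in memory.
#include <cstring>
// Minimal sketch of the bank-swapping invariant; the async read,
// EOF handling, and error checks of the real class are omitted:
struct TwoBankBuffer {
  enum { BANK_SIZE = 150000 };      // same constant as above
  unsigned char bank[2][BANK_SIZE];
  unsigned curBankNum;              // 0 or 1
  unsigned savedIndex;              // start of bytes that must be kept
  unsigned curIndex;                // current parse position
  unsigned numValid;                // valid bytes in the current bank
  TwoBankBuffer() : curBankNum(0), savedIndex(0), curIndex(0), numValid(0) {}
  unsigned char* curBank() { return bank[curBankNum]; }
  // Make room for "numBytesNeeded" more bytes, swapping banks if the
  // current one would overflow -- mirroring ensureValidBytes1():
  void makeRoom(unsigned numBytesNeeded) {
    if (curIndex + numBytesNeeded > BANK_SIZE) {
      unsigned numToSave = numValid - savedIndex;
      unsigned char* from = &curBank()[savedIndex];
      curBankNum ^= 1;
      std::memmove(curBank(), from, numToSave); // keep the unparsed tail
      curIndex -= savedIndex;
      savedIndex = 0;
      numValid = numToSave;
    }
  }
};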
live/liveMedia/MPEG1or2AudioRTPSink.cpp 000444 001751 000000 00000004473 12265042432 020003 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for MPEG audio (RFC 2250)
// Implementation
#include "MPEG1or2AudioRTPSink.hh"
MPEG1or2AudioRTPSink::MPEG1or2AudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs)
: AudioRTPSink(env, RTPgs, 14, 90000, "MPA") {
}
MPEG1or2AudioRTPSink::~MPEG1or2AudioRTPSink() {
}
MPEG1or2AudioRTPSink*
MPEG1or2AudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) {
return new MPEG1or2AudioRTPSink(env, RTPgs);
}
void MPEG1or2AudioRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
unsigned char* frameStart,
unsigned numBytesInFrame,
struct timeval framePresentationTime,
unsigned numRemainingBytes) {
// If this is the 1st frame in the 1st packet, set the RTP 'M' (marker)
// bit (because this is considered the start of a talk spurt):
if (isFirstPacket() && isFirstFrameInPacket()) {
setMarkerBit();
}
// If this is the first frame in the packet, set the lower half of the
// audio-specific header (to the "fragmentationOffset"):
if (isFirstFrameInPacket()) {
setSpecialHeaderWord(fragmentationOffset&0xFFFF);
}
// Important: Also call our base class's doSpecialFrameHandling(),
// to set the packet's timestamp:
MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
frameStart, numBytesInFrame,
framePresentationTime,
numRemainingBytes);
}
unsigned MPEG1or2AudioRTPSink::specialHeaderSize() const {
// There's a 4 byte special audio header:
return 4;
}
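For reference, the 4-byte header that "setSpecialHeaderWord()" fills in above is the RFC 2250 MPEG audio header: 16 must-be-zero bits followed by a 16-bit fragmentation offset. A hedged sketch of the packing (the helper name is mine):
#include <stdint.h>
// Upper 16 bits MBZ; lower 16 bits = byte offset of this fragment
// within its audio frame (0 for an unfragmented frame):
static uint32_t mpaSpecialHeaderWord(unsigned fragmentationOffset) {
  return (uint32_t)(fragmentationOffset & 0xFFFF);
}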
live/liveMedia/MPEG4VideoStreamFramer.cpp 000444 001751 000000 00000056607 12265042432 020474 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A filter that breaks up an MPEG-4 video elementary stream into
// frames for:
// - Visual Object Sequence (VS) Header + Visual Object (VO) Header
// + Video Object Layer (VOL) Header
// - Group of VOP (GOV) Header
// - VOP frame
// Implementation
#include "MPEG4VideoStreamFramer.hh"
#include "MPEGVideoStreamParser.hh"
#include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()"
#include <string.h>
////////// MPEG4VideoStreamParser definition //////////
// An enum representing the current state of the parser:
enum MPEGParseState {
PARSING_VISUAL_OBJECT_SEQUENCE,
PARSING_VISUAL_OBJECT_SEQUENCE_SEEN_CODE,
PARSING_VISUAL_OBJECT,
PARSING_VIDEO_OBJECT_LAYER,
PARSING_GROUP_OF_VIDEO_OBJECT_PLANE,
PARSING_VIDEO_OBJECT_PLANE,
PARSING_VISUAL_OBJECT_SEQUENCE_END_CODE
};
class MPEG4VideoStreamParser: public MPEGVideoStreamParser {
public:
MPEG4VideoStreamParser(MPEG4VideoStreamFramer* usingSource,
FramedSource* inputSource);
virtual ~MPEG4VideoStreamParser();
private: // redefined virtual functions:
virtual void flushInput();
virtual unsigned parse();
private:
MPEG4VideoStreamFramer* usingSource() {
return (MPEG4VideoStreamFramer*)fUsingSource;
}
void setParseState(MPEGParseState parseState);
unsigned parseVisualObjectSequence(Boolean haveSeenStartCode = False);
unsigned parseVisualObject();
unsigned parseVideoObjectLayer();
unsigned parseGroupOfVideoObjectPlane();
unsigned parseVideoObjectPlane();
unsigned parseVisualObjectSequenceEndCode();
// These are used for parsing within an already-read frame:
Boolean getNextFrameBit(u_int8_t& result);
Boolean getNextFrameBits(unsigned numBits, u_int32_t& result);
// Which are used by:
void analyzeVOLHeader();
private:
MPEGParseState fCurrentParseState;
unsigned fNumBitsSeenSoFar; // used by the getNextFrameBit*() routines
u_int32_t vop_time_increment_resolution;
unsigned fNumVTIRBits;
// # of bits needed to count to "vop_time_increment_resolution"
u_int8_t fixed_vop_rate;
unsigned fixed_vop_time_increment; // used if 'fixed_vop_rate' is set
unsigned fSecondsSinceLastTimeCode, fTotalTicksSinceLastTimeCode, fPrevNewTotalTicks;
unsigned fPrevPictureCountDelta;
Boolean fJustSawTimeCode;
};
////////// MPEG4VideoStreamFramer implementation //////////
MPEG4VideoStreamFramer*
MPEG4VideoStreamFramer::createNew(UsageEnvironment& env,
FramedSource* inputSource) {
// Need to add source type checking here??? #####
return new MPEG4VideoStreamFramer(env, inputSource);
}
unsigned char* MPEG4VideoStreamFramer
::getConfigBytes(unsigned& numBytes) const {
numBytes = fNumConfigBytes;
return fConfigBytes;
}
void MPEG4VideoStreamFramer
::setConfigInfo(u_int8_t profileAndLevelIndication, char const* configStr) {
fProfileAndLevelIndication = profileAndLevelIndication;
delete[] fConfigBytes;
fConfigBytes = parseGeneralConfigStr(configStr, fNumConfigBytes);
}
MPEG4VideoStreamFramer::MPEG4VideoStreamFramer(UsageEnvironment& env,
FramedSource* inputSource,
Boolean createParser)
: MPEGVideoStreamFramer(env, inputSource),
fProfileAndLevelIndication(0),
fConfigBytes(NULL), fNumConfigBytes(0),
fNewConfigBytes(NULL), fNumNewConfigBytes(0) {
fParser = createParser
? new MPEG4VideoStreamParser(this, inputSource)
: NULL;
}
MPEG4VideoStreamFramer::~MPEG4VideoStreamFramer() {
delete[] fConfigBytes; delete[] fNewConfigBytes;
}
void MPEG4VideoStreamFramer::startNewConfig() {
delete[] fNewConfigBytes; fNewConfigBytes = NULL;
fNumNewConfigBytes = 0;
}
void MPEG4VideoStreamFramer
::appendToNewConfig(unsigned char* newConfigBytes, unsigned numNewBytes) {
// Allocate a new block of memory for the new config bytes:
unsigned char* configNew
= new unsigned char[fNumNewConfigBytes + numNewBytes];
// Copy the old, then the new, config bytes there:
memmove(configNew, fNewConfigBytes, fNumNewConfigBytes);
memmove(&configNew[fNumNewConfigBytes], newConfigBytes, numNewBytes);
delete[] fNewConfigBytes; fNewConfigBytes = configNew;
fNumNewConfigBytes += numNewBytes;
}
void MPEG4VideoStreamFramer::completeNewConfig() {
delete[] fConfigBytes; fConfigBytes = fNewConfigBytes;
fNewConfigBytes = NULL;
fNumConfigBytes = fNumNewConfigBytes;
fNumNewConfigBytes = 0;
}
Boolean MPEG4VideoStreamFramer::isMPEG4VideoStreamFramer() const {
return True;
}
////////// MPEG4VideoStreamParser implementation //////////
MPEG4VideoStreamParser
::MPEG4VideoStreamParser(MPEG4VideoStreamFramer* usingSource,
FramedSource* inputSource)
: MPEGVideoStreamParser(usingSource, inputSource),
fCurrentParseState(PARSING_VISUAL_OBJECT_SEQUENCE),
vop_time_increment_resolution(0), fNumVTIRBits(0),
fixed_vop_rate(0), fixed_vop_time_increment(0),
fSecondsSinceLastTimeCode(0), fTotalTicksSinceLastTimeCode(0),
fPrevNewTotalTicks(0), fPrevPictureCountDelta(1), fJustSawTimeCode(False) {
}
MPEG4VideoStreamParser::~MPEG4VideoStreamParser() {
}
void MPEG4VideoStreamParser::setParseState(MPEGParseState parseState) {
fCurrentParseState = parseState;
MPEGVideoStreamParser::setParseState();
}
void MPEG4VideoStreamParser::flushInput() {
fSecondsSinceLastTimeCode = 0;
fTotalTicksSinceLastTimeCode = 0;
fPrevNewTotalTicks = 0;
fPrevPictureCountDelta = 1;
StreamParser::flushInput();
if (fCurrentParseState != PARSING_VISUAL_OBJECT_SEQUENCE) {
setParseState(PARSING_VISUAL_OBJECT_SEQUENCE); // later, change to GOV or VOP? #####
}
}
unsigned MPEG4VideoStreamParser::parse() {
try {
switch (fCurrentParseState) {
case PARSING_VISUAL_OBJECT_SEQUENCE: {
return parseVisualObjectSequence();
}
case PARSING_VISUAL_OBJECT_SEQUENCE_SEEN_CODE: {
return parseVisualObjectSequence(True);
}
case PARSING_VISUAL_OBJECT: {
return parseVisualObject();
}
case PARSING_VIDEO_OBJECT_LAYER: {
return parseVideoObjectLayer();
}
case PARSING_GROUP_OF_VIDEO_OBJECT_PLANE: {
return parseGroupOfVideoObjectPlane();
}
case PARSING_VIDEO_OBJECT_PLANE: {
return parseVideoObjectPlane();
}
case PARSING_VISUAL_OBJECT_SEQUENCE_END_CODE: {
return parseVisualObjectSequenceEndCode();
}
default: {
return 0; // shouldn't happen
}
}
} catch (int /*e*/) {
#ifdef DEBUG
fprintf(stderr, "MPEG4VideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
#endif
return 0; // the parsing got interrupted
}
}
#define VISUAL_OBJECT_SEQUENCE_START_CODE 0x000001B0
#define VISUAL_OBJECT_SEQUENCE_END_CODE 0x000001B1
#define GROUP_VOP_START_CODE 0x000001B3
#define VISUAL_OBJECT_START_CODE 0x000001B5
#define VOP_START_CODE 0x000001B6
unsigned MPEG4VideoStreamParser
::parseVisualObjectSequence(Boolean haveSeenStartCode) {
#ifdef DEBUG
fprintf(stderr, "parsing VisualObjectSequence\n");
#endif
usingSource()->startNewConfig();
u_int32_t first4Bytes;
if (!haveSeenStartCode) {
while ((first4Bytes = test4Bytes()) != VISUAL_OBJECT_SEQUENCE_START_CODE) {
#ifdef DEBUG
fprintf(stderr, "ignoring non VS header: 0x%08x\n", first4Bytes);
#endif
get1Byte(); setParseState(PARSING_VISUAL_OBJECT_SEQUENCE);
// ensures we progress over bad data
}
first4Bytes = get4Bytes();
} else {
// We've already seen the start code
first4Bytes = VISUAL_OBJECT_SEQUENCE_START_CODE;
}
save4Bytes(first4Bytes);
// The next byte is the "profile_and_level_indication":
u_int8_t pali = get1Byte();
#ifdef DEBUG
fprintf(stderr, "profile_and_level_indication: %02x\n", pali);
#endif
saveByte(pali);
usingSource()->fProfileAndLevelIndication = pali;
// Now, copy all bytes that we see, up until we reach
// a VISUAL_OBJECT_START_CODE:
u_int32_t next4Bytes = get4Bytes();
while (next4Bytes != VISUAL_OBJECT_START_CODE) {
saveToNextCode(next4Bytes);
}
setParseState(PARSING_VISUAL_OBJECT);
// Compute this frame's presentation time:
usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);
// This header forms part of the 'configuration' information:
usingSource()->appendToNewConfig(fStartOfFrame, curFrameSize());
return curFrameSize();
}
static inline Boolean isVideoObjectStartCode(u_int32_t code) {
return code >= 0x00000100 && code <= 0x0000011F;
}
unsigned MPEG4VideoStreamParser::parseVisualObject() {
#ifdef DEBUG
fprintf(stderr, "parsing VisualObject\n");
#endif
// Note that we've already read the VISUAL_OBJECT_START_CODE
save4Bytes(VISUAL_OBJECT_START_CODE);
// Next, extract the "visual_object_type" from the next 1 or 2 bytes:
u_int8_t nextByte = get1Byte(); saveByte(nextByte);
Boolean is_visual_object_identifier = (nextByte&0x80) != 0;
u_int8_t visual_object_type;
if (is_visual_object_identifier) {
#ifdef DEBUG
fprintf(stderr, "visual_object_verid: 0x%x; visual_object_priority: 0x%x\n", (nextByte&0x78)>>3, (nextByte&0x07));
#endif
nextByte = get1Byte(); saveByte(nextByte);
visual_object_type = (nextByte&0xF0)>>4;
} else {
visual_object_type = (nextByte&0x78)>>3;
}
#ifdef DEBUG
fprintf(stderr, "visual_object_type: 0x%x\n", visual_object_type);
#endif
// At present, we support only the "Video ID" "visual_object_type" (1)
if (visual_object_type != 1) {
usingSource()->envir() << "MPEG4VideoStreamParser::parseVisualObject(): Warning: We don't handle visual_object_type " << visual_object_type << "\n";
}
// Now, copy all bytes that we see, up until we reach
// a video_object_start_code
u_int32_t next4Bytes = get4Bytes();
while (!isVideoObjectStartCode(next4Bytes)) {
saveToNextCode(next4Bytes);
}
save4Bytes(next4Bytes);
#ifdef DEBUG
fprintf(stderr, "saw a video_object_start_code: 0x%08x\n", next4Bytes);
#endif
setParseState(PARSING_VIDEO_OBJECT_LAYER);
// Compute this frame's presentation time:
usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);
// This header forms part of the 'configuration' information:
usingSource()->appendToNewConfig(fStartOfFrame, curFrameSize());
return curFrameSize();
}
static inline Boolean isVideoObjectLayerStartCode(u_int32_t code) {
return code >= 0x00000120 && code <= 0x0000012F;
}
Boolean MPEG4VideoStreamParser::getNextFrameBit(u_int8_t& result) {
if (fNumBitsSeenSoFar/8 >= curFrameSize()) return False;
u_int8_t nextByte = fStartOfFrame[fNumBitsSeenSoFar/8];
result = (nextByte>>(7-fNumBitsSeenSoFar%8))&1;
++fNumBitsSeenSoFar;
return True;
}
Boolean MPEG4VideoStreamParser::getNextFrameBits(unsigned numBits,
u_int32_t& result) {
result = 0;
for (unsigned i = 0; i < numBits; ++i) {
u_int8_t nextBit;
if (!getNextFrameBit(nextBit)) return False;
result = (result<<1)|nextBit;
}
return True;
}
void MPEG4VideoStreamParser::analyzeVOLHeader() {
// Extract timing information (in particular,
// "vop_time_increment_resolution") from the VOL Header:
fNumBitsSeenSoFar = 41;
do {
u_int8_t is_object_layer_identifier;
if (!getNextFrameBit(is_object_layer_identifier)) break;
if (is_object_layer_identifier) fNumBitsSeenSoFar += 7;
u_int32_t aspect_ratio_info;
if (!getNextFrameBits(4, aspect_ratio_info)) break;
if (aspect_ratio_info == 15 /*extended_PAR*/) fNumBitsSeenSoFar += 16;
u_int8_t vol_control_parameters;
if (!getNextFrameBit(vol_control_parameters)) break;
if (vol_control_parameters) {
fNumBitsSeenSoFar += 3; // chroma_format; low_delay
u_int8_t vbw_parameters;
if (!getNextFrameBit(vbw_parameters)) break;
if (vbw_parameters) fNumBitsSeenSoFar += 79;
}
fNumBitsSeenSoFar += 2; // video_object_layer_shape
u_int8_t marker_bit;
if (!getNextFrameBit(marker_bit)) break;
if (marker_bit != 1) { // sanity check
usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): marker_bit 1 not set!\n";
break;
}
if (!getNextFrameBits(16, vop_time_increment_resolution)) break;
#ifdef DEBUG
fprintf(stderr, "vop_time_increment_resolution: %d\n", vop_time_increment_resolution);
#endif
if (vop_time_increment_resolution == 0) {
usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): vop_time_increment_resolution is zero!\n";
break;
}
// Compute how many bits are necessary to represent this:
fNumVTIRBits = 0;
for (unsigned test = vop_time_increment_resolution; test>0; test /= 2) {
++fNumVTIRBits;
}
if (!getNextFrameBit(marker_bit)) break;
if (marker_bit != 1) { // sanity check
usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): marker_bit 2 not set!\n";
break;
}
if (!getNextFrameBit(fixed_vop_rate)) break;
if (fixed_vop_rate) {
// Get the following "fixed_vop_time_increment":
if (!getNextFrameBits(fNumVTIRBits, fixed_vop_time_increment)) break;
#ifdef DEBUG
fprintf(stderr, "fixed_vop_time_increment: %d\n", fixed_vop_time_increment);
if (fixed_vop_time_increment == 0) {
usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): fixed_vop_time_increment is zero!\n";
}
#endif
}
// Use "vop_time_increment_resolution" as the 'frame rate'
// (really, 'tick rate'):
usingSource()->fFrameRate = (double)vop_time_increment_resolution;
#ifdef DEBUG
fprintf(stderr, "fixed_vop_rate: %d; 'frame' (really tick) rate: %f\n", fixed_vop_rate, usingSource()->fFrameRate);
#endif
return;
} while (0);
if (fNumBitsSeenSoFar/8 >= curFrameSize()) {
char errMsg[200];
sprintf(errMsg, "Not enough bits in VOL header: %d/8 >= %d\n", fNumBitsSeenSoFar, curFrameSize());
usingSource()->envir() << errMsg;
}
}
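// Worked example (editorial note, not library code): if
// "vop_time_increment_resolution" is 25 (a 25 Hz tick clock), the
// halving loop above visits 25 -> 12 -> 6 -> 3 -> 1, so fNumVTIRBits
// becomes 5; each subsequent "vop_time_increment" field is then read
// as a 5-bit number, enough to count ticks 0..24 within one second.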
unsigned MPEG4VideoStreamParser::parseVideoObjectLayer() {
#ifdef DEBUG
fprintf(stderr, "parsing VideoObjectLayer\n");
#endif
// The first 4 bytes must be a "video_object_layer_start_code".
// If not, this is a 'short video header', which we currently
// don't support:
u_int32_t next4Bytes = get4Bytes();
if (!isVideoObjectLayerStartCode(next4Bytes)) {
usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectLayer(): This appears to be a 'short video header', which we current don't support\n";
}
// Now, copy all bytes that we see, up until we reach
// a GROUP_VOP_START_CODE or a VOP_START_CODE:
do {
saveToNextCode(next4Bytes);
} while (next4Bytes != GROUP_VOP_START_CODE
&& next4Bytes != VOP_START_CODE);
analyzeVOLHeader();
setParseState((next4Bytes == GROUP_VOP_START_CODE)
? PARSING_GROUP_OF_VIDEO_OBJECT_PLANE
: PARSING_VIDEO_OBJECT_PLANE);
// Compute this frame's presentation time:
usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);
// This header ends the 'configuration' information:
usingSource()->appendToNewConfig(fStartOfFrame, curFrameSize());
usingSource()->completeNewConfig();
return curFrameSize();
}
unsigned MPEG4VideoStreamParser::parseGroupOfVideoObjectPlane() {
#ifdef DEBUG
fprintf(stderr, "parsing GroupOfVideoObjectPlane\n");
#endif
// Note that we've already read the GROUP_VOP_START_CODE
save4Bytes(GROUP_VOP_START_CODE);
// Next, extract the (18-bit) time code from the next 3 bytes:
u_int8_t next3Bytes[3];
getBytes(next3Bytes, 3);
saveByte(next3Bytes[0]);saveByte(next3Bytes[1]);saveByte(next3Bytes[2]);
unsigned time_code
= (next3Bytes[0]<<10)|(next3Bytes[1]<<2)|(next3Bytes[2]>>6);
unsigned time_code_hours = (time_code&0x0003E000)>>13;
unsigned time_code_minutes = (time_code&0x00001F80)>>7;
#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
Boolean marker_bit = (time_code&0x00000040) != 0;
#endif
unsigned time_code_seconds = (time_code&0x0000003F);
#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
fprintf(stderr, "time_code: 0x%05x, hours %d, minutes %d, marker_bit %d, seconds %d\n", time_code, time_code_hours, time_code_minutes, marker_bit, time_code_seconds);
#endif
fJustSawTimeCode = True;
// Now, copy all bytes that we see, up until we reach a VOP_START_CODE:
u_int32_t next4Bytes = get4Bytes();
while (next4Bytes != VOP_START_CODE) {
saveToNextCode(next4Bytes);
}
// Compute this frame's presentation time:
usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);
// Record the time code:
usingSource()->setTimeCode(time_code_hours, time_code_minutes,
time_code_seconds, 0, 0);
// Note: Because the GOV header can appear anywhere (not just at a 1s point), we
// don't pass "fTotalTicksSinceLastTimeCode" as the "picturesSinceLastGOP" parameter.
fSecondsSinceLastTimeCode = 0;
if (fixed_vop_rate) fTotalTicksSinceLastTimeCode = 0;
setParseState(PARSING_VIDEO_OBJECT_PLANE);
return curFrameSize();
}
unsigned MPEG4VideoStreamParser::parseVideoObjectPlane() {
#ifdef DEBUG
fprintf(stderr, "#parsing VideoObjectPlane\n");
#endif
// Note that we've already read the VOP_START_CODE
save4Bytes(VOP_START_CODE);
// Get the "vop_coding_type" from the next byte:
u_int8_t nextByte = get1Byte(); saveByte(nextByte);
u_int8_t vop_coding_type = nextByte>>6;
// Next, get the "modulo_time_base" by counting the '1' bits that follow.
// We look at the next 32-bits only. This should be enough in most cases.
u_int32_t next4Bytes = get4Bytes();
u_int32_t timeInfo = (nextByte<<(32-6))|(next4Bytes>>6);
unsigned modulo_time_base = 0;
u_int32_t mask = 0x80000000;
while ((timeInfo&mask) != 0) {
++modulo_time_base;
mask >>= 1;
}
mask >>= 1;
// Check the following marker bit:
if ((timeInfo&mask) == 0) {
usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectPlane(): marker bit not set!\n";
}
mask >>= 1;
// Then, get the "vop_time_increment".
// First, make sure we have enough bits left for this:
if ((mask>>(fNumVTIRBits-1)) == 0) {
usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectPlane(): 32-bits are not enough to get \"vop_time_increment\"!\n";
}
unsigned vop_time_increment = 0;
for (unsigned i = 0; i < fNumVTIRBits; ++i) {
vop_time_increment |= timeInfo&mask;
mask >>= 1;
}
while (mask != 0) {
vop_time_increment >>= 1;
mask >>= 1;
}
#ifdef DEBUG
fprintf(stderr, "vop_coding_type: %d(%c), modulo_time_base: %d, vop_time_increment: %d\n", vop_coding_type, "IPBS"[vop_coding_type], modulo_time_base, vop_time_increment);
#endif
// Now, copy all bytes that we see, up until we reach a code of some sort:
saveToNextCode(next4Bytes);
// Update our counters based on the frame timing information that we saw:
if (fixed_vop_time_increment > 0) {
// This is a 'fixed_vop_rate' stream. Use 'fixed_vop_time_increment':
usingSource()->fPictureCount += fixed_vop_time_increment;
if (vop_time_increment > 0 || modulo_time_base > 0) {
fTotalTicksSinceLastTimeCode += fixed_vop_time_increment;
// Note: "fSecondsSinceLastTimeCode" and "fPrevNewTotalTicks" are not used.
}
} else {
// Use 'vop_time_increment':
unsigned newTotalTicks
= (fSecondsSinceLastTimeCode + modulo_time_base)*vop_time_increment_resolution
+ vop_time_increment;
if (newTotalTicks == fPrevNewTotalTicks && fPrevNewTotalTicks > 0) {
// This is apparently a buggy MPEG-4 video stream, because
// "vop_time_increment" did not change. Overcome this error,
// by pretending that it did change.
#ifdef DEBUG
fprintf(stderr, "Buggy MPEG-4 video stream: \"vop_time_increment\" did not change!\n");
#endif
// The following assumes that we don't have 'B' frames. If we do, then TARFU!
usingSource()->fPictureCount += vop_time_increment;
fTotalTicksSinceLastTimeCode += vop_time_increment;
fSecondsSinceLastTimeCode += modulo_time_base;
} else {
if (newTotalTicks < fPrevNewTotalTicks && vop_coding_type != 2/*B*/
&& modulo_time_base == 0 && vop_time_increment == 0 && !fJustSawTimeCode) {
// This is another kind of buggy MPEG-4 video stream, in which
// "vop_time_increment" wraps around, but without
// "modulo_time_base" changing (or just having had a new time code).
// Overcome this by pretending that "vop_time_increment" *did* wrap around:
#ifdef DEBUG
fprintf(stderr, "Buggy MPEG-4 video stream: \"vop_time_increment\" wrapped around, but without \"modulo_time_base\" changing!\n");
#endif
++fSecondsSinceLastTimeCode;
newTotalTicks += vop_time_increment_resolution;
}
fPrevNewTotalTicks = newTotalTicks;
if (vop_coding_type != 2/*B*/) {
int pictureCountDelta = newTotalTicks - fTotalTicksSinceLastTimeCode;
if (pictureCountDelta <= 0) pictureCountDelta = fPrevPictureCountDelta;
// ensures that the picture count is always increasing
usingSource()->fPictureCount += pictureCountDelta;
fPrevPictureCountDelta = pictureCountDelta;
fTotalTicksSinceLastTimeCode = newTotalTicks;
fSecondsSinceLastTimeCode += modulo_time_base;
}
}
}
fJustSawTimeCode = False; // for next time
// The next thing to parse depends on the code that we just saw,
// but we are assumed to have ended the current picture:
usingSource()->fPictureEndMarker = True; // HACK #####
switch (next4Bytes) {
case VISUAL_OBJECT_SEQUENCE_END_CODE: {
setParseState(PARSING_VISUAL_OBJECT_SEQUENCE_END_CODE);
break;
}
case VISUAL_OBJECT_SEQUENCE_START_CODE: {
setParseState(PARSING_VISUAL_OBJECT_SEQUENCE_SEEN_CODE);
break;
}
case VISUAL_OBJECT_START_CODE: {
setParseState(PARSING_VISUAL_OBJECT);
break;
}
case GROUP_VOP_START_CODE: {
setParseState(PARSING_GROUP_OF_VIDEO_OBJECT_PLANE);
break;
}
case VOP_START_CODE: {
setParseState(PARSING_VIDEO_OBJECT_PLANE);
break;
}
default: {
if (isVideoObjectStartCode(next4Bytes)) {
setParseState(PARSING_VIDEO_OBJECT_LAYER);
} else if (isVideoObjectLayerStartCode(next4Bytes)){
// copy all bytes that we see, up until we reach a VOP_START_CODE:
u_int32_t next4Bytes = get4Bytes();
while (next4Bytes != VOP_START_CODE) {
saveToNextCode(next4Bytes);
}
setParseState(PARSING_VIDEO_OBJECT_PLANE);
} else {
usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectPlane(): Saw unexpected code "
<< (void*)next4Bytes << "\n";
setParseState(PARSING_VIDEO_OBJECT_PLANE); // the safest way to recover...
}
break;
}
}
// Compute this frame's presentation time:
usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);
return curFrameSize();
}
unsigned MPEG4VideoStreamParser::parseVisualObjectSequenceEndCode() {
#ifdef DEBUG
fprintf(stderr, "parsing VISUAL_OBJECT_SEQUENCE_END_CODE\n");
#endif
// Note that we've already read the VISUAL_OBJECT_SEQUENCE_END_CODE
save4Bytes(VISUAL_OBJECT_SEQUENCE_END_CODE);
setParseState(PARSING_VISUAL_OBJECT_SEQUENCE);
// Treat this as if we had ended a picture:
usingSource()->fPictureEndMarker = True; // HACK #####
return curFrameSize();
}
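All of the start codes #define'd in this file share the 3-byte prefix 00 00 01, which is why the resynchronization loop in "parseVisualObjectSequence()" can recover from bad data by discarding one byte at a time. A self-contained sketch of that scan (the function name is mine):
#include <stddef.h>
#include <stdint.h>
static size_t findNextStartCodePrefix(uint8_t const* data, size_t len) {
  for (size_t i = 0; i + 3 <= len; ++i) {
    if (data[i] == 0 && data[i+1] == 0 && data[i+2] == 1) return i;
  }
  return len; // no 00 00 01 prefix found
}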
live/liveMedia/WAVAudioFileSource.cpp 000444 001751 000000 00000027363 12265042432 017755 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A WAV audio file source
// Implementation
#include "WAVAudioFileSource.hh"
#include "InputFile.hh"
#include "GroupsockHelper.hh"
////////// WAVAudioFileSource //////////
WAVAudioFileSource*
WAVAudioFileSource::createNew(UsageEnvironment& env, char const* fileName) {
do {
FILE* fid = OpenInputFile(env, fileName);
if (fid == NULL) break;
WAVAudioFileSource* newSource = new WAVAudioFileSource(env, fid);
if (newSource != NULL && newSource->bitsPerSample() == 0) {
// The WAV file header was apparently invalid.
Medium::close(newSource);
break;
}
newSource->fFileSize = (unsigned)GetFileSize(fileName, fid);
return newSource;
} while (0);
return NULL;
}
unsigned WAVAudioFileSource::numPCMBytes() const {
if (fFileSize < fWAVHeaderSize) return 0;
return fFileSize - fWAVHeaderSize;
}
void WAVAudioFileSource::setScaleFactor(int scale) {
if (!fFidIsSeekable) return; // we can't do 'trick play' operations on non-seekable files
fScaleFactor = scale;
if (fScaleFactor < 0 && TellFile64(fFid) > 0) {
// Because we're reading backwards, seek back one sample, to ensure that
// (i) we start reading the last sample before the start point, and
// (ii) we don't hit end-of-file on the first read.
int bytesPerSample = (fNumChannels*fBitsPerSample)/8;
if (bytesPerSample == 0) bytesPerSample = 1;
SeekFile64(fFid, -bytesPerSample, SEEK_CUR);
}
}
void WAVAudioFileSource::seekToPCMByte(unsigned byteNumber, unsigned numBytesToStream) {
byteNumber += fWAVHeaderSize;
if (byteNumber > fFileSize) byteNumber = fFileSize;
SeekFile64(fFid, byteNumber, SEEK_SET);
fNumBytesToStream = numBytesToStream;
fLimitNumBytesToStream = fNumBytesToStream > 0;
}
unsigned char WAVAudioFileSource::getAudioFormat() {
return fAudioFormat;
}
#define nextc fgetc(fid)
static Boolean get4Bytes(FILE* fid, u_int32_t& result) { // little-endian
int c0, c1, c2, c3;
if ((c0 = nextc) == EOF || (c1 = nextc) == EOF ||
(c2 = nextc) == EOF || (c3 = nextc) == EOF) return False;
result = (c3<<24)|(c2<<16)|(c1<<8)|c0;
return True;
}
static Boolean get2Bytes(FILE* fid, u_int16_t& result) {//little-endian
int c0, c1;
if ((c0 = nextc) == EOF || (c1 = nextc) == EOF) return False;
result = (c1<<8)|c0;
return True;
}
static Boolean skipBytes(FILE* fid, int num) {
while (num-- > 0) {
if (nextc == EOF) return False;
}
return True;
}
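// Editorial note: the three helpers above assemble RIFF's little-endian
// fields byte-by-byte, so they behave identically on big- and
// little-endian hosts; e.g. the byte sequence 10 00 00 00 yields 16,
// the canonical "fmt " chunk length for plain PCM.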
WAVAudioFileSource::WAVAudioFileSource(UsageEnvironment& env, FILE* fid)
: AudioInputDevice(env, 0, 0, 0, 0)/* set the real parameters later */,
fFid(fid), fFidIsSeekable(False), fLastPlayTime(0), fHaveStartedReading(False), fWAVHeaderSize(0), fFileSize(0),
fScaleFactor(1), fLimitNumBytesToStream(False), fNumBytesToStream(0), fAudioFormat(WA_UNKNOWN) {
// Check the WAV file header for validity.
// Note: The following web pages contain info about the WAV format:
// http://www.ringthis.com/dev/wave_format.htm
// http://www.lightlink.com/tjweber/StripWav/Canon.html
// http://www.onicos.com/staff/iz/formats/wav.html
Boolean success = False; // until we learn otherwise
do {
// RIFF Chunk:
if (nextc != 'R' || nextc != 'I' || nextc != 'F' || nextc != 'F') break;
if (!skipBytes(fid, 4)) break;
if (nextc != 'W' || nextc != 'A' || nextc != 'V' || nextc != 'E') break;
// Skip over any chunk that's not a FORMAT ('fmt ') chunk:
u_int32_t tmp;
if (!get4Bytes(fid, tmp)) break;
if (tmp != 0x20746d66/*'fmt ', little-endian*/) {
// Skip this chunk:
if (!get4Bytes(fid, tmp)) break;
if (!skipBytes(fid, tmp)) break;
}
// FORMAT Chunk (the 4-byte header code has already been parsed):
unsigned formatLength;
if (!get4Bytes(fid, formatLength)) break;
unsigned short audioFormat;
if (!get2Bytes(fid, audioFormat)) break;
fAudioFormat = (unsigned char)audioFormat;
if (fAudioFormat != WA_PCM && fAudioFormat != WA_PCMA && fAudioFormat != WA_PCMU && fAudioFormat != WA_IMA_ADPCM) {
// It's a format that we don't (yet) understand
env.setResultMsg("Audio format is not one that we handle (PCM/PCMU/PCMA or IMA ADPCM)");
break;
}
unsigned short numChannels;
if (!get2Bytes(fid, numChannels)) break;
fNumChannels = (unsigned char)numChannels;
if (fNumChannels < 1 || fNumChannels > 2) { // invalid # channels
char errMsg[100];
sprintf(errMsg, "Bad # channels: %d", fNumChannels);
env.setResultMsg(errMsg);
break;
}
if (!get4Bytes(fid, fSamplingFrequency)) break;
if (fSamplingFrequency == 0) {
env.setResultMsg("Bad sampling frequency: 0");
break;
}
if (!skipBytes(fid, 6)) break; // "nAvgBytesPerSec" (4 bytes) + "nBlockAlign" (2 bytes)
unsigned short bitsPerSample;
if (!get2Bytes(fid, bitsPerSample)) break;
fBitsPerSample = (unsigned char)bitsPerSample;
if (fBitsPerSample == 0) {
env.setResultMsg("Bad bits-per-sample: 0");
break;
}
if (!skipBytes(fid, formatLength - 16)) break;
// FACT chunk (optional):
int c = nextc;
if (c == 'f') {
if (nextc != 'a' || nextc != 'c' || nextc != 't') break;
unsigned factLength;
if (!get4Bytes(fid, factLength)) break;
if (!skipBytes(fid, factLength)) break;
c = nextc;
}
// DATA Chunk:
if (c != 'd' || nextc != 'a' || nextc != 't' || nextc != 'a') break;
if (!skipBytes(fid, 4)) break;
// The header is good; the remaining data are the sample bytes.
fWAVHeaderSize = (unsigned)TellFile64(fid);
success = True;
} while (0);
if (!success) {
env.setResultMsg("Bad WAV file format");
// Set "fBitsPerSample" to zero, to indicate failure:
fBitsPerSample = 0;
return;
}
fPlayTimePerSample = 1e6/(double)fSamplingFrequency;
// Although PCM is a sample-based format, we group samples into
// 'frames' for efficient delivery to clients. Set up our preferred
// frame size to be close to 20 ms, if possible, but always no greater
// than 1400 bytes (to ensure that it will fit in a single RTP packet)
unsigned maxSamplesPerFrame = (1400*8)/(fNumChannels*fBitsPerSample);
unsigned desiredSamplesPerFrame = (unsigned)(0.02*fSamplingFrequency);
unsigned samplesPerFrame = desiredSamplesPerFrame < maxSamplesPerFrame ? desiredSamplesPerFrame : maxSamplesPerFrame;
fPreferredFrameSize = (samplesPerFrame*fNumChannels*fBitsPerSample)/8;
fFidIsSeekable = FileIsSeekable(fFid);
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
// Now that we've finished reading the WAV header, all future reads (of audio samples) from the file will be asynchronous:
makeSocketNonBlocking(fileno(fFid));
#endif
}
WAVAudioFileSource::~WAVAudioFileSource() {
if (fFid == NULL) return;
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid));
#endif
CloseInputFile(fFid);
}
void WAVAudioFileSource::doGetNextFrame() {
if (feof(fFid) || ferror(fFid) || (fLimitNumBytesToStream && fNumBytesToStream == 0)) {
handleClosure(this);
return;
}
fFrameSize = 0; // until it's set later
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
doReadFromFile();
#else
if (!fHaveStartedReading) {
// Await readable data from the file:
envir().taskScheduler().turnOnBackgroundReadHandling(fileno(fFid),
(TaskScheduler::BackgroundHandlerProc*)&fileReadableHandler, this);
fHaveStartedReading = True;
}
#endif
}
void WAVAudioFileSource::doStopGettingFrames() {
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid));
fHaveStartedReading = False;
#endif
}
void WAVAudioFileSource::fileReadableHandler(WAVAudioFileSource* source, int /*mask*/) {
if (!source->isCurrentlyAwaitingData()) {
source->doStopGettingFrames(); // we're not ready for the data yet
return;
}
source->doReadFromFile();
}
void WAVAudioFileSource::doReadFromFile() {
// Try to read as many bytes as will fit in the buffer provided (or "fPreferredFrameSize" if less)
if (fLimitNumBytesToStream && fNumBytesToStream < fMaxSize) {
fMaxSize = fNumBytesToStream;
}
if (fPreferredFrameSize < fMaxSize) {
fMaxSize = fPreferredFrameSize;
}
unsigned bytesPerSample = (fNumChannels*fBitsPerSample)/8;
if (bytesPerSample == 0) bytesPerSample = 1; // because we can't read less than a byte at a time
// For 'trick play', read one sample at a time; otherwise (normal case) read samples in bulk:
unsigned bytesToRead = fScaleFactor == 1 ? fMaxSize - fMaxSize%bytesPerSample : bytesPerSample;
unsigned numBytesRead;
while (1) { // loop for 'trick play' only
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
numBytesRead = fread(fTo, 1, bytesToRead, fFid);
#else
if (fFidIsSeekable) {
numBytesRead = fread(fTo, 1, bytesToRead, fFid);
} else {
// For non-seekable files (e.g., pipes), call "read()" rather than "fread()", to ensure that the read doesn't block:
numBytesRead = read(fileno(fFid), fTo, bytesToRead);
}
#endif
if (numBytesRead == 0) {
handleClosure(this);
return;
}
fFrameSize += numBytesRead;
fTo += numBytesRead;
fMaxSize -= numBytesRead;
fNumBytesToStream -= numBytesRead;
// If we did an asynchronous read, and didn't read an integral number of samples, then we need to wait for another read:
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
if (fFrameSize%bytesPerSample > 0) return;
#endif
// If we're doing 'trick play', then seek to the appropriate place for reading the next sample,
// and keep reading until we fill the provided buffer:
if (fScaleFactor != 1) {
SeekFile64(fFid, (fScaleFactor-1)*bytesPerSample, SEEK_CUR);
if (fMaxSize < bytesPerSample) break;
} else {
break; // from the loop (normal case)
}
}
// Set the 'presentation time' and 'duration' of this frame:
if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
// This is the first frame, so use the current time:
gettimeofday(&fPresentationTime, NULL);
} else {
// Increment by the play time of the previous data:
unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
fPresentationTime.tv_sec += uSeconds/1000000;
fPresentationTime.tv_usec = uSeconds%1000000;
}
// Remember the play time of this data:
fDurationInMicroseconds = fLastPlayTime
= (unsigned)((fPlayTimePerSample*fFrameSize)/bytesPerSample);
// Inform the reader that he has data:
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
// To avoid possible infinite recursion, we need to return to the event loop to do this:
nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
(TaskFunc*)FramedSource::afterGetting, this);
#else
// Because the file read was done from the event loop, we can call the
// 'after getting' function directly, without risk of infinite recursion:
FramedSource::afterGetting(this);
#endif
}
Boolean WAVAudioFileSource::setInputPort(int /*portIndex*/) {
return True;
}
double WAVAudioFileSource::getAverageLevel() const {
return 0.0;//##### fix this later
}
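The frame-sizing rule in the constructor (aim for roughly 20 ms of audio, but never more than 1400 bytes, so a frame fits in a single RTP packet) can be restated standalone; the sketch below uses the same arithmetic (the function name is mine). For 8 kHz mono 16-bit PCM it yields 320 bytes (160 samples, 20 ms); for 48 kHz stereo 16-bit PCM the 1400-byte cap wins, giving 350 samples (~7.3 ms) per frame.
static unsigned preferredFrameSize(unsigned numChannels, unsigned bitsPerSample,
                                   unsigned samplingFrequency) {
  unsigned maxSamplesPerFrame = (1400*8)/(numChannels*bitsPerSample);
  unsigned desiredSamplesPerFrame = (unsigned)(0.02*samplingFrequency);
  unsigned samplesPerFrame = desiredSamplesPerFrame < maxSamplesPerFrame
    ? desiredSamplesPerFrame : maxSamplesPerFrame;
  return (samplesPerFrame*numChannels*bitsPerSample)/8;
}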
live/liveMedia/MPEG1or2AudioRTPSource.cpp 000444 001751 000000 00000004323 12265042432 020331 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// MPEG-1 or MPEG-2 Audio RTP Sources
// Implementation
#include "MPEG1or2AudioRTPSource.hh"
MPEG1or2AudioRTPSource*
MPEG1or2AudioRTPSource::createNew(UsageEnvironment& env,
Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency) {
return new MPEG1or2AudioRTPSource(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency);
}
MPEG1or2AudioRTPSource::MPEG1or2AudioRTPSource(UsageEnvironment& env,
Groupsock* rtpGS,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency)
: MultiFramedRTPSource(env, rtpGS,
rtpPayloadFormat, rtpTimestampFrequency) {
}
MPEG1or2AudioRTPSource::~MPEG1or2AudioRTPSource() {
}
Boolean MPEG1or2AudioRTPSource
::processSpecialHeader(BufferedPacket* packet,
unsigned& resultSpecialHeaderSize) {
// There's a 4-byte header indicating fragmentation.
if (packet->dataSize() < 4) return False;
// Note: This fragmentation header is actually useless to us, because
// it doesn't tell us whether or not this RTP packet *ends* a
// fragmented frame. Thus, we can't use it to properly set
// "fCurrentPacketCompletesFrame". Instead, we assume that even
// a partial audio frame will be usable to clients.
resultSpecialHeaderSize = 4;
return True;
}
char const* MPEG1or2AudioRTPSource::MIMEtype() const {
return "audio/MPEG";
}
live/liveMedia/MP3StreamState.hh 000444 001751 000000 00000005427 12265042432 016743 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A class encapsulating the state of a MP3 stream
// C++ header
#ifndef _MP3_STREAM_STATE_HH
#define _MP3_STREAM_STATE_HH
#ifndef _USAGE_ENVIRONMENT_HH
#include "UsageEnvironment.hh"
#endif
#ifndef _BOOLEAN_HH
#include "Boolean.hh"
#endif
#ifndef _MP3_INTERNALS_HH
#include "MP3Internals.hh"
#endif
#ifndef _NET_COMMON_H
#include "NetCommon.h"
#endif
#include <stdio.h>
#define XING_TOC_LENGTH 100
class MP3StreamState {
public:
MP3StreamState(UsageEnvironment& env);
virtual ~MP3StreamState();
void assignStream(FILE* fid, unsigned fileSize);
unsigned findNextHeader(struct timeval& presentationTime);
Boolean readFrame(unsigned char* outBuf, unsigned outBufSize,
unsigned& resultFrameSize,
unsigned& resultDurationInMicroseconds);
// called after findNextHeader()
void getAttributes(char* buffer, unsigned bufferSize) const;
float filePlayTime() const; // in seconds
unsigned fileSize() const { return fFileSize; }
void setPresentationTimeScale(unsigned scale) { fPresentationTimeScale = scale; }
unsigned getByteNumberFromPositionFraction(float fraction); // 0.0 <= fraction <= 1.0
void seekWithinFile(unsigned seekByteNumber);
void checkForXingHeader(); // hack for Xing VBR files
protected: // private->protected requested by Pierre l'Hussiez
unsigned readFromStream(unsigned char* buf, unsigned numChars);
private:
MP3FrameParams& fr() {return fCurrentFrame;}
MP3FrameParams const& fr() const {return fCurrentFrame;}
struct timeval currentFramePlayTime() const;
Boolean findNextFrame();
private:
UsageEnvironment& fEnv;
FILE* fFid;
Boolean fFidIsReallyASocket;
unsigned fFileSize;
unsigned fNumFramesInFile;
unsigned fPresentationTimeScale;
// used if we're streaming at other than the normal rate
Boolean fIsVBR, fHasXingTOC;
u_int8_t fXingTOC[XING_TOC_LENGTH]; // set iff "fHasXingTOC" is True
MP3FrameParams fCurrentFrame;
struct timeval fNextFramePresentationTime;
};
#endif
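The Xing TOC that "checkForXingHeader()" looks for is a 100-entry table mapping play-time percentage to file position. A sketch of the conventional lookup behind "getByteNumberFromPositionFraction()" (this follows the standard Xing convention and is an assumption, not necessarily this class's exact arithmetic):
#include <stdint.h>
static unsigned xingByteNumber(float fraction, // 0.0 <= fraction <= 1.0
                               uint8_t const toc[100], unsigned fileSize) {
  unsigned index = (unsigned)(fraction*100);
  if (index > 99) index = 99;
  // Each TOC entry is a byte: entry/256 ~= fraction of the file size:
  return (unsigned)((toc[index]/256.0)*fileSize);
}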
live/liveMedia/H263plusVideoRTPSink.cpp 000444 001751 000000 00000006502 12265042432 020075 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for H.263+ video (RFC 4629)
// Implementation
#include "H263plusVideoRTPSink.hh"
H263plusVideoRTPSink
::H263plusVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
u_int32_t rtpTimestampFrequency)
: VideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "H263-1998") {
}
H263plusVideoRTPSink::~H263plusVideoRTPSink() {
}
H263plusVideoRTPSink*
H263plusVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
u_int32_t rtpTimestampFrequency) {
return new H263plusVideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency);
}
Boolean H263plusVideoRTPSink
::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
unsigned /*numBytesInFrame*/) const {
// A packet can contain only one frame
return False;
}
void H263plusVideoRTPSink
::doSpecialFrameHandling(unsigned fragmentationOffset,
unsigned char* frameStart,
unsigned numBytesInFrame,
struct timeval framePresentationTime,
unsigned numRemainingBytes) {
if (fragmentationOffset == 0) {
// This packet contains the first (or only) fragment of the frame.
// Set the 'P' bit in the special header:
unsigned short specialHeader = 0x0400;
// Also, reuse the first two bytes of the payload for this special
// header. (They should both have been zero.)
if (numBytesInFrame < 2) {
envir() << "H263plusVideoRTPSink::doSpecialFrameHandling(): bad frame size "
<< numBytesInFrame << "\n";
return;
}
if (frameStart[0] != 0 || frameStart[1] != 0) {
envir() << "H263plusVideoRTPSink::doSpecialFrameHandling(): unexpected non-zero first two bytes: "
<< (void*)(frameStart[0]) << "," << (void*)(frameStart[1]) << "\n";
}
frameStart[0] = specialHeader>>8;
frameStart[1] = (unsigned char)specialHeader;
} else {
unsigned short specialHeader = 0;
setSpecialHeaderBytes((unsigned char*)&specialHeader, 2);
}
if (numRemainingBytes == 0) {
// This packet contains the last (or only) fragment of the frame.
// Set the RTP 'M' ('marker') bit:
setMarkerBit();
}
// Also set the RTP timestamp:
setTimestamp(framePresentationTime);
}
unsigned H263plusVideoRTPSink::specialHeaderSize() const {
// There's a 2-byte special video header. However, if we're the first
// (or only) fragment of a frame, then we reuse the first 2 bytes of
// the payload instead.
return (curFragmentationOffset() == 0) ? 0 : 2;
}
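The constant 0x0400 above is the 16-bit RFC 4629 payload header with only the P ('picture start') bit set; the full layout is RR(5) | P(1) | V(1) | PLEN(6) | PEBIT(3). A sketch of the packing (the helper name is mine):
#include <stdint.h>
static uint16_t h263plusPayloadHeader(int pBit, int vBit,
                                      unsigned plen, unsigned pebit) {
  return (uint16_t)(((pBit ? 1 : 0) << 10) | ((vBit ? 1 : 0) << 9)
                    | ((plen & 0x3F) << 3) | (pebit & 0x7));
}
// h263plusPayloadHeader(1, 0, 0, 0) == 0x0400, as used above.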
live/liveMedia/MPEG1or2DemuxedElementaryStream.cpp 000444 001751 000000 00000006145 12265042432 022322 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A MPEG 1 or 2 Elementary Stream, demultiplexed from a Program Stream
// Implementation
#include "MPEG1or2DemuxedElementaryStream.hh"
////////// MPEG1or2DemuxedElementaryStream //////////
MPEG1or2DemuxedElementaryStream::
MPEG1or2DemuxedElementaryStream(UsageEnvironment& env, u_int8_t streamIdTag,
MPEG1or2Demux& sourceDemux)
: FramedSource(env),
fOurStreamIdTag(streamIdTag), fOurSourceDemux(sourceDemux), fMPEGversion(0) {
// Set our MIME type string for known media types:
if ((streamIdTag&0xE0) == 0xC0) {
fMIMEtype = "audio/MPEG";
} else if ((streamIdTag&0xF0) == 0xE0) {
fMIMEtype = "video/MPEG";
} else {
fMIMEtype = MediaSource::MIMEtype();
}
}
MPEG1or2DemuxedElementaryStream::~MPEG1or2DemuxedElementaryStream() {
fOurSourceDemux.noteElementaryStreamDeletion(this);
}
void MPEG1or2DemuxedElementaryStream::doGetNextFrame() {
fOurSourceDemux.getNextFrame(fOurStreamIdTag, fTo, fMaxSize,
afterGettingFrame, this,
handleClosure, this);
}
void MPEG1or2DemuxedElementaryStream::doStopGettingFrames() {
fOurSourceDemux.stopGettingFrames(fOurStreamIdTag);
}
char const* MPEG1or2DemuxedElementaryStream::MIMEtype() const {
return fMIMEtype;
}
unsigned MPEG1or2DemuxedElementaryStream::maxFrameSize() const {
return 6+65535;
// because the MPEG spec allows for PES packets as large as
// (6 + 65535) bytes (header + data)
}
void MPEG1or2DemuxedElementaryStream
::afterGettingFrame(void* clientData,
unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
MPEG1or2DemuxedElementaryStream* stream
= (MPEG1or2DemuxedElementaryStream*)clientData;
stream->afterGettingFrame1(frameSize, numTruncatedBytes,
presentationTime, durationInMicroseconds);
}
void MPEG1or2DemuxedElementaryStream
::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
fFrameSize = frameSize;
fNumTruncatedBytes = numTruncatedBytes;
fPresentationTime = presentationTime;
fDurationInMicroseconds = durationInMicroseconds;
fLastSeenSCR = fOurSourceDemux.lastSeenSCR();
fMPEGversion = fOurSourceDemux.mpegVersion();
FramedSource::afterGetting(this);
}
live/liveMedia/MPEG1or2VideoRTPSource.cpp 000444 001751 000000 00000005414 12265042432 020340 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// MPEG-1 or MPEG-2 Video RTP Sources
// Implementation
#include "MPEG1or2VideoRTPSource.hh"
MPEG1or2VideoRTPSource*
MPEG1or2VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency) {
return new MPEG1or2VideoRTPSource(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency);
}
MPEG1or2VideoRTPSource::MPEG1or2VideoRTPSource(UsageEnvironment& env,
Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency)
: MultiFramedRTPSource(env, RTPgs,
rtpPayloadFormat, rtpTimestampFrequency){
}
MPEG1or2VideoRTPSource::~MPEG1or2VideoRTPSource() {
}
Boolean MPEG1or2VideoRTPSource
::processSpecialHeader(BufferedPacket* packet,
unsigned& resultSpecialHeaderSize) {
// There's a 4-byte video-specific header
if (packet->dataSize() < 4) return False;
u_int32_t header = ntohl(*(u_int32_t*)(packet->data()));
u_int32_t sBit = header&0x00002000; // sequence-header-present
u_int32_t bBit = header&0x00001000; // beginning-of-slice
u_int32_t eBit = header&0x00000800; // end-of-slice
fCurrentPacketBeginsFrame = (sBit|bBit) != 0;
fCurrentPacketCompletesFrame = ((sBit != 0) && (bBit == 0)) || (eBit != 0);
resultSpecialHeaderSize = 4;
return True;
}
Boolean MPEG1or2VideoRTPSource
::packetIsUsableInJitterCalculation(unsigned char* packet,
unsigned packetSize) {
// There's a 4-byte video-specific header
if (packetSize < 4) return False;
// Extract the "Picture-Type" field from this, to determine whether
// this packet can be used in jitter calculations:
unsigned header = ntohl(*(u_int32_t*)packet);
unsigned short pictureType = (header>>8)&0x7;
if (pictureType == 1) { // an I frame
return True;
} else { // a P, B, D, or other unknown frame type
return False;
}
}
char const* MPEG1or2VideoRTPSource::MIMEtype() const {
return "video/MPEG";
}
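For reference, a standalone decoding of the RFC 2250 video-specific header bits tested in "processSpecialHeader()" and "packetIsUsableInJitterCalculation()" above (the struct and names are mine):
#include <stdint.h>
struct MPEGVideoHeaderBits {
  int sequenceHeaderPresent; // S bit
  int beginningOfSlice;      // B bit
  int endOfSlice;            // E bit
  unsigned pictureType;      // 1 = I, 2 = P, 3 = B, 4 = D
};
static MPEGVideoHeaderBits decodeMPEGVideoHeader(uint32_t header) {
  MPEGVideoHeaderBits b;
  b.sequenceHeaderPresent = (header & 0x00002000) != 0;
  b.beginningOfSlice      = (header & 0x00001000) != 0;
  b.endOfSlice            = (header & 0x00000800) != 0;
  b.pictureType           = (header >> 8) & 0x7;
  return b;
}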
live/liveMedia/MPEG1or2VideoStreamFramer.cpp 000444 001751 000000 00000036613 12265042432 021107 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A filter that breaks up an MPEG 1 or 2 video elementary stream into
// frames for: Video_Sequence_Header, GOP_Header, Picture_Header
// Implementation
#include "MPEG1or2VideoStreamFramer.hh"
#include "MPEGVideoStreamParser.hh"
#include <string.h>
////////// MPEG1or2VideoStreamParser definition //////////
// An enum representing the current state of the parser:
enum MPEGParseState {
PARSING_VIDEO_SEQUENCE_HEADER,
PARSING_VIDEO_SEQUENCE_HEADER_SEEN_CODE,
PARSING_GOP_HEADER,
PARSING_GOP_HEADER_SEEN_CODE,
PARSING_PICTURE_HEADER,
PARSING_SLICE
};
#define VSH_MAX_SIZE 1000
class MPEG1or2VideoStreamParser: public MPEGVideoStreamParser {
public:
MPEG1or2VideoStreamParser(MPEG1or2VideoStreamFramer* usingSource,
FramedSource* inputSource,
Boolean iFramesOnly, double vshPeriod);
virtual ~MPEG1or2VideoStreamParser();
private: // redefined virtual functions:
virtual void flushInput();
virtual unsigned parse();
private:
void reset();
MPEG1or2VideoStreamFramer* usingSource() {
return (MPEG1or2VideoStreamFramer*)fUsingSource;
}
void setParseState(MPEGParseState parseState);
unsigned parseVideoSequenceHeader(Boolean haveSeenStartCode);
unsigned parseGOPHeader(Boolean haveSeenStartCode);
unsigned parsePictureHeader();
unsigned parseSlice();
private:
MPEGParseState fCurrentParseState;
unsigned fPicturesSinceLastGOP;
// can be used to compute timestamp for a video_sequence_header
unsigned short fCurPicTemporalReference;
// used to compute slice timestamp
unsigned char fCurrentSliceNumber; // set when parsing a slice
// A saved copy of the most recently seen 'video_sequence_header',
// in case we need to insert it into the stream periodically:
unsigned char fSavedVSHBuffer[VSH_MAX_SIZE];
unsigned fSavedVSHSize;
double fSavedVSHTimestamp;
double fVSHPeriod;
Boolean fIFramesOnly, fSkippingCurrentPicture;
void saveCurrentVSH();
Boolean needToUseSavedVSH();
unsigned useSavedVSH(); // returns the size of the saved VSH
};
////////// MPEG1or2VideoStreamFramer implementation //////////
MPEG1or2VideoStreamFramer::MPEG1or2VideoStreamFramer(UsageEnvironment& env,
FramedSource* inputSource,
Boolean iFramesOnly,
double vshPeriod,
Boolean createParser)
: MPEGVideoStreamFramer(env, inputSource) {
fParser = createParser
? new MPEG1or2VideoStreamParser(this, inputSource,
iFramesOnly, vshPeriod)
: NULL;
}
MPEG1or2VideoStreamFramer::~MPEG1or2VideoStreamFramer() {
}
MPEG1or2VideoStreamFramer*
MPEG1or2VideoStreamFramer::createNew(UsageEnvironment& env,
FramedSource* inputSource,
Boolean iFramesOnly,
double vshPeriod) {
// Need to add source type checking here??? #####
return new MPEG1or2VideoStreamFramer(env, inputSource, iFramesOnly, vshPeriod);
}
double MPEG1or2VideoStreamFramer::getCurrentPTS() const {
return fPresentationTime.tv_sec + fPresentationTime.tv_usec/1000000.0;
}
Boolean MPEG1or2VideoStreamFramer::isMPEG1or2VideoStreamFramer() const {
return True;
}
////////// MPEG1or2VideoStreamParser implementation //////////
MPEG1or2VideoStreamParser
::MPEG1or2VideoStreamParser(MPEG1or2VideoStreamFramer* usingSource,
FramedSource* inputSource,
Boolean iFramesOnly, double vshPeriod)
: MPEGVideoStreamParser(usingSource, inputSource),
fCurrentParseState(PARSING_VIDEO_SEQUENCE_HEADER),
fVSHPeriod(vshPeriod), fIFramesOnly(iFramesOnly) {
reset();
}
MPEG1or2VideoStreamParser::~MPEG1or2VideoStreamParser() {
}
void MPEG1or2VideoStreamParser::setParseState(MPEGParseState parseState) {
fCurrentParseState = parseState;
MPEGVideoStreamParser::setParseState();
}
void MPEG1or2VideoStreamParser::reset() {
fPicturesSinceLastGOP = 0;
fCurPicTemporalReference = 0;
fCurrentSliceNumber = 0;
fSavedVSHSize = 0;
fSkippingCurrentPicture = False;
}
void MPEG1or2VideoStreamParser::flushInput() {
reset();
StreamParser::flushInput();
if (fCurrentParseState != PARSING_VIDEO_SEQUENCE_HEADER) {
setParseState(PARSING_GOP_HEADER); // start from the next GOP
}
}
unsigned MPEG1or2VideoStreamParser::parse() {
try {
switch (fCurrentParseState) {
case PARSING_VIDEO_SEQUENCE_HEADER: {
return parseVideoSequenceHeader(False);
}
case PARSING_VIDEO_SEQUENCE_HEADER_SEEN_CODE: {
return parseVideoSequenceHeader(True);
}
case PARSING_GOP_HEADER: {
return parseGOPHeader(False);
}
case PARSING_GOP_HEADER_SEEN_CODE: {
return parseGOPHeader(True);
}
case PARSING_PICTURE_HEADER: {
return parsePictureHeader();
}
case PARSING_SLICE: {
return parseSlice();
}
default: {
return 0; // shouldn't happen
}
}
} catch (int /*e*/) {
#ifdef DEBUG
fprintf(stderr, "MPEG1or2VideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
#endif
return 0; // the parsing got interrupted
}
}
void MPEG1or2VideoStreamParser::saveCurrentVSH() {
unsigned frameSize = curFrameSize();
if (frameSize > sizeof fSavedVSHBuffer) return; // too big to save
memmove(fSavedVSHBuffer, fStartOfFrame, frameSize);
fSavedVSHSize = frameSize;
fSavedVSHTimestamp = usingSource()->getCurrentPTS();
}
Boolean MPEG1or2VideoStreamParser::needToUseSavedVSH() {
return usingSource()->getCurrentPTS() > fSavedVSHTimestamp+fVSHPeriod
&& fSavedVSHSize > 0;
}
unsigned MPEG1or2VideoStreamParser::useSavedVSH() {
unsigned bytesToUse = fSavedVSHSize;
unsigned maxBytesToUse = fLimit - fStartOfFrame;
if (bytesToUse > maxBytesToUse) bytesToUse = maxBytesToUse;
memmove(fStartOfFrame, fSavedVSHBuffer, bytesToUse);
// Also reset the saved timestamp:
fSavedVSHTimestamp = usingSource()->getCurrentPTS();
#ifdef DEBUG
fprintf(stderr, "used saved video_sequence_header (%d bytes)\n", bytesToUse);
#endif
return bytesToUse;
}
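// (Design note: a receiver that joins the stream partway through can't
// start decoding until it has seen a 'video_sequence_header', so
// periodically re-inserting the saved copy - at most once every
// "fVSHPeriod" seconds - appears intended to bound that startup delay.)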
#define VIDEO_SEQUENCE_HEADER_START_CODE 0x000001B3
#define GROUP_START_CODE 0x000001B8
#define PICTURE_START_CODE 0x00000100
#define SEQUENCE_END_CODE 0x000001B7
static double const frameRateFromCode[] = {
0.0, // forbidden
24000/1001.0, // approx 23.976
24.0,
25.0,
30000/1001.0, // approx 29.97
30.0,
50.0,
60000/1001.0, // approx 59.94
60.0,
0.0, // reserved
0.0, // reserved
0.0, // reserved
0.0, // reserved
0.0, // reserved
0.0, // reserved
0.0 // reserved
};
unsigned MPEG1or2VideoStreamParser
::parseVideoSequenceHeader(Boolean haveSeenStartCode) {
#ifdef DEBUG
fprintf(stderr, "parsing video sequence header\n");
#endif
unsigned first4Bytes;
if (!haveSeenStartCode) {
while ((first4Bytes = test4Bytes()) != VIDEO_SEQUENCE_HEADER_START_CODE) {
#ifdef DEBUG
fprintf(stderr, "ignoring non video sequence header: 0x%08x\n", first4Bytes);
#endif
get1Byte(); setParseState(PARSING_VIDEO_SEQUENCE_HEADER);
// ensures we progress over bad data
}
first4Bytes = get4Bytes();
} else {
// We've already seen the start code
first4Bytes = VIDEO_SEQUENCE_HEADER_START_CODE;
}
save4Bytes(first4Bytes);
// Next, extract the size and rate parameters from the next 8 bytes
unsigned paramWord1 = get4Bytes();
save4Bytes(paramWord1);
unsigned next4Bytes = get4Bytes();
#ifdef DEBUG
unsigned short horizontal_size_value = (paramWord1&0xFFF00000)>>(32-12);
unsigned short vertical_size_value = (paramWord1&0x000FFF00)>>8;
unsigned char aspect_ratio_information = (paramWord1&0x000000F0)>>4;
#endif
unsigned char frame_rate_code = (paramWord1&0x0000000F);
usingSource()->fFrameRate = frameRateFromCode[frame_rate_code];
#ifdef DEBUG
unsigned bit_rate_value = (next4Bytes&0xFFFFC000)>>(32-18);
unsigned vbv_buffer_size_value = (next4Bytes&0x00001FF8)>>3;
fprintf(stderr, "horizontal_size_value: %d, vertical_size_value: %d, aspect_ratio_information: %d, frame_rate_code: %d (=>%f fps), bit_rate_value: %d (=>%d bps), vbv_buffer_size_value: %d\n", horizontal_size_value, vertical_size_value, aspect_ratio_information, frame_rate_code, usingSource()->fFrameRate, bit_rate_value, bit_rate_value*400, vbv_buffer_size_value);
#endif
// Now, copy all bytes that we see, up until we reach a GROUP_START_CODE
// or a PICTURE_START_CODE:
do {
saveToNextCode(next4Bytes);
} while (next4Bytes != GROUP_START_CODE && next4Bytes != PICTURE_START_CODE);
setParseState((next4Bytes == GROUP_START_CODE)
? PARSING_GOP_HEADER_SEEN_CODE : PARSING_PICTURE_HEADER);
// Compute this frame's timestamp by noting how many pictures we've seen
// since the last GOP header:
usingSource()->computePresentationTime(fPicturesSinceLastGOP);
// Save this video_sequence_header, in case we need to insert a copy
// into the stream later:
saveCurrentVSH();
return curFrameSize();
}
unsigned MPEG1or2VideoStreamParser::parseGOPHeader(Boolean haveSeenStartCode) {
// First check whether we should insert a previously-saved
// 'video_sequence_header' here:
if (needToUseSavedVSH()) return useSavedVSH();
#ifdef DEBUG
fprintf(stderr, "parsing GOP header\n");
#endif
unsigned first4Bytes;
if (!haveSeenStartCode) {
while ((first4Bytes = test4Bytes()) != GROUP_START_CODE) {
#ifdef DEBUG
fprintf(stderr, "ignoring non GOP start code: 0x%08x\n", first4Bytes);
#endif
get1Byte(); setParseState(PARSING_GOP_HEADER);
// ensures we progress over bad data
}
first4Bytes = get4Bytes();
} else {
// We've already seen the GROUP_START_CODE
first4Bytes = GROUP_START_CODE;
}
save4Bytes(first4Bytes);
// Next, extract the (25-bit) time code from the next 4 bytes:
unsigned next4Bytes = get4Bytes();
unsigned time_code = (next4Bytes&0xFFFFFF80)>>(32-25);
#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
Boolean drop_frame_flag = (time_code&0x01000000) != 0;
#endif
unsigned time_code_hours = (time_code&0x00F80000)>>19;
unsigned time_code_minutes = (time_code&0x0007E000)>>13;
unsigned time_code_seconds = (time_code&0x00000FC0)>>6;
unsigned time_code_pictures = (time_code&0x0000003F);
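// (As we read the MPEG video spec, the 25-bit 'time_code' is laid out as
// drop_frame_flag(1), hours(5), minutes(6), marker_bit(1), seconds(6),
// pictures(6); the always-set marker bit at position 12 is why the
// 'seconds' mask above skips from 0x0007E000 down to 0x00000FC0.)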
#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
fprintf(stderr, "time_code: 0x%07x, drop_frame %d, hours %d, minutes %d, seconds %d, pictures %d\n", time_code, drop_frame_flag, time_code_hours, time_code_minutes, time_code_seconds, time_code_pictures);
#endif
#ifdef DEBUG
Boolean closed_gop = (next4Bytes&0x00000040) != 0;
Boolean broken_link = (next4Bytes&0x00000020) != 0;
fprintf(stderr, "closed_gop: %d, broken_link: %d\n", closed_gop, broken_link);
#endif
// Now, copy all bytes that we see, up until we reach a PICTURE_START_CODE:
do {
saveToNextCode(next4Bytes);
} while (next4Bytes != PICTURE_START_CODE);
// Record the time code:
usingSource()->setTimeCode(time_code_hours, time_code_minutes,
time_code_seconds, time_code_pictures,
fPicturesSinceLastGOP);
fPicturesSinceLastGOP = 0;
// Compute this frame's timestamp:
usingSource()->computePresentationTime(0);
setParseState(PARSING_PICTURE_HEADER);
return curFrameSize();
}
inline Boolean isSliceStartCode(unsigned fourBytes) {
if ((fourBytes&0xFFFFFF00) != 0x00000100) return False;
unsigned char lastByte = fourBytes&0xFF;
return lastByte <= 0xAF && lastByte >= 1;
}
unsigned MPEG1or2VideoStreamParser::parsePictureHeader() {
#ifdef DEBUG
fprintf(stderr, "parsing picture header\n");
#endif
// Note that we've already read the PICTURE_START_CODE
// Next, extract the temporal reference from the next 4 bytes:
unsigned next4Bytes = get4Bytes();
unsigned short temporal_reference = (next4Bytes&0xFFC00000)>>(32-10);
unsigned char picture_coding_type = (next4Bytes&0x00380000)>>19;
#ifdef DEBUG
unsigned short vbv_delay = (next4Bytes&0x0007FFF8)>>3;
fprintf(stderr, "temporal_reference: %d, picture_coding_type: %d, vbv_delay: %d\n", temporal_reference, picture_coding_type, vbv_delay);
#endif
fSkippingCurrentPicture = fIFramesOnly && picture_coding_type != 1;
if (fSkippingCurrentPicture) {
// Skip all bytes that we see, up until we reach a slice_start_code:
do {
skipToNextCode(next4Bytes);
} while (!isSliceStartCode(next4Bytes));
} else {
// Save the PICTURE_START_CODE that we've already read:
save4Bytes(PICTURE_START_CODE);
// Copy all bytes that we see, up until we reach a slice_start_code:
do {
saveToNextCode(next4Bytes);
} while (!isSliceStartCode(next4Bytes));
}
setParseState(PARSING_SLICE);
fCurrentSliceNumber = next4Bytes&0xFF;
// Record the temporal reference:
fCurPicTemporalReference = temporal_reference;
// Compute this frame's timestamp:
usingSource()->computePresentationTime(fCurPicTemporalReference);
if (fSkippingCurrentPicture) {
return parse(); // try again, until we get a non-skipped frame
} else {
return curFrameSize();
}
}
unsigned MPEG1or2VideoStreamParser::parseSlice() {
// Note that we've already read the slice_start_code:
unsigned next4Bytes = PICTURE_START_CODE|fCurrentSliceNumber;
#ifdef DEBUG_SLICE
fprintf(stderr, "parsing slice: 0x%08x\n", next4Bytes);
#endif
if (fSkippingCurrentPicture) {
// Skip all bytes that we see, up until we reach a code of some sort:
skipToNextCode(next4Bytes);
} else {
// Copy all bytes that we see, up until we reach a code of some sort:
saveToNextCode(next4Bytes);
}
// The next thing to parse depends on the code that we just saw:
if (isSliceStartCode(next4Bytes)) { // common case
setParseState(PARSING_SLICE);
fCurrentSliceNumber = next4Bytes&0xFF;
} else {
// We're no longer seeing slices, so we assume that the current picture
// has now ended:
++fPicturesSinceLastGOP;
++usingSource()->fPictureCount;
usingSource()->fPictureEndMarker = True; // HACK #####
switch (next4Bytes) {
case SEQUENCE_END_CODE: {
setParseState(PARSING_VIDEO_SEQUENCE_HEADER);
break;
}
case VIDEO_SEQUENCE_HEADER_START_CODE: {
setParseState(PARSING_VIDEO_SEQUENCE_HEADER_SEEN_CODE);
break;
}
case GROUP_START_CODE: {
setParseState(PARSING_GOP_HEADER_SEEN_CODE);
break;
}
case PICTURE_START_CODE: {
setParseState(PARSING_PICTURE_HEADER);
break;
}
default: {
usingSource()->envir() << "MPEG1or2VideoStreamParser::parseSlice(): Saw unexpected code "
<< (void*)next4Bytes << "\n";
setParseState(PARSING_SLICE); // the safest way to recover...
break;
}
}
}
// Compute this frame's timestamp:
usingSource()->computePresentationTime(fCurPicTemporalReference);
if (fSkippingCurrentPicture) {
return parse(); // try again, until we get a non-skipped frame
} else {
return curFrameSize();
}
}
live/liveMedia/MPEGVideoStreamParser.cpp 000444 001751 000000 00000003215 12265042432 020413 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// An abstract parser for MPEG video streams
// Implementation
#include "MPEGVideoStreamParser.hh"
MPEGVideoStreamParser
::MPEGVideoStreamParser(MPEGVideoStreamFramer* usingSource,
FramedSource* inputSource)
: StreamParser(inputSource, FramedSource::handleClosure, usingSource,
&MPEGVideoStreamFramer::continueReadProcessing, usingSource),
fUsingSource(usingSource) {
}
MPEGVideoStreamParser::~MPEGVideoStreamParser() {
}
void MPEGVideoStreamParser::restoreSavedParserState() {
StreamParser::restoreSavedParserState();
fTo = fSavedTo;
fNumTruncatedBytes = fSavedNumTruncatedBytes;
}
void MPEGVideoStreamParser::registerReadInterest(unsigned char* to,
unsigned maxSize) {
fStartOfFrame = fTo = fSavedTo = to;
fLimit = to + maxSize;
fNumTruncatedBytes = fSavedNumTruncatedBytes = 0;
}
live/liveMedia/FileServerMediaSubsession.cpp 000444 001751 000000 00000002512 12265042432 021426 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from a file.
// Implementation
#include "FileServerMediaSubsession.hh"
FileServerMediaSubsession
::FileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
Boolean reuseFirstSource)
: OnDemandServerMediaSubsession(env, reuseFirstSource),
fFileSize(0) {
fFileName = strDup(fileName);
}
FileServerMediaSubsession::~FileServerMediaSubsession() {
delete[] (char*)fFileName;
}
live/liveMedia/MPEG1or2VideoStreamDiscreteFramer.cpp 000444 001751 000000 00000017733 12265042432 022574 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A simplified version of "MPEG1or2VideoStreamFramer" that takes only
// complete, discrete frames (rather than an arbitrary byte stream) as input.
// This avoids the parsing and data copying overhead of the full
// "MPEG1or2VideoStreamFramer".
// Implementation
#include "MPEG1or2VideoStreamDiscreteFramer.hh"
MPEG1or2VideoStreamDiscreteFramer*
MPEG1or2VideoStreamDiscreteFramer::createNew(UsageEnvironment& env,
FramedSource* inputSource,
Boolean iFramesOnly,
double vshPeriod,
Boolean leavePresentationTimesUnmodified) {
// Need to add source type checking here??? #####
return new MPEG1or2VideoStreamDiscreteFramer(env, inputSource,
iFramesOnly, vshPeriod, leavePresentationTimesUnmodified);
}
MPEG1or2VideoStreamDiscreteFramer
::MPEG1or2VideoStreamDiscreteFramer(UsageEnvironment& env,
FramedSource* inputSource,
Boolean iFramesOnly, double vshPeriod, Boolean leavePresentationTimesUnmodified)
: MPEG1or2VideoStreamFramer(env, inputSource, iFramesOnly, vshPeriod,
False/*don't create a parser*/),
fLeavePresentationTimesUnmodified(leavePresentationTimesUnmodified),
fLastNonBFrameTemporal_reference(0),
fSavedVSHSize(0), fSavedVSHTimestamp(0.0),
fIFramesOnly(iFramesOnly), fVSHPeriod(vshPeriod) {
fLastNonBFramePresentationTime.tv_sec = 0;
fLastNonBFramePresentationTime.tv_usec = 0;
}
MPEG1or2VideoStreamDiscreteFramer::~MPEG1or2VideoStreamDiscreteFramer() {
}
void MPEG1or2VideoStreamDiscreteFramer::doGetNextFrame() {
// Arrange to read data (which should be a complete MPEG-1 or 2 video frame)
// from our data source, directly into the client's input buffer.
// After reading this, we'll do some parsing on the frame.
fInputSource->getNextFrame(fTo, fMaxSize,
afterGettingFrame, this,
FramedSource::handleClosure, this);
}
void MPEG1or2VideoStreamDiscreteFramer
::afterGettingFrame(void* clientData, unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
MPEG1or2VideoStreamDiscreteFramer* source
= (MPEG1or2VideoStreamDiscreteFramer*)clientData;
source->afterGettingFrame1(frameSize, numTruncatedBytes,
presentationTime, durationInMicroseconds);
}
static double const frameRateFromCode[] = {
0.0, // forbidden
24000/1001.0, // approx 23.976
24.0,
25.0,
30000/1001.0, // approx 29.97
30.0,
50.0,
60000/1001.0, // approx 59.94
60.0,
0.0, // reserved
0.0, // reserved
0.0, // reserved
0.0, // reserved
0.0, // reserved
0.0, // reserved
0.0 // reserved
};
#define MILLION 1000000
void MPEG1or2VideoStreamDiscreteFramer
::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
// Check that the frame begins with an MPEG start-code prefix (0x00 0x00 0x01):
if (frameSize >= 4 && fTo[0] == 0 && fTo[1] == 0 && fTo[2] == 1) {
fPictureEndMarker = True; // Assume that we have a complete 'picture' here
u_int8_t nextCode = fTo[3];
if (nextCode == 0xB3) { // VIDEO_SEQUENCE_HEADER_START_CODE
// Note the following 'frame rate' code:
if (frameSize >= 8) {
u_int8_t frame_rate_code = fTo[7]&0x0F;
fFrameRate = frameRateFromCode[frame_rate_code];
}
// Also, save away this Video Sequence Header, in case we need it later:
// First, figure out how big it is:
unsigned vshSize;
for (vshSize = 4; vshSize < frameSize-3; ++vshSize) {
if (fTo[vshSize] == 0 && fTo[vshSize+1] == 0 && fTo[vshSize+2] == 1 &&
(fTo[vshSize+3] == 0xB8 || fTo[vshSize+3] == 0x00)) break;
}
if (vshSize == frameSize-3) vshSize = frameSize; // There was nothing else following it
if (vshSize <= sizeof fSavedVSHBuffer) {
memmove(fSavedVSHBuffer, fTo, vshSize);
fSavedVSHSize = vshSize;
fSavedVSHTimestamp
= presentationTime.tv_sec + presentationTime.tv_usec/(double)MILLION;
}
} else if (nextCode == 0xB8) { // GROUP_START_CODE
// If necessary, insert a saved Video Sequence Header in front of this:
double pts = presentationTime.tv_sec + presentationTime.tv_usec/(double)MILLION;
if (pts > fSavedVSHTimestamp + fVSHPeriod &&
fSavedVSHSize + frameSize <= fMaxSize) {
memmove(&fTo[fSavedVSHSize], &fTo[0], frameSize); // make room for the header
memmove(&fTo[0], fSavedVSHBuffer, fSavedVSHSize); // insert it
frameSize += fSavedVSHSize;
fSavedVSHTimestamp = pts;
}
}
unsigned i = 3;
if (nextCode == 0xB3 /*VIDEO_SEQUENCE_HEADER_START_CODE*/ ||
nextCode == 0xB8 /*GROUP_START_CODE*/) {
// Skip to the following PICTURE_START_CODE (if any):
for (i += 4; i < frameSize; ++i) {
if (fTo[i] == 0x00 /*PICTURE_START_CODE*/
&& fTo[i-1] == 1 && fTo[i-2] == 0 && fTo[i-3] == 0) {
nextCode = fTo[i];
break;
}
}
}
if (nextCode == 0x00 /*PICTURE_START_CODE*/ && i+2 < frameSize) {
// Get the 'temporal_reference' and 'picture_coding_type' from the
// following 2 bytes:
++i;
unsigned short temporal_reference = (fTo[i]<<2)|(fTo[i+1]>>6);
unsigned char picture_coding_type = (fTo[i+1]&0x38)>>3;
// If this is not an "I" frame, but we were asked for "I" frames only, then try again:
if (fIFramesOnly && picture_coding_type != 1) {
doGetNextFrame();
return;
}
// If this is a "B" frame, then we have to tweak "presentationTime":
if (!fLeavePresentationTimesUnmodified && picture_coding_type == 3/*B*/
&& (fLastNonBFramePresentationTime.tv_usec > 0 ||
fLastNonBFramePresentationTime.tv_sec > 0)) {
int trIncrement
= fLastNonBFrameTemporal_reference - temporal_reference;
if (trIncrement < 0) trIncrement += 1024; // field is 10 bits in size
unsigned usIncrement = fFrameRate == 0.0 ? 0
: (unsigned)((trIncrement*MILLION)/fFrameRate);
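// (Worked example, for illustration: if the last non-B frame had
// temporal_reference 2 and this B frame has temporal_reference 1020, then
// trIncrement = 2 - 1020 + 1024 = 6, and at fFrameRate == 25.0 this gives
// usIncrement = 6*1000000/25 = 240000 microseconds.)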
unsigned secondsToSubtract = usIncrement/MILLION;
unsigned uSecondsToSubtract = usIncrement%MILLION;
presentationTime = fLastNonBFramePresentationTime;
if ((unsigned)presentationTime.tv_usec < uSecondsToSubtract) {
presentationTime.tv_usec += MILLION;
if (presentationTime.tv_sec > 0) --presentationTime.tv_sec;
}
presentationTime.tv_usec -= uSecondsToSubtract;
if ((unsigned)presentationTime.tv_sec > secondsToSubtract) {
presentationTime.tv_sec -= secondsToSubtract;
} else {
presentationTime.tv_sec = presentationTime.tv_usec = 0;
}
} else {
fLastNonBFramePresentationTime = presentationTime;
fLastNonBFrameTemporal_reference = temporal_reference;
}
}
}
// ##### Later:
// - do "iFramesOnly" if requested
// Complete delivery to the client:
fFrameSize = frameSize;
fNumTruncatedBytes = numTruncatedBytes;
fPresentationTime = presentationTime;
fDurationInMicroseconds = durationInMicroseconds;
afterGetting(this);
}
live/liveMedia/AC3AudioStreamFramer.cpp 000444 001751 000000 00000024562 12265042432 020214 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A filter that breaks up an AC3 audio elementary stream into frames
// Implementation
#include "AC3AudioStreamFramer.hh"
#include "StreamParser.hh"
#include <string.h> // for "memmove()"
////////// AC3AudioStreamParser definition //////////
class AC3FrameParams {
public:
AC3FrameParams() : samplingFreq(0) {}
// 8-byte header at the start of each frame:
// u_int32_t hdr0, hdr1;
unsigned hdr0, hdr1;
// parameters derived from the headers
unsigned kbps, samplingFreq, frameSize;
void setParamsFromHeader();
};
class AC3AudioStreamParser: public StreamParser {
public:
AC3AudioStreamParser(AC3AudioStreamFramer* usingSource,
FramedSource* inputSource);
virtual ~AC3AudioStreamParser();
public:
void testStreamCode(unsigned char ourStreamCode,
unsigned char* ptr, unsigned size);
unsigned parseFrame(unsigned& numTruncatedBytes);
// returns the size of the frame that was acquired, or 0 if none was
void registerReadInterest(unsigned char* to, unsigned maxSize);
AC3FrameParams const& currentFrame() const { return fCurrentFrame; }
Boolean haveParsedAFrame() const { return fHaveParsedAFrame; }
void readAndSaveAFrame();
private:
static void afterGettingSavedFrame(void* clientData, unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds);
void afterGettingSavedFrame1(unsigned frameSize);
static void onSavedFrameClosure(void* clientData);
void onSavedFrameClosure1();
private:
AC3AudioStreamFramer* fUsingSource;
unsigned char* fTo;
unsigned fMaxSize;
Boolean fHaveParsedAFrame;
unsigned char* fSavedFrame;
unsigned fSavedFrameSize;
char fSavedFrameFlag;
// Parameters of the most recently read frame:
AC3FrameParams fCurrentFrame;
};
////////// AC3AudioStreamFramer implementation //////////
AC3AudioStreamFramer::AC3AudioStreamFramer(UsageEnvironment& env,
FramedSource* inputSource,
unsigned char streamCode)
: FramedFilter(env, inputSource), fOurStreamCode(streamCode) {
// Use the current wallclock time as the initial 'presentation time':
gettimeofday(&fNextFramePresentationTime, NULL);
fParser = new AC3AudioStreamParser(this, inputSource);
}
AC3AudioStreamFramer::~AC3AudioStreamFramer() {
delete fParser;
}
AC3AudioStreamFramer*
AC3AudioStreamFramer::createNew(UsageEnvironment& env,
FramedSource* inputSource,
unsigned char streamCode) {
// Need to add source type checking here??? #####
return new AC3AudioStreamFramer(env, inputSource, streamCode);
}
unsigned AC3AudioStreamFramer::samplingRate() {
if (!fParser->haveParsedAFrame()) {
// Because we haven't yet parsed a frame, we don't yet know the input
// stream's sampling rate. So, we first need to read a frame
// (into a special buffer that we keep around for later use).
fParser->readAndSaveAFrame();
}
return fParser->currentFrame().samplingFreq;
}
void AC3AudioStreamFramer::flushInput() {
fParser->flushInput();
}
void AC3AudioStreamFramer::doGetNextFrame() {
fParser->registerReadInterest(fTo, fMaxSize);
parseNextFrame();
}
#define MILLION 1000000
struct timeval AC3AudioStreamFramer::currentFramePlayTime() const {
AC3FrameParams const& fr = fParser->currentFrame();
unsigned const numSamples = 1536;
unsigned const freq = fr.samplingFreq;
// result is numSamples/freq
unsigned const uSeconds = (freq == 0) ? 0
: ((numSamples*2*MILLION)/freq + 1)/2; // rounds to nearest integer
struct timeval result;
result.tv_sec = uSeconds/MILLION;
result.tv_usec = uSeconds%MILLION;
return result;
}
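// (Worked example: an AC-3 frame always carries 1536 samples, so at 44100 Hz
// it spans 1536/44100 seconds; the rounding expression above computes
// (1536*2*1000000/44100 + 1)/2 == 34830 microseconds.)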
void AC3AudioStreamFramer
::handleNewData(void* clientData, unsigned char* ptr, unsigned size,
struct timeval /*presentationTime*/) {
AC3AudioStreamFramer* framer = (AC3AudioStreamFramer*)clientData;
framer->handleNewData(ptr, size);
}
void AC3AudioStreamFramer
::handleNewData(unsigned char* ptr, unsigned size) {
fParser->testStreamCode(fOurStreamCode, ptr, size);
parseNextFrame();
}
void AC3AudioStreamFramer::parseNextFrame() {
unsigned acquiredFrameSize = fParser->parseFrame(fNumTruncatedBytes);
if (acquiredFrameSize > 0) {
// We were able to acquire a frame from the input.
// It has already been copied to the reader's space.
fFrameSize = acquiredFrameSize;
// Also set the presentation time, and increment it for next time,
// based on the length of this frame:
fPresentationTime = fNextFramePresentationTime;
struct timeval framePlayTime = currentFramePlayTime();
fDurationInMicroseconds = framePlayTime.tv_sec*MILLION + framePlayTime.tv_usec;
fNextFramePresentationTime.tv_usec += framePlayTime.tv_usec;
fNextFramePresentationTime.tv_sec
+= framePlayTime.tv_sec + fNextFramePresentationTime.tv_usec/MILLION;
fNextFramePresentationTime.tv_usec %= MILLION;
// Call our own 'after getting' function. Because we're not a 'leaf'
// source, we can call this directly, without risking infinite recursion.
afterGetting(this);
} else {
// We were unable to parse a complete frame from the input, because:
// - we had to read more data from the source stream, or
// - the source stream has ended.
}
}
////////// AC3AudioStreamParser implementation //////////
static int const kbpsTable[] = {32, 40, 48, 56, 64, 80, 96, 112,
128, 160, 192, 224, 256, 320, 384, 448,
512, 576, 640};
void AC3FrameParams::setParamsFromHeader() {
unsigned char byte4 = hdr1 >> 24;
unsigned char kbpsIndex = (byte4&0x3E) >> 1;
if (kbpsIndex > 18) kbpsIndex = 18;
kbps = kbpsTable[kbpsIndex];
unsigned char samplingFreqIndex = (byte4&0xC0) >> 6;
switch (samplingFreqIndex) {
case 0:
samplingFreq = 48000;
frameSize = 4*kbps;
break;
case 1:
samplingFreq = 44100;
frameSize = 2*(320*kbps/147 + (byte4&1));
break;
case 2:
case 3: // not legal?
samplingFreq = 32000;
frameSize = 6*kbps;
}
}
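// (A sanity check on the frame sizes above, given that an AC-3 syncframe
// carries 1536 audio samples:
// - at 48 kHz a frame spans 32 ms, so it holds kbps*1000*0.032/8 = 4*kbps bytes;
// - at 32 kHz it spans 48 ms, giving 6*kbps bytes;
// - at 44.1 kHz it spans 1536/44100 s, giving kbps*1000*1536/(44100*8)
//   = kbps*640/147 = 2*(320*kbps/147) bytes, matching the formula above,
//   with the header's low bit selecting the rounded-up frame size.)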
AC3AudioStreamParser
::AC3AudioStreamParser(AC3AudioStreamFramer* usingSource,
FramedSource* inputSource)
: StreamParser(inputSource, FramedSource::handleClosure, usingSource,
&AC3AudioStreamFramer::handleNewData, usingSource),
fUsingSource(usingSource), fHaveParsedAFrame(False),
fSavedFrame(NULL), fSavedFrameSize(0) {
}
AC3AudioStreamParser::~AC3AudioStreamParser() {
}
void AC3AudioStreamParser::registerReadInterest(unsigned char* to,
unsigned maxSize) {
fTo = to;
fMaxSize = maxSize;
}
void AC3AudioStreamParser
::testStreamCode(unsigned char ourStreamCode,
unsigned char* ptr, unsigned size) {
if (ourStreamCode == 0) return; // we assume that there's no stream code at the beginning of the data
if (size < 4) return;
unsigned char streamCode = *ptr;
if (streamCode == ourStreamCode) {
// Remove the first 4 bytes from the stream:
memmove(ptr, ptr + 4, size - 4);
totNumValidBytes() = totNumValidBytes() - 4;
} else {
// Discard all of the data that was just read:
totNumValidBytes() = totNumValidBytes() - size;
}
}
unsigned AC3AudioStreamParser::parseFrame(unsigned& numTruncatedBytes) {
if (fSavedFrameSize > 0) {
// We've already read and parsed a frame. Use it instead:
memmove(fTo, fSavedFrame, fSavedFrameSize);
delete[] fSavedFrame; fSavedFrame = NULL;
unsigned frameSize = fSavedFrameSize;
fSavedFrameSize = 0;
return frameSize;
}
try {
saveParserState();
// We expect an AC3 audio header (first 2 bytes == 0x0B77) at the start:
while (1) {
unsigned next4Bytes = test4Bytes();
if (next4Bytes>>16 == 0x0B77) break;
skipBytes(1);
saveParserState();
}
fCurrentFrame.hdr0 = get4Bytes();
fCurrentFrame.hdr1 = test4Bytes();
fCurrentFrame.setParamsFromHeader();
fHaveParsedAFrame = True;
// Copy the frame to the requested destination:
unsigned frameSize = fCurrentFrame.frameSize;
if (frameSize > fMaxSize) {
numTruncatedBytes = frameSize - fMaxSize;
frameSize = fMaxSize;
} else {
numTruncatedBytes = 0;
}
fTo[0] = fCurrentFrame.hdr0 >> 24;
fTo[1] = fCurrentFrame.hdr0 >> 16;
fTo[2] = fCurrentFrame.hdr0 >> 8;
fTo[3] = fCurrentFrame.hdr0;
getBytes(&fTo[4], frameSize-4);
skipBytes(numTruncatedBytes);
return frameSize;
} catch (int /*e*/) {
#ifdef DEBUG
fUsingSource->envir() << "AC3AudioStreamParser::parseFrame() EXCEPTION (This is normal behavior - *not* an error)\n";
#endif
return 0; // the parsing got interrupted
}
}
void AC3AudioStreamParser::readAndSaveAFrame() {
unsigned const maxAC3FrameSize = 4000;
fSavedFrame = new unsigned char[maxAC3FrameSize];
fSavedFrameSize = 0;
fSavedFrameFlag = 0;
fUsingSource->getNextFrame(fSavedFrame, maxAC3FrameSize,
afterGettingSavedFrame, this,
onSavedFrameClosure, this);
fUsingSource->envir().taskScheduler().doEventLoop(&fSavedFrameFlag);
}
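// (Note on the blocking pattern above: "doEventLoop()" keeps running the
// event loop until its 'watch variable' becomes non-zero. Both completion
// paths below - "afterGettingSavedFrame1()" and "onSavedFrameClosure1()" -
// set "fSavedFrameFlag = ~0", which is what unblocks "readAndSaveAFrame()".)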
void AC3AudioStreamParser
::afterGettingSavedFrame(void* clientData, unsigned frameSize,
unsigned /*numTruncatedBytes*/,
struct timeval /*presentationTime*/,
unsigned /*durationInMicroseconds*/) {
AC3AudioStreamParser* parser = (AC3AudioStreamParser*)clientData;
parser->afterGettingSavedFrame1(frameSize);
}
void AC3AudioStreamParser
::afterGettingSavedFrame1(unsigned frameSize) {
fSavedFrameSize = frameSize;
fSavedFrameFlag = ~0;
}
void AC3AudioStreamParser::onSavedFrameClosure(void* clientData) {
AC3AudioStreamParser* parser = (AC3AudioStreamParser*)clientData;
parser->onSavedFrameClosure1();
}
void AC3AudioStreamParser::onSavedFrameClosure1() {
delete[] fSavedFrame; fSavedFrame = NULL;
fSavedFrameSize = 0;
fSavedFrameFlag = ~0;
}
live/liveMedia/DarwinInjector.cpp 000444 001751 000000 00000030505 12265042432 017267 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// An object that redirects one or more RTP/RTCP streams - forming a single
// multimedia session - into a 'Darwin Streaming Server' (for subsequent
// reflection to potentially arbitrarily many remote RTSP clients).
// Implementation
#include "DarwinInjector.hh"
#include <GroupsockHelper.hh> // for "our_random32()" and "increaseSendBufferTo()"
////////// SubstreamDescriptor definition //////////
class SubstreamDescriptor {
public:
SubstreamDescriptor(RTPSink* rtpSink, RTCPInstance* rtcpInstance, unsigned trackId);
~SubstreamDescriptor();
SubstreamDescriptor*& next() { return fNext; }
RTPSink* rtpSink() const { return fRTPSink; }
RTCPInstance* rtcpInstance() const { return fRTCPInstance; }
char const* sdpLines() const { return fSDPLines; }
private:
SubstreamDescriptor* fNext;
RTPSink* fRTPSink;
RTCPInstance* fRTCPInstance;
char* fSDPLines;
};
////////// DarwinInjector implementation //////////
DarwinInjector* DarwinInjector::createNew(UsageEnvironment& env,
char const* applicationName,
int verbosityLevel) {
return new DarwinInjector(env, applicationName, verbosityLevel);
}
Boolean DarwinInjector::lookupByName(UsageEnvironment& env, char const* name,
DarwinInjector*& result) {
result = NULL; // unless we succeed
Medium* medium;
if (!Medium::lookupByName(env, name, medium)) return False;
if (!medium->isDarwinInjector()) {
env.setResultMsg(name, " is not a 'Darwin injector'");
return False;
}
result = (DarwinInjector*)medium;
return True;
}
DarwinInjector::DarwinInjector(UsageEnvironment& env,
char const* applicationName, int verbosityLevel)
: Medium(env),
fApplicationName(strDup(applicationName)), fVerbosityLevel(verbosityLevel),
fRTSPClient(NULL), fSubstreamSDPSizes(0),
fHeadSubstream(NULL), fTailSubstream(NULL), fSession(NULL), fLastTrackId(0), fResultString(NULL) {
}
DarwinInjector::~DarwinInjector() {
if (fSession != NULL) { // close down and delete the session
fRTSPClient->sendTeardownCommand(*fSession, NULL);
Medium::close(fSession);
}
delete fHeadSubstream;
delete[] (char*)fApplicationName;
Medium::close(fRTSPClient);
}
void DarwinInjector::addStream(RTPSink* rtpSink, RTCPInstance* rtcpInstance) {
if (rtpSink == NULL) return; // "rtpSink" should be non-NULL
SubstreamDescriptor* newDescriptor = new SubstreamDescriptor(rtpSink, rtcpInstance, ++fLastTrackId);
if (fHeadSubstream == NULL) {
fHeadSubstream = fTailSubstream = newDescriptor;
} else {
fTailSubstream->next() = newDescriptor;
fTailSubstream = newDescriptor;
}
fSubstreamSDPSizes += strlen(newDescriptor->sdpLines());
}
// Define a special subclass of "RTSPClient" that has a pointer field to a "DarwinInjector". We'll use this to implement RTSP ops:
class RTSPClientForDarwinInjector: public RTSPClient {
public:
RTSPClientForDarwinInjector(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, char const* applicationName,
DarwinInjector* ourDarwinInjector)
: RTSPClient(env, rtspURL, verbosityLevel, applicationName, 0, -1),
fOurDarwinInjector(ourDarwinInjector) {}
virtual ~RTSPClientForDarwinInjector() {}
DarwinInjector* fOurDarwinInjector;
};
Boolean DarwinInjector
::setDestination(char const* remoteRTSPServerNameOrAddress,
char const* remoteFileName,
char const* sessionName,
char const* sessionInfo,
portNumBits remoteRTSPServerPortNumber,
char const* remoteUserName,
char const* remotePassword,
char const* sessionAuthor,
char const* sessionCopyright,
int timeout) {
char* sdp = NULL;
char* url = NULL;
Boolean success = False; // until we learn otherwise
do {
// Construct a RTSP URL for the remote stream:
char const* const urlFmt = "rtsp://%s:%u/%s";
unsigned urlLen
= strlen(urlFmt) + strlen(remoteRTSPServerNameOrAddress) + 5 /* max short len */ + strlen(remoteFileName);
url = new char[urlLen];
sprintf(url, urlFmt, remoteRTSPServerNameOrAddress, remoteRTSPServerPortNumber, remoteFileName);
// Begin by creating our RTSP client object:
fRTSPClient = new RTSPClientForDarwinInjector(envir(), url, fVerbosityLevel, fApplicationName, this);
if (fRTSPClient == NULL) break;
// Get the remote RTSP server's IP address:
struct in_addr addr;
{
NetAddressList addresses(remoteRTSPServerNameOrAddress);
if (addresses.numAddresses() == 0) break;
NetAddress const* address = addresses.firstAddress();
addr.s_addr = *(unsigned*)(address->data());
}
AddressString remoteRTSPServerAddressStr(addr);
// Construct a SDP description for the session that we'll be streaming:
char const* const sdpFmt =
"v=0\r\n"
"o=- %u %u IN IP4 127.0.0.1\r\n"
"s=%s\r\n"
"i=%s\r\n"
"c=IN IP4 %s\r\n"
"t=0 0\r\n"
"a=x-qt-text-nam:%s\r\n"
"a=x-qt-text-inf:%s\r\n"
"a=x-qt-text-cmt:source application:%s\r\n"
"a=x-qt-text-aut:%s\r\n"
"a=x-qt-text-cpy:%s\r\n";
// plus, %s for each substream SDP
unsigned sdpLen = strlen(sdpFmt)
+ 20 /* max int len */ + 20 /* max int len */
+ strlen(sessionName)
+ strlen(sessionInfo)
+ strlen(remoteRTSPServerAddressStr.val())
+ strlen(sessionName)
+ strlen(sessionInfo)
+ strlen(fApplicationName)
+ strlen(sessionAuthor)
+ strlen(sessionCopyright)
+ fSubstreamSDPSizes;
unsigned const sdpSessionId = our_random32();
unsigned const sdpVersion = sdpSessionId;
sdp = new char[sdpLen];
sprintf(sdp, sdpFmt,
sdpSessionId, sdpVersion, // o= line
sessionName, // s= line
sessionInfo, // i= line
remoteRTSPServerAddressStr.val(), // c= line
sessionName, // a=x-qt-text-nam: line
sessionInfo, // a=x-qt-text-inf: line
fApplicationName, // a=x-qt-text-cmt: line
sessionAuthor, // a=x-qt-text-aut: line
sessionCopyright // a=x-qt-text-cpy: line
);
char* p = &sdp[strlen(sdp)];
SubstreamDescriptor* ss;
for (ss = fHeadSubstream; ss != NULL; ss = ss->next()) {
sprintf(p, "%s", ss->sdpLines());
p += strlen(p);
}
// Do a RTSP "ANNOUNCE" with this SDP description:
Authenticator auth;
Authenticator* authToUse = NULL;
if (remoteUserName[0] != '\0' || remotePassword[0] != '\0') {
auth.setUsernameAndPassword(remoteUserName, remotePassword);
authToUse = &auth;
}
fWatchVariable = 0;
(void)fRTSPClient->sendAnnounceCommand(sdp, genericResponseHandler, authToUse);
// Now block (but handling events) until we get a response:
envir().taskScheduler().doEventLoop(&fWatchVariable);
delete[] fResultString;
if (fResultCode != 0) break; // an error occurred with the RTSP "ANNOUNCE" command
// Next, tell the remote server to start receiving the stream from us.
// (To do this, we first create a "MediaSession" object from the SDP description.)
fSession = MediaSession::createNew(envir(), sdp);
if (fSession == NULL) break;
ss = fHeadSubstream;
MediaSubsessionIterator iter(*fSession);
MediaSubsession* subsession;
unsigned streamChannelId = 0;
while ((subsession = iter.next()) != NULL) {
if (!subsession->initiate()) break;
fWatchVariable = 0;
(void)fRTSPClient->sendSetupCommand(*subsession, genericResponseHandler,
True /*streamOutgoing*/,
True /*streamUsingTCP*/);
// Now block (but handling events) until we get a response:
envir().taskScheduler().doEventLoop(&fWatchVariable);
delete[] fResultString;
if (fResultCode != 0) break; // an error occurred with the RTSP "SETUP" command
// Tell this subsession's RTPSink and RTCPInstance to use
// the RTSP TCP connection:
ss->rtpSink()->setStreamSocket(fRTSPClient->socketNum(), streamChannelId++);
if (ss->rtcpInstance() != NULL) {
ss->rtcpInstance()->setStreamSocket(fRTSPClient->socketNum(),
streamChannelId++);
}
ss = ss->next();
}
if (subsession != NULL) break; // an error occurred above
// Tell the RTSP server to start:
fWatchVariable = 0;
(void)fRTSPClient->sendPlayCommand(*fSession, genericResponseHandler);
// Now block (but handling events) until we get a response:
envir().taskScheduler().doEventLoop(&fWatchVariable);
delete[] fResultString;
if (fResultCode != 0) break; // an error occurred with the RTSP "PLAY" command
// Finally, make sure that the output TCP buffer is a reasonable size:
increaseSendBufferTo(envir(), fRTSPClient->socketNum(), 100*1024);
success = True;
} while (0);
delete[] sdp;
delete[] url;
return success;
}
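// An illustrative usage sketch (not part of this file; the variable names are
// hypothetical, and the omitted parameters are assumed to take the defaults
// declared in "DarwinInjector.hh"):
//
//   DarwinInjector* injector = DarwinInjector::createNew(env, "myApp");
//   injector->addStream(videoRTPSink, videoRTCPInstance);
//   injector->addStream(audioRTPSink, audioRTCPInstance);
//   if (!injector->setDestination("darwin.example.com", "live.sdp",
//                                 "Session name", "Session info")) {
//     // handle the failure
//   }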
Boolean DarwinInjector::isDarwinInjector() const {
return True;
}
void DarwinInjector::genericResponseHandler(RTSPClient* rtspClient, int responseCode, char* responseString) {
DarwinInjector* di = ((RTSPClientForDarwinInjector*)rtspClient)->fOurDarwinInjector;
di->genericResponseHandler1(responseCode, responseString);
}
void DarwinInjector::genericResponseHandler1(int responseCode, char* responseString) {
// Set result values:
fResultCode = responseCode;
fResultString = responseString;
// Signal a break from the event loop (thereby returning from the blocking command):
fWatchVariable = ~0;
}
////////// SubstreamDescriptor implementation //////////
SubstreamDescriptor::SubstreamDescriptor(RTPSink* rtpSink,
RTCPInstance* rtcpInstance, unsigned trackId)
: fNext(NULL), fRTPSink(rtpSink), fRTCPInstance(rtcpInstance) {
// Create the SDP description for this substream
char const* mediaType = fRTPSink->sdpMediaType();
unsigned char rtpPayloadType = fRTPSink->rtpPayloadType();
char const* rtpPayloadFormatName = fRTPSink->rtpPayloadFormatName();
unsigned rtpTimestampFrequency = fRTPSink->rtpTimestampFrequency();
unsigned numChannels = fRTPSink->numChannels();
char* rtpmapLine;
if (rtpPayloadType >= 96) {
char* encodingParamsPart;
if (numChannels != 1) {
encodingParamsPart = new char[1 + 20 /* max int len */];
sprintf(encodingParamsPart, "/%d", numChannels);
} else {
encodingParamsPart = strDup("");
}
char const* const rtpmapFmt = "a=rtpmap:%d %s/%d%s\r\n";
unsigned rtpmapFmtSize = strlen(rtpmapFmt)
+ 3 /* max char len */ + strlen(rtpPayloadFormatName)
+ 20 /* max int len */ + strlen(encodingParamsPart);
rtpmapLine = new char[rtpmapFmtSize];
sprintf(rtpmapLine, rtpmapFmt,
rtpPayloadType, rtpPayloadFormatName,
rtpTimestampFrequency, encodingParamsPart);
delete[] encodingParamsPart;
} else {
// Static payload type => no "a=rtpmap:" line
rtpmapLine = strDup("");
}
unsigned rtpmapLineSize = strlen(rtpmapLine);
char const* auxSDPLine = fRTPSink->auxSDPLine();
if (auxSDPLine == NULL) auxSDPLine = "";
unsigned auxSDPLineSize = strlen(auxSDPLine);
char const* const sdpFmt =
"m=%s 0 RTP/AVP %u\r\n"
"%s" // "a=rtpmap:" line (if present)
"%s" // auxilliary (e.g., "a=fmtp:") line (if present)
"a=control:trackID=%u\r\n";
unsigned sdpFmtSize = strlen(sdpFmt)
+ strlen(mediaType) + 3 /* max char len */
+ rtpmapLineSize
+ auxSDPLineSize
+ 20 /* max int len */;
char* sdpLines = new char[sdpFmtSize];
sprintf(sdpLines, sdpFmt,
mediaType, // m=
rtpPayloadType, // m=
rtpmapLine, // a=rtpmap:... (if present)
auxSDPLine, // optional extra SDP line
trackId); // a=control:
fSDPLines = strDup(sdpLines);
delete[] sdpLines;
delete[] rtpmapLine;
}
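// (For illustration only: a descriptor for a dynamic-payload-type audio
// substream might produce SDP lines such as
//     m=audio 0 RTP/AVP 96
//     a=rtpmap:96 MPA-ROBUST/90000
//     a=control:trackID=1
// with the exact values taken from the "RTPSink" being described.)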
SubstreamDescriptor::~SubstreamDescriptor() {
delete[] fSDPLines; // allocated (via "strDup()") with "new char[]"
delete fNext;
}
live/liveMedia/MP3AudioFileServerMediaSubsession.cpp 000444 001751 000000 00000015431 12265042432 022734 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from a MP3 audio file.
// (Actually, any MPEG-1 or MPEG-2 audio file should work.)
// Implementation
#include "MP3AudioFileServerMediaSubsession.hh"
#include "MPEG1or2AudioRTPSink.hh"
#include "MP3ADURTPSink.hh"
#include "MP3FileSource.hh"
#include "MP3ADU.hh"
MP3AudioFileServerMediaSubsession* MP3AudioFileServerMediaSubsession
::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
Boolean generateADUs, Interleaving* interleaving) {
return new MP3AudioFileServerMediaSubsession(env, fileName, reuseFirstSource,
generateADUs, interleaving);
}
MP3AudioFileServerMediaSubsession
::MP3AudioFileServerMediaSubsession(UsageEnvironment& env,
char const* fileName, Boolean reuseFirstSource,
Boolean generateADUs,
Interleaving* interleaving)
: FileServerMediaSubsession(env, fileName, reuseFirstSource),
fGenerateADUs(generateADUs), fInterleaving(interleaving), fFileDuration(0.0) {
}
MP3AudioFileServerMediaSubsession
::~MP3AudioFileServerMediaSubsession() {
delete fInterleaving;
}
FramedSource* MP3AudioFileServerMediaSubsession
::createNewStreamSourceCommon(FramedSource* baseMP3Source, unsigned mp3NumBytes, unsigned& estBitrate) {
FramedSource* streamSource;
do {
streamSource = baseMP3Source; // by default
if (streamSource == NULL) break;
// Use the MP3 file size, plus the duration, to estimate the stream's bitrate:
if (mp3NumBytes > 0 && fFileDuration > 0.0) {
estBitrate = (unsigned)(mp3NumBytes/(125*fFileDuration) + 0.5); // kbps, rounded
} else {
estBitrate = 128; // kbps, estimate
}
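// (The constant 125 is 1000/8: dividing a byte count by 125*seconds
// converts bytes-over-the-file's-duration into kilobits per second,
// and "+ 0.5" rounds to the nearest integer.)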
if (fGenerateADUs) {
// Add a filter that converts the source MP3s to ADUs:
streamSource = ADUFromMP3Source::createNew(envir(), streamSource);
if (streamSource == NULL) break;
if (fInterleaving != NULL) {
// Add another filter that interleaves the ADUs before packetizing:
streamSource = MP3ADUinterleaver::createNew(envir(), *fInterleaving,
streamSource);
if (streamSource == NULL) break;
}
} else if (fFileDuration > 0.0) {
// Because this is a seekable file, insert a pair of filters: one that
// converts the input MP3 stream to ADUs; another that converts these
// ADUs back to MP3. This allows us to seek within the input stream without
// tripping over the MP3 'bit reservoir':
streamSource = ADUFromMP3Source::createNew(envir(), streamSource);
if (streamSource == NULL) break;
streamSource = MP3FromADUSource::createNew(envir(), streamSource);
if (streamSource == NULL) break;
}
} while (0);
return streamSource;
}
void MP3AudioFileServerMediaSubsession::getBaseStreams(FramedSource* frontStream,
FramedSource*& sourceMP3Stream, ADUFromMP3Source*& aduStream/*if any*/) {
if (fGenerateADUs) {
// There's an ADU stream.
if (fInterleaving != NULL) {
// There's an interleaving filter in front of the ADU stream. So go back one, to reach the ADU stream:
aduStream = (ADUFromMP3Source*)(((FramedFilter*)frontStream)->inputSource());
} else {
aduStream = (ADUFromMP3Source*)frontStream;
}
// Then, go back one more, to reach the MP3 source:
sourceMP3Stream = (MP3FileSource*)(aduStream->inputSource());
} else if (fFileDuration > 0.0) {
// There's a pair of filters - MP3->ADU and ADU->MP3 - in front of the
// original MP3 source. So, go back one, to reach the ADU source:
aduStream = (ADUFromMP3Source*)(((FramedFilter*)frontStream)->inputSource());
// Then, go back one more, to reach the MP3 source:
sourceMP3Stream = (MP3FileSource*)(aduStream->inputSource());
} else {
// There's no filter in front of the source MP3 stream (and there's no ADU stream):
aduStream = NULL;
sourceMP3Stream = frontStream;
}
}
void MP3AudioFileServerMediaSubsession
::seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& /*numBytes*/) {
FramedSource* sourceMP3Stream;
ADUFromMP3Source* aduStream;
getBaseStreams(inputSource, sourceMP3Stream, aduStream);
if (aduStream != NULL) aduStream->resetInput(); // because we're about to seek within its source
((MP3FileSource*)sourceMP3Stream)->seekWithinFile(seekNPT, streamDuration);
}
void MP3AudioFileServerMediaSubsession
::setStreamSourceScale(FramedSource* inputSource, float scale) {
FramedSource* sourceMP3Stream;
ADUFromMP3Source* aduStream;
getBaseStreams(inputSource, sourceMP3Stream, aduStream);
if (aduStream == NULL) return; // because, in this case, the stream's not scalable
int iScale = (int)scale;
aduStream->setScaleFactor(iScale);
((MP3FileSource*)sourceMP3Stream)->setPresentationTimeScale(iScale);
}
FramedSource* MP3AudioFileServerMediaSubsession
::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
MP3FileSource* mp3Source = MP3FileSource::createNew(envir(), fFileName);
if (mp3Source == NULL) return NULL;
fFileDuration = mp3Source->filePlayTime();
return createNewStreamSourceCommon(mp3Source, mp3Source->fileSize(), estBitrate);
}
RTPSink* MP3AudioFileServerMediaSubsession
::createNewRTPSink(Groupsock* rtpGroupsock,
unsigned char rtpPayloadTypeIfDynamic,
FramedSource* /*inputSource*/) {
if (fGenerateADUs) {
return MP3ADURTPSink::createNew(envir(), rtpGroupsock,
rtpPayloadTypeIfDynamic);
} else {
return MPEG1or2AudioRTPSink::createNew(envir(), rtpGroupsock);
}
}
void MP3AudioFileServerMediaSubsession::testScaleFactor(float& scale) {
if (fFileDuration <= 0.0) {
// The file is non-seekable, so is probably a live input source.
// We don't support scale factors other than 1
scale = 1;
} else {
// We support any integral scale >= 1
int iScale = (int)(scale + 0.5); // round
if (iScale < 1) iScale = 1;
scale = (float)iScale;
}
}
float MP3AudioFileServerMediaSubsession::duration() const {
return fFileDuration;
}
live/liveMedia/H264VideoRTPSink.cpp 000444 001751 000000 00000011305 12265042432 017167 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for H.264 video (RFC 3984)
// Implementation
#include "H264VideoRTPSink.hh"
#include "H264VideoStreamFramer.hh"
#include "Base64.hh"
#include "H264VideoRTPSource.hh" // for "parseSPropParameterSets()"
////////// H264VideoRTPSink implementation //////////
H264VideoRTPSink
::H264VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize)
: H264or5VideoRTPSink(264, env, RTPgs, rtpPayloadFormat,
NULL, 0, sps, spsSize, pps, ppsSize) {
}
H264VideoRTPSink::~H264VideoRTPSink() {
}
H264VideoRTPSink* H264VideoRTPSink
::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) {
return new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat);
}
H264VideoRTPSink* H264VideoRTPSink
::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize) {
return new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat, sps, spsSize, pps, ppsSize);
}
H264VideoRTPSink* H264VideoRTPSink
::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
char const* sPropParameterSetsStr) {
u_int8_t* sps = NULL; unsigned spsSize = 0;
u_int8_t* pps = NULL; unsigned ppsSize = 0;
unsigned numSPropRecords;
SPropRecord* sPropRecords = parseSPropParameterSets(sPropParameterSetsStr, numSPropRecords);
for (unsigned i = 0; i < numSPropRecords; ++i) {
if (sPropRecords[i].sPropLength == 0) continue; // bad data
u_int8_t nal_unit_type = (sPropRecords[i].sPropBytes[0])&0x1F;
if (nal_unit_type == 7/*SPS*/) {
sps = sPropRecords[i].sPropBytes;
spsSize = sPropRecords[i].sPropLength;
} else if (nal_unit_type == 8/*PPS*/) {
pps = sPropRecords[i].sPropBytes;
ppsSize = sPropRecords[i].sPropLength;
}
}
H264VideoRTPSink* result
= new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat, sps, spsSize, pps, ppsSize);
delete[] sPropRecords;
return result;
}
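// (Background, with hypothetical values: "sPropParameterSetsStr" is the
// comma-separated Base64 "sprop-parameter-sets" string from a SDP description,
// e.g. "Z0IACpZTBYmI,aMljiA==" - one SPS NAL unit followed by one PPS NAL unit.
// The loop above classifies each decoded record via "nal_unit_type" - the low
// 5 bits of its first byte: 7 => SPS, 8 => PPS.)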
Boolean H264VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
// Our source must be an appropriate framer:
return source.isH264VideoStreamFramer();
}
char const* H264VideoRTPSink::auxSDPLine() {
// Generate a new "a=fmtp:" line each time, using our SPS and PPS (if we have them),
// otherwise parameters from our framer source (in case they've changed since the last time that
// we were called):
H264or5VideoStreamFramer* framerSource = NULL;
u_int8_t* vpsDummy = NULL; unsigned vpsDummySize = 0;
u_int8_t* sps = fSPS; unsigned spsSize = fSPSSize;
u_int8_t* pps = fPPS; unsigned ppsSize = fPPSSize;
if (sps == NULL || pps == NULL) {
// We need to get SPS and PPS from our framer source:
if (fOurFragmenter == NULL) return NULL; // we don't yet have a fragmenter (and therefore not a source)
framerSource = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource());
if (framerSource == NULL) return NULL; // we don't yet have a source
framerSource->getVPSandSPSandPPS(vpsDummy, vpsDummySize, sps, spsSize, pps, ppsSize);
if (sps == NULL || pps == NULL) return NULL; // our source isn't ready
}
// Set up the "a=fmtp:" SDP line for this stream:
char* sps_base64 = base64Encode((char*)sps, spsSize);
char* pps_base64 = base64Encode((char*)pps, ppsSize);
char const* fmtpFmt =
"a=fmtp:%d packetization-mode=1"
";profile-level-id=%06X"
";sprop-parameter-sets=%s,%s\r\n";
unsigned fmtpFmtSize = strlen(fmtpFmt)
+ 3 /* max char len */
+ 6 /* 3 bytes in hex */
+ strlen(sps_base64) + strlen(pps_base64);
char* fmtp = new char[fmtpFmtSize];
sprintf(fmtp, fmtpFmt,
rtpPayloadType(),
profileLevelId,
sps_base64, pps_base64);
delete[] sps_base64;
delete[] pps_base64;
delete[] fFmtpSDPLine; fFmtpSDPLine = fmtp;
return fFmtpSDPLine;
}
live/liveMedia/AVIFileSink.cpp 000444 001751 000000 00000065614 12265042432 016422 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A sink that generates an AVI file from a composite media session
// Implementation
#include "AVIFileSink.hh"
#include "InputFile.hh"
#include "OutputFile.hh"
#include "GroupsockHelper.hh"
#define fourChar(x,y,z,w) ( ((w)<<24)|((z)<<16)|((y)<<8)|(x) )/*little-endian*/
////////// AVISubsessionIOState ///////////
// A structure used to represent the I/O state of each input 'subsession':
class SubsessionBuffer {
public:
SubsessionBuffer(unsigned bufferSize)
: fBufferSize(bufferSize) {
reset();
fData = new unsigned char[bufferSize];
}
virtual ~SubsessionBuffer() { delete[] fData; }
void reset() { fBytesInUse = 0; }
void addBytes(unsigned numBytes) { fBytesInUse += numBytes; }
unsigned char* dataStart() { return &fData[0]; }
unsigned char* dataEnd() { return &fData[fBytesInUse]; }
unsigned bytesInUse() const { return fBytesInUse; }
unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; }
void setPresentationTime(struct timeval const& presentationTime) {
fPresentationTime = presentationTime;
}
struct timeval const& presentationTime() const {return fPresentationTime;}
private:
unsigned fBufferSize;
struct timeval fPresentationTime;
unsigned char* fData;
unsigned fBytesInUse;
};
class AVISubsessionIOState {
public:
AVISubsessionIOState(AVIFileSink& sink, MediaSubsession& subsession);
virtual ~AVISubsessionIOState();
void setAVIstate(unsigned subsessionIndex);
void setFinalAVIstate();
void afterGettingFrame(unsigned packetDataSize,
struct timeval presentationTime);
void onSourceClosure();
UsageEnvironment& envir() const { return fOurSink.envir(); }
public:
SubsessionBuffer *fBuffer, *fPrevBuffer;
AVIFileSink& fOurSink;
MediaSubsession& fOurSubsession;
unsigned short fLastPacketRTPSeqNum;
Boolean fOurSourceIsActive;
struct timeval fPrevPresentationTime;
unsigned fMaxBytesPerSecond;
Boolean fIsVideo, fIsAudio, fIsByteSwappedAudio;
unsigned fAVISubsessionTag;
unsigned fAVICodecHandlerType;
unsigned fAVISamplingFrequency; // for audio
u_int16_t fWAVCodecTag; // for audio
unsigned fAVIScale;
unsigned fAVIRate;
unsigned fAVISize;
unsigned fNumFrames;
unsigned fSTRHFrameCountPosition;
private:
void useFrame(SubsessionBuffer& buffer);
};
///////// AVIIndexRecord definition & implementation //////////
class AVIIndexRecord {
public:
AVIIndexRecord(unsigned chunkId, unsigned flags, unsigned offset, unsigned size)
: fNext(NULL), fChunkId(chunkId), fFlags(flags), fOffset(offset), fSize(size) {
}
AVIIndexRecord*& next() { return fNext; }
unsigned chunkId() const { return fChunkId; }
unsigned flags() const { return fFlags; }
unsigned offset() const { return fOffset; }
unsigned size() const { return fSize; }
private:
AVIIndexRecord* fNext;
unsigned fChunkId;
unsigned fFlags;
unsigned fOffset;
unsigned fSize;
};
////////// AVIFileSink implementation //////////
AVIFileSink::AVIFileSink(UsageEnvironment& env,
MediaSession& inputSession,
char const* outputFileName,
unsigned bufferSize,
unsigned short movieWidth, unsigned short movieHeight,
unsigned movieFPS, Boolean packetLossCompensate)
: Medium(env), fInputSession(inputSession),
fIndexRecordsHead(NULL), fIndexRecordsTail(NULL), fNumIndexRecords(0),
fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate),
fAreCurrentlyBeingPlayed(False), fNumSubsessions(0), fNumBytesWritten(0),
fHaveCompletedOutputFile(False),
fMovieWidth(movieWidth), fMovieHeight(movieHeight), fMovieFPS(movieFPS) {
fOutFid = OpenOutputFile(env, outputFileName);
if (fOutFid == NULL) return;
// Set up I/O state for each input subsession:
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
// Ignore subsessions without a data source:
FramedSource* subsessionSource = subsession->readSource();
if (subsessionSource == NULL) continue;
// If "subsession's" SDP description specified screen dimension
// or frame rate parameters, then use these.
if (subsession->videoWidth() != 0) {
fMovieWidth = subsession->videoWidth();
}
if (subsession->videoHeight() != 0) {
fMovieHeight = subsession->videoHeight();
}
if (subsession->videoFPS() != 0) {
fMovieFPS = subsession->videoFPS();
}
AVISubsessionIOState* ioState
= new AVISubsessionIOState(*this, *subsession);
subsession->miscPtr = (void*)ioState;
// Also set a 'BYE' handler for this subsession's RTCP instance:
if (subsession->rtcpInstance() != NULL) {
subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState);
}
++fNumSubsessions;
}
// Begin by writing an AVI header:
addFileHeader_AVI();
}
AVIFileSink::~AVIFileSink() {
completeOutputFile();
// Then, stop streaming and delete each active "AVISubsessionIOState":
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
subsession->readSource()->stopGettingFrames();
AVISubsessionIOState* ioState
= (AVISubsessionIOState*)(subsession->miscPtr);
if (ioState == NULL) continue;
delete ioState;
}
// Then, delete the index records:
AVIIndexRecord* cur = fIndexRecordsHead;
while (cur != NULL) {
AVIIndexRecord* next = cur->next();
delete cur;
cur = next;
}
// Finally, close our output file:
CloseOutputFile(fOutFid);
}
AVIFileSink* AVIFileSink
::createNew(UsageEnvironment& env, MediaSession& inputSession,
char const* outputFileName,
unsigned bufferSize,
unsigned short movieWidth, unsigned short movieHeight,
unsigned movieFPS, Boolean packetLossCompensate) {
AVIFileSink* newSink =
new AVIFileSink(env, inputSession, outputFileName, bufferSize,
movieWidth, movieHeight, movieFPS, packetLossCompensate);
if (newSink == NULL || newSink->fOutFid == NULL) {
Medium::close(newSink);
return NULL;
}
return newSink;
}
Boolean AVIFileSink::startPlaying(afterPlayingFunc* afterFunc,
void* afterClientData) {
// Make sure we're not already being played:
if (fAreCurrentlyBeingPlayed) {
envir().setResultMsg("This sink has already been played");
return False;
}
fAreCurrentlyBeingPlayed = True;
fAfterFunc = afterFunc;
fAfterClientData = afterClientData;
return continuePlaying();
}
Boolean AVIFileSink::continuePlaying() {
// Run through each of our input session's 'subsessions',
// asking for a frame from each one:
Boolean haveActiveSubsessions = False;
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
FramedSource* subsessionSource = subsession->readSource();
if (subsessionSource == NULL) continue;
if (subsessionSource->isCurrentlyAwaitingData()) continue;
AVISubsessionIOState* ioState
= (AVISubsessionIOState*)(subsession->miscPtr);
if (ioState == NULL) continue;
haveActiveSubsessions = True;
unsigned char* toPtr = ioState->fBuffer->dataEnd();
unsigned toSize = ioState->fBuffer->bytesAvailable();
subsessionSource->getNextFrame(toPtr, toSize,
afterGettingFrame, ioState,
onSourceClosure, ioState);
}
if (!haveActiveSubsessions) {
envir().setResultMsg("No subsessions are currently active");
return False;
}
return True;
}
void AVIFileSink
::afterGettingFrame(void* clientData, unsigned packetDataSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned /*durationInMicroseconds*/) {
AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData;
if (numTruncatedBytes > 0) {
ioState->envir() << "AVIFileSink::afterGettingFrame(): The input frame data was too large for our buffer. "
<< numTruncatedBytes
<< " bytes of trailing data was dropped! Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call.\n";
}
ioState->afterGettingFrame(packetDataSize, presentationTime);
}
void AVIFileSink::onSourceClosure(void* clientData) {
AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData;
ioState->onSourceClosure();
}
void AVIFileSink::onSourceClosure1() {
// Check whether *all* of the subsession sources have closed.
// If not, do nothing for now:
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
AVISubsessionIOState* ioState
= (AVISubsessionIOState*)(subsession->miscPtr);
if (ioState == NULL) continue;
if (ioState->fOurSourceIsActive) return; // this source hasn't closed
}
completeOutputFile();
// Call our specified 'after' function:
if (fAfterFunc != NULL) {
(*fAfterFunc)(fAfterClientData);
}
}
void AVIFileSink::onRTCPBye(void* clientData) {
AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData;
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
unsigned secsDiff
= timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec;
MediaSubsession& subsession = ioState->fOurSubsession;
ioState->envir() << "Received RTCP \"BYE\" on \""
<< subsession.mediumName()
<< "/" << subsession.codecName()
<< "\" subsession (after "
<< secsDiff << " seconds)\n";
// Handle the reception of a RTCP "BYE" as if the source had closed:
ioState->onSourceClosure();
}
void AVIFileSink::addIndexRecord(AVIIndexRecord* newIndexRecord) {
if (fIndexRecordsHead == NULL) {
fIndexRecordsHead = newIndexRecord;
} else {
fIndexRecordsTail->next() = newIndexRecord;
}
fIndexRecordsTail = newIndexRecord;
++fNumIndexRecords;
}
void AVIFileSink::completeOutputFile() {
if (fHaveCompletedOutputFile || fOutFid == NULL) return;
// Update various AVI 'size' fields to take account of the codec data that
// we've now written to the file:
unsigned maxBytesPerSecond = 0;
unsigned numVideoFrames = 0;
unsigned numAudioFrames = 0;
//// Subsession-specific fields:
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
AVISubsessionIOState* ioState
= (AVISubsessionIOState*)(subsession->miscPtr);
if (ioState == NULL) continue;
maxBytesPerSecond += ioState->fMaxBytesPerSecond;
setWord(ioState->fSTRHFrameCountPosition, ioState->fNumFrames);
if (ioState->fIsVideo) numVideoFrames = ioState->fNumFrames;
else if (ioState->fIsAudio) numAudioFrames = ioState->fNumFrames;
}
//// Global fields:
add4ByteString("idx1");
addWord(fNumIndexRecords*4*4); // the size of all of the index records, which come next:
for (AVIIndexRecord* indexRecord = fIndexRecordsHead; indexRecord != NULL; indexRecord = indexRecord->next()) {
addWord(indexRecord->chunkId());
addWord(indexRecord->flags());
addWord(indexRecord->offset());
addWord(indexRecord->size());
}
fRIFFSizeValue += fNumBytesWritten;
setWord(fRIFFSizePosition, fRIFFSizeValue);
setWord(fAVIHMaxBytesPerSecondPosition, maxBytesPerSecond);
setWord(fAVIHFrameCountPosition,
numVideoFrames > 0 ? numVideoFrames : numAudioFrames);
fMoviSizeValue += fNumBytesWritten;
setWord(fMoviSizePosition, fMoviSizeValue);
// We're done:
fHaveCompletedOutputFile = True;
}
////////// AVISubsessionIOState implementation ///////////
AVISubsessionIOState::AVISubsessionIOState(AVIFileSink& sink,
MediaSubsession& subsession)
: fOurSink(sink), fOurSubsession(subsession),
fMaxBytesPerSecond(0), fIsVideo(False), fIsAudio(False), fIsByteSwappedAudio(False), fNumFrames(0) {
fBuffer = new SubsessionBuffer(fOurSink.fBufferSize);
fPrevBuffer = sink.fPacketLossCompensate
? new SubsessionBuffer(fOurSink.fBufferSize) : NULL;
FramedSource* subsessionSource = subsession.readSource();
fOurSourceIsActive = subsessionSource != NULL;
fPrevPresentationTime.tv_sec = 0;
fPrevPresentationTime.tv_usec = 0;
}
AVISubsessionIOState::~AVISubsessionIOState() {
delete fBuffer; delete fPrevBuffer;
}
void AVISubsessionIOState::setAVIstate(unsigned subsessionIndex) {
fIsVideo = strcmp(fOurSubsession.mediumName(), "video") == 0;
fIsAudio = strcmp(fOurSubsession.mediumName(), "audio") == 0;
if (fIsVideo) {
fAVISubsessionTag
= fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'d','c');
if (strcmp(fOurSubsession.codecName(), "JPEG") == 0) {
fAVICodecHandlerType = fourChar('m','j','p','g');
} else if (strcmp(fOurSubsession.codecName(), "MP4V-ES") == 0) {
fAVICodecHandlerType = fourChar('D','I','V','X');
} else if (strcmp(fOurSubsession.codecName(), "MPV") == 0) {
fAVICodecHandlerType = fourChar('m','p','g','1'); // what about MPEG-2?
} else if (strcmp(fOurSubsession.codecName(), "H263-1998") == 0 ||
strcmp(fOurSubsession.codecName(), "H263-2000") == 0) {
fAVICodecHandlerType = fourChar('H','2','6','3');
} else if (strcmp(fOurSubsession.codecName(), "H264") == 0) {
fAVICodecHandlerType = fourChar('H','2','6','4');
} else {
fAVICodecHandlerType = fourChar('?','?','?','?');
}
fAVIScale = 1; // ??? #####
fAVIRate = fOurSink.fMovieFPS; // ??? #####
fAVISize = fOurSink.fMovieWidth*fOurSink.fMovieHeight*3; // ??? #####
} else if (fIsAudio) {
fIsByteSwappedAudio = False; // by default
fAVISubsessionTag
= fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'w','b');
fAVICodecHandlerType = 1; // ??? ####
unsigned numChannels = fOurSubsession.numChannels();
fAVISamplingFrequency = fOurSubsession.rtpTimestampFrequency(); // default
if (strcmp(fOurSubsession.codecName(), "L16") == 0) {
fIsByteSwappedAudio = True; // need to byte-swap data before writing it
fWAVCodecTag = 0x0001;
fAVIScale = fAVISize = 2*numChannels; // 2 bytes/sample
fAVIRate = fAVISize*fAVISamplingFrequency;
} else if (strcmp(fOurSubsession.codecName(), "L8") == 0) {
fWAVCodecTag = 0x0001;
fAVIScale = fAVISize = numChannels; // 1 byte/sample
fAVIRate = fAVISize*fAVISamplingFrequency;
} else if (strcmp(fOurSubsession.codecName(), "PCMA") == 0) {
fWAVCodecTag = 0x0006;
fAVIScale = fAVISize = numChannels; // 1 byte/sample
fAVIRate = fAVISize*fAVISamplingFrequency;
} else if (strcmp(fOurSubsession.codecName(), "PCMU") == 0) {
fWAVCodecTag = 0x0007;
fAVIScale = fAVISize = numChannels; // 1 byte/sample
fAVIRate = fAVISize*fAVISamplingFrequency;
} else if (strcmp(fOurSubsession.codecName(), "MPA") == 0) {
fWAVCodecTag = 0x0050;
fAVIScale = fAVISize = 1;
fAVIRate = 0; // ??? #####
} else {
fWAVCodecTag = 0x0001; // ??? #####
fAVIScale = fAVISize = 1;
fAVIRate = 0; // ??? #####
}
} else { // unknown medium
fAVISubsessionTag
= fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'?','?');
fAVICodecHandlerType = 0;
fAVIScale = fAVISize = 1;
fAVIRate = 0; // ??? #####
}
}
void AVISubsessionIOState::afterGettingFrame(unsigned packetDataSize,
struct timeval presentationTime) {
// Begin by checking whether there was a gap in the RTP stream.
// If so, try to compensate for this (if desired):
unsigned short rtpSeqNum
= fOurSubsession.rtpSource()->curPacketRTPSeqNum();
if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) {
short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum;
for (short i = 1; i < seqNumGap; ++i) {
// Insert a copy of the previous frame, to compensate for the loss:
useFrame(*fPrevBuffer);
}
}
fLastPacketRTPSeqNum = rtpSeqNum;
// Now, continue working with the frame that we just got
if (fBuffer->bytesInUse() == 0) {
fBuffer->setPresentationTime(presentationTime);
}
fBuffer->addBytes(packetDataSize);
useFrame(*fBuffer);
if (fOurSink.fPacketLossCompensate) {
// Save this frame, in case we need it for recovery:
SubsessionBuffer* tmp = fPrevBuffer; // assert: != NULL
fPrevBuffer = fBuffer;
fBuffer = tmp;
}
fBuffer->reset(); // for the next input
// Now, try getting more frames:
fOurSink.continuePlaying();
}
void AVISubsessionIOState::useFrame(SubsessionBuffer& buffer) {
unsigned char* const frameSource = buffer.dataStart();
unsigned const frameSize = buffer.bytesInUse();
struct timeval const& presentationTime = buffer.presentationTime();
if (fPrevPresentationTime.tv_usec != 0||fPrevPresentationTime.tv_sec != 0) {
int uSecondsDiff
= (presentationTime.tv_sec - fPrevPresentationTime.tv_sec)*1000000
+ (presentationTime.tv_usec - fPrevPresentationTime.tv_usec);
if (uSecondsDiff > 0) {
unsigned bytesPerSecond = (unsigned)((frameSize*1000000.0)/uSecondsDiff);
if (bytesPerSecond > fMaxBytesPerSecond) {
fMaxBytesPerSecond = bytesPerSecond;
}
}
}
fPrevPresentationTime = presentationTime;
if (fIsByteSwappedAudio) {
// We need to swap the 16-bit audio samples from big-endian
// to little-endian order, before writing them to a file:
for (unsigned i = 0; i < frameSize; i += 2) {
unsigned char tmp = frameSource[i];
frameSource[i] = frameSource[i+1];
frameSource[i+1] = tmp;
}
}
// Add an index record for this frame:
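// (A heuristic: 0x67 is the NAL header of an H.264 SPS; a frame that begins with a SPS is
// assumed to start a decodable access unit, so it gets flagged 0x10 == AVIIF_KEYFRAME.)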
AVIIndexRecord* newIndexRecord
= new AVIIndexRecord(fAVISubsessionTag, // chunk id
frameSource[0] == 0x67 ? 0x10 : 0, // flags
fOurSink.fMoviSizePosition + 8 + fOurSink.fNumBytesWritten, // offset (note: 8 == size + 'movi')
frameSize + 4); // size
fOurSink.addIndexRecord(newIndexRecord);
// Write the data into the file:
fOurSink.fNumBytesWritten += fOurSink.addWord(fAVISubsessionTag);
if (strcmp(fOurSubsession.codecName(), "H264") == 0) {
// Insert a 'start code' (0x00 0x00 0x00 0x01) in front of the frame:
fOurSink.fNumBytesWritten += fOurSink.addWord(4+frameSize);
fOurSink.fNumBytesWritten += fOurSink.addWord(fourChar(0x00, 0x00, 0x00, 0x01));//add start code
} else {
fOurSink.fNumBytesWritten += fOurSink.addWord(frameSize);
}
fwrite(frameSource, 1, frameSize, fOurSink.fOutFid);
fOurSink.fNumBytesWritten += frameSize;
// Pad to an even length:
if (frameSize%2 != 0) fOurSink.fNumBytesWritten += fOurSink.addByte(0);
++fNumFrames;
}
void AVISubsessionIOState::onSourceClosure() {
fOurSourceIsActive = False;
fOurSink.onSourceClosure1();
}
////////// AVI-specific implementation //////////
unsigned AVIFileSink::addWord(unsigned word) {
// Add "word" to the file in little-endian order:
addByte(word); addByte(word>>8);
addByte(word>>16); addByte(word>>24);
return 4;
}
unsigned AVIFileSink::addHalfWord(unsigned short halfWord) {
// Add "halfWord" to the file in little-endian order:
addByte((unsigned char)halfWord); addByte((unsigned char)(halfWord>>8));
return 2;
}
unsigned AVIFileSink::addZeroWords(unsigned numWords) {
for (unsigned i = 0; i < numWords; ++i) {
addWord(0);
}
return numWords*4;
}
unsigned AVIFileSink::add4ByteString(char const* str) {
addByte(str[0]); addByte(str[1]); addByte(str[2]);
addByte(str[3] == '\0' ? ' ' : str[3]); // e.g., for "AVI "
return 4;
}
void AVIFileSink::setWord(unsigned filePosn, unsigned size) {
do {
if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
addWord(size);
if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were
return;
} while (0);
// One of the SeekFile64()s failed, probably because the output isn't a seekable file
envir() << "AVIFileSink::setWord(): SeekFile64 failed (err "
<< envir().getErrno() << ")\n";
}
// Methods for writing particular file headers. Note the following macros:
#define addFileHeader(tag,name) \
unsigned AVIFileSink::addFileHeader_##name() { \
add4ByteString("" #tag ""); \
unsigned headerSizePosn = (unsigned)TellFile64(fOutFid); addWord(0); \
add4ByteString("" #name ""); \
unsigned ignoredSize = 8;/*don't include size of tag or size fields*/ \
unsigned size = 12
#define addFileHeader1(name) \
unsigned AVIFileSink::addFileHeader_##name() { \
add4ByteString("" #name ""); \
unsigned headerSizePosn = (unsigned)TellFile64(fOutFid); addWord(0); \
unsigned ignoredSize = 8;/*don't include size of name or size fields*/ \
unsigned size = 8
#define addFileHeaderEnd \
setWord(headerSizePosn, size-ignoredSize); \
return size; \
}
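// For example, "addFileHeader(RIFF,AVI);" below expands to the start of a function
// "addFileHeader_AVI()" that writes the 4-byte tag "RIFF", a 4-byte size placeholder
// (patched afterwards via "setWord()"), and the 4-byte name "AVI "; the statements that
// follow - up to the next "addFileHeaderEnd" - form the body of that function.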
addFileHeader(RIFF,AVI);
size += addFileHeader_hdrl();
size += addFileHeader_movi();
fRIFFSizePosition = headerSizePosn;
fRIFFSizeValue = size-ignoredSize;
addFileHeaderEnd;
addFileHeader(LIST,hdrl);
size += addFileHeader_avih();
// Then, add a "strl" header for each subsession (stream):
// (Make the video subsession (if any) come before the audio subsession.)
unsigned subsessionCount = 0;
MediaSubsessionIterator iter(fInputSession);
MediaSubsession* subsession;
while ((subsession = iter.next()) != NULL) {
fCurrentIOState = (AVISubsessionIOState*)(subsession->miscPtr);
if (fCurrentIOState == NULL) continue;
if (strcmp(subsession->mediumName(), "video") != 0) continue;
fCurrentIOState->setAVIstate(subsessionCount++);
size += addFileHeader_strl();
}
iter.reset();
while ((subsession = iter.next()) != NULL) {
fCurrentIOState = (AVISubsessionIOState*)(subsession->miscPtr);
if (fCurrentIOState == NULL) continue;
if (strcmp(subsession->mediumName(), "video") == 0) continue;
fCurrentIOState->setAVIstate(subsessionCount++);
size += addFileHeader_strl();
}
// Then add another JUNK entry
++fJunkNumber;
size += addFileHeader_JUNK();
addFileHeaderEnd;
#define AVIF_HASINDEX 0x00000010 // Index at end of file?
#define AVIF_MUSTUSEINDEX 0x00000020
#define AVIF_ISINTERLEAVED 0x00000100
#define AVIF_TRUSTCKTYPE 0x00000800 // Use CKType to find key frames?
#define AVIF_WASCAPTUREFILE 0x00010000
#define AVIF_COPYRIGHTED 0x00020000
addFileHeader1(avih);
unsigned usecPerFrame = fMovieFPS == 0 ? 0 : 1000000/fMovieFPS;
size += addWord(usecPerFrame); // dwMicroSecPerFrame
fAVIHMaxBytesPerSecondPosition = (unsigned)TellFile64(fOutFid);
size += addWord(0); // dwMaxBytesPerSec (fill in later)
size += addWord(0); // dwPaddingGranularity
size += addWord(AVIF_TRUSTCKTYPE|AVIF_HASINDEX|AVIF_ISINTERLEAVED); // dwFlags
fAVIHFrameCountPosition = (unsigned)TellFile64(fOutFid);
size += addWord(0); // dwTotalFrames (fill in later)
size += addWord(0); // dwInitialFrame
size += addWord(fNumSubsessions); // dwStreams
size += addWord(fBufferSize); // dwSuggestedBufferSize
size += addWord(fMovieWidth); // dwWidth
size += addWord(fMovieHeight); // dwHeight
size += addZeroWords(4); // dwReserved
addFileHeaderEnd;
addFileHeader(LIST,strl);
size += addFileHeader_strh();
size += addFileHeader_strf();
fJunkNumber = 0;
size += addFileHeader_JUNK();
addFileHeaderEnd;
addFileHeader1(strh);
size += add4ByteString(fCurrentIOState->fIsVideo ? "vids" :
fCurrentIOState->fIsAudio ? "auds" :
"????"); // fccType
size += addWord(fCurrentIOState->fAVICodecHandlerType); // fccHandler
size += addWord(0); // dwFlags
size += addWord(0); // wPriority + wLanguage
size += addWord(0); // dwInitialFrames
size += addWord(fCurrentIOState->fAVIScale); // dwScale
size += addWord(fCurrentIOState->fAVIRate); // dwRate
size += addWord(0); // dwStart
fCurrentIOState->fSTRHFrameCountPosition = (unsigned)TellFile64(fOutFid);
size += addWord(0); // dwLength (fill in later)
size += addWord(fBufferSize); // dwSuggestedBufferSize
size += addWord((unsigned)-1); // dwQuality
size += addWord(fCurrentIOState->fAVISize); // dwSampleSize
size += addWord(0); // rcFrame (start)
if (fCurrentIOState->fIsVideo) {
size += addHalfWord(fMovieWidth);
size += addHalfWord(fMovieHeight);
} else {
size += addWord(0);
}
addFileHeaderEnd;
addFileHeader1(strf);
if (fCurrentIOState->fIsVideo) {
// Add a BITMAPINFO header:
unsigned extraDataSize = 0;
size += addWord(10*4 + extraDataSize); // size
size += addWord(fMovieWidth);
size += addWord(fMovieHeight);
size += addHalfWord(1); // planes
size += addHalfWord(24); // bits-per-sample #####
size += addWord(fCurrentIOState->fAVICodecHandlerType); // compr. type
size += addWord(fCurrentIOState->fAVISize);
size += addZeroWords(4); // ??? #####
// Later, add extra data here (if any) #####
} else if (fCurrentIOState->fIsAudio) {
// Add a WAVFORMATEX header:
size += addHalfWord(fCurrentIOState->fWAVCodecTag);
unsigned numChannels = fCurrentIOState->fOurSubsession.numChannels();
size += addHalfWord(numChannels);
size += addWord(fCurrentIOState->fAVISamplingFrequency);
size += addWord(fCurrentIOState->fAVIRate); // bytes per second
size += addHalfWord(fCurrentIOState->fAVISize); // block alignment
unsigned bitsPerSample = (fCurrentIOState->fAVISize*8)/numChannels;
size += addHalfWord(bitsPerSample);
if (strcmp(fCurrentIOState->fOurSubsession.codecName(), "MPA") == 0) {
// Assume MPEG layer II audio (not MP3): #####
size += addHalfWord(22); // wav_extra_size
size += addHalfWord(2); // fwHeadLayer
size += addWord(8*fCurrentIOState->fAVIRate); // dwHeadBitrate #####
size += addHalfWord(numChannels == 2 ? 1: 8); // fwHeadMode
size += addHalfWord(0); // fwHeadModeExt
size += addHalfWord(1); // wHeadEmphasis
size += addHalfWord(16); // fwHeadFlags
size += addWord(0); // dwPTSLow
size += addWord(0); // dwPTSHigh
}
}
addFileHeaderEnd;
#define AVI_MASTER_INDEX_SIZE 256
addFileHeader1(JUNK);
if (fJunkNumber == 0) {
size += addHalfWord(4); // wLongsPerEntry
size += addHalfWord(0); // bIndexSubType + bIndexType
size += addWord(0); // nEntriesInUse #####
size += addWord(fCurrentIOState->fAVISubsessionTag); // dwChunkId
size += addZeroWords(2); // dwReserved
size += addZeroWords(AVI_MASTER_INDEX_SIZE*4);
} else {
size += add4ByteString("odml");
size += add4ByteString("dmlh");
unsigned wtfCount = 248;
size += addWord(wtfCount); // ??? #####
size += addZeroWords(wtfCount/4);
}
addFileHeaderEnd;
addFileHeader(LIST,movi);
fMoviSizePosition = headerSizePosn;
fMoviSizeValue = size-ignoredSize;
addFileHeaderEnd;
live/liveMedia/MPEG4ESVideoRTPSink.cpp 000444 001751 000000 00000012717 12265042432 017620 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for MPEG-4 Elementary Stream video (RFC 3016)
// Implementation
#include "MPEG4ESVideoRTPSink.hh"
#include "MPEG4VideoStreamFramer.hh"
#include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()"
MPEG4ESVideoRTPSink
::MPEG4ESVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency,
u_int8_t profileAndLevelIndication, char const* configStr)
: VideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "MP4V-ES"),
fVOPIsPresent(False), fProfileAndLevelIndication(profileAndLevelIndication), fFmtpSDPLine(NULL) {
fConfigBytes = parseGeneralConfigStr(configStr, fNumConfigBytes);
}
MPEG4ESVideoRTPSink::~MPEG4ESVideoRTPSink() {
delete[] fFmtpSDPLine;
delete[] fConfigBytes;
}
MPEG4ESVideoRTPSink*
MPEG4ESVideoRTPSink::createNew(UsageEnvironment& env,
Groupsock* RTPgs, unsigned char rtpPayloadFormat,
u_int32_t rtpTimestampFrequency) {
return new MPEG4ESVideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency);
}
MPEG4ESVideoRTPSink*
MPEG4ESVideoRTPSink::createNew(UsageEnvironment& env,
Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency,
u_int8_t profileAndLevelIndication, char const* configStr) {
return new MPEG4ESVideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, profileAndLevelIndication, configStr);
}
Boolean MPEG4ESVideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
// Our source must be an appropriate framer:
return source.isMPEG4VideoStreamFramer();
}
#define VOP_START_CODE 0x000001B6
void MPEG4ESVideoRTPSink
::doSpecialFrameHandling(unsigned fragmentationOffset,
unsigned char* frameStart,
unsigned numBytesInFrame,
struct timeval framePresentationTime,
unsigned numRemainingBytes) {
if (fragmentationOffset == 0) {
// Begin by inspecting the 4-byte code at the start of the frame:
if (numBytesInFrame < 4) return; // shouldn't happen
unsigned startCode = (frameStart[0]<<24) | (frameStart[1]<<16)
| (frameStart[2]<<8) | frameStart[3];
fVOPIsPresent = startCode == VOP_START_CODE;
}
// Set the RTP 'M' (marker) bit iff this frame ends a VOP
// (and there are no fragments remaining).
// This relies on the source being a "MPEG4VideoStreamFramer".
MPEG4VideoStreamFramer* framerSource = (MPEG4VideoStreamFramer*)fSource;
if (framerSource != NULL && framerSource->pictureEndMarker()
&& numRemainingBytes == 0) {
setMarkerBit();
framerSource->pictureEndMarker() = False;
}
// Also set the RTP timestamp. (We do this for each frame
// in the packet, to ensure that the timestamp of the VOP (if present)
// gets used.)
setTimestamp(framePresentationTime);
}
Boolean MPEG4ESVideoRTPSink::allowFragmentationAfterStart() const {
return True;
}
Boolean MPEG4ESVideoRTPSink
::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
unsigned /*numBytesInFrame*/) const {
// Once we've packed a VOP into the packet, then no other
// frame can be packed into it:
return !fVOPIsPresent;
}
char const* MPEG4ESVideoRTPSink::auxSDPLine() {
// Generate a new "a=fmtp:" line each time, using our own 'configuration' information (if we have it),
// otherwise parameters from our framer source (in case they've changed since the last time that
// we were called):
unsigned configLength = fNumConfigBytes;
unsigned char* config = fConfigBytes;
if (fProfileAndLevelIndication == 0 || config == NULL) {
// We need to get this information from our framer source:
MPEG4VideoStreamFramer* framerSource = (MPEG4VideoStreamFramer*)fSource;
if (framerSource == NULL) return NULL; // we don't yet have a source
fProfileAndLevelIndication = framerSource->profile_and_level_indication();
if (fProfileAndLevelIndication == 0) return NULL; // our source isn't ready
config = framerSource->getConfigBytes(configLength);
if (config == NULL) return NULL; // our source isn't ready
}
char const* fmtpFmt =
"a=fmtp:%d "
"profile-level-id=%d;"
"config=";
unsigned fmtpFmtSize = strlen(fmtpFmt)
+ 3 /* max char len */
+ 3 /* max char len */
+ 2*configLength /* 2*, because each byte prints as 2 chars */
+ 2 /* trailing \r\n */;
char* fmtp = new char[fmtpFmtSize];
sprintf(fmtp, fmtpFmt, rtpPayloadType(), fProfileAndLevelIndication);
char* endPtr = &fmtp[strlen(fmtp)];
for (unsigned i = 0; i < configLength; ++i) {
sprintf(endPtr, "%02X", config[i]);
endPtr += 2;
}
sprintf(endPtr, "\r\n");
delete[] fFmtpSDPLine;
fFmtpSDPLine = strDup(fmtp);
delete[] fmtp;
return fFmtpSDPLine;
}
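// For illustration, the resulting SDP line might look like this (all values hypothetical):
//   a=fmtp:96 profile-level-id=1;config=000001B001000001B509000001012000...
// where "config" is a hex dump of the stream's configuration headers, as obtained from
// the framer source (or from the "configStr" that was passed to our constructor).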
live/liveMedia/DigestAuthentication.cpp 000444 001751 000000 00000011725 12265042432 020467 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A class used for digest authentication.
// Implementation
#include "DigestAuthentication.hh"
#include "ourMD5.hh"
#include <strDup.hh>
#include <GroupsockHelper.hh> // for gettimeofday()
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
Authenticator::Authenticator() {
assign(NULL, NULL, NULL, NULL, False);
}
Authenticator::Authenticator(char const* username, char const* password, Boolean passwordIsMD5) {
assign(NULL, NULL, username, password, passwordIsMD5);
}
Authenticator::Authenticator(const Authenticator& orig) {
assign(orig.realm(), orig.nonce(), orig.username(), orig.password(), orig.fPasswordIsMD5);
}
Authenticator& Authenticator::operator=(const Authenticator& rightSide) {
if (&rightSide != this) {
reset();
assign(rightSide.realm(), rightSide.nonce(),
rightSide.username(), rightSide.password(), rightSide.fPasswordIsMD5);
}
return *this;
}
Authenticator::~Authenticator() {
reset();
}
void Authenticator::reset() {
resetRealmAndNonce();
resetUsernameAndPassword();
}
void Authenticator::setRealmAndNonce(char const* realm, char const* nonce) {
resetRealmAndNonce();
assignRealmAndNonce(realm, nonce);
}
void Authenticator::setRealmAndRandomNonce(char const* realm) {
resetRealmAndNonce();
// Construct data to seed the random nonce:
struct {
struct timeval timestamp;
unsigned counter;
} seedData;
gettimeofday(&seedData.timestamp, NULL);
static unsigned counter = 0;
seedData.counter = ++counter;
// Use MD5 to compute a 'random' nonce from this seed data:
char nonceBuf[33];
our_MD5Data((unsigned char*)(&seedData), sizeof seedData, nonceBuf);
assignRealmAndNonce(realm, nonceBuf);
}
void Authenticator::setUsernameAndPassword(char const* username,
char const* password,
Boolean passwordIsMD5) {
resetUsernameAndPassword();
assignUsernameAndPassword(username, password, passwordIsMD5);
}
char const* Authenticator::computeDigestResponse(char const* cmd,
char const* url) const {
// The "response" field is computed as:
// md5(md5(::)::md5(:))
// or, if "fPasswordIsMD5" is True:
// md5(::md5(:))
char ha1Buf[33];
if (fPasswordIsMD5) {
strncpy(ha1Buf, password(), 32);
ha1Buf[32] = '\0'; // just in case
} else {
unsigned const ha1DataLen = strlen(username()) + 1
+ strlen(realm()) + 1 + strlen(password());
unsigned char* ha1Data = new unsigned char[ha1DataLen+1];
sprintf((char*)ha1Data, "%s:%s:%s", username(), realm(), password());
our_MD5Data(ha1Data, ha1DataLen, ha1Buf);
delete[] ha1Data;
}
unsigned const ha2DataLen = strlen(cmd) + 1 + strlen(url);
unsigned char* ha2Data = new unsigned char[ha2DataLen+1];
sprintf((char*)ha2Data, "%s:%s", cmd, url);
char ha2Buf[33];
our_MD5Data(ha2Data, ha2DataLen, ha2Buf);
delete[] ha2Data;
unsigned const digestDataLen
= 32 + 1 + strlen(nonce()) + 1 + 32;
unsigned char* digestData = new unsigned char[digestDataLen+1];
sprintf((char*)digestData, "%s:%s:%s",
ha1Buf, nonce(), ha2Buf);
char const* result = our_MD5Data(digestData, digestDataLen, NULL);
delete[] digestData;
return result;
}
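// Usage sketch (illustrative only; the names below are this class's public interface):
//   Authenticator auth("user", "secret");
//   auth.setRealmAndNonce("LIVE555 Streaming Media", "0a4f113b");
//   char const* response = auth.computeDigestResponse("DESCRIBE", "rtsp://example.com/stream");
//   ... put "response" in the request's "Authorization:" header ...
//   auth.reclaimDigestResponse(response);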
void Authenticator::reclaimDigestResponse(char const* responseStr) const {
delete[](char*)responseStr;
}
void Authenticator::resetRealmAndNonce() {
delete[] fRealm; fRealm = NULL;
delete[] fNonce; fNonce = NULL;
}
void Authenticator::resetUsernameAndPassword() {
delete[] fUsername; fUsername = NULL;
delete[] fPassword; fPassword = NULL;
fPasswordIsMD5 = False;
}
void Authenticator::assignRealmAndNonce(char const* realm, char const* nonce) {
fRealm = strDup(realm);
fNonce = strDup(nonce);
}
void Authenticator::assignUsernameAndPassword(char const* username, char const* password, Boolean passwordIsMD5) {
fUsername = strDup(username);
fPassword = strDup(password);
fPasswordIsMD5 = passwordIsMD5;
}
void Authenticator::assign(char const* realm, char const* nonce,
char const* username, char const* password, Boolean passwordIsMD5) {
assignRealmAndNonce(realm, nonce);
assignUsernameAndPassword(username, password, passwordIsMD5);
}
live/liveMedia/PassiveServerMediaSubsession.cpp 000444 001751 000000 00000017425 12265042432 022172 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A 'ServerMediaSubsession' object that represents an existing
// 'RTPSink', rather than one that creates new 'RTPSink's on demand.
// Implementation
#include "PassiveServerMediaSubsession.hh"
#include <GroupsockHelper.hh>
////////// PassiveServerMediaSubsession //////////
PassiveServerMediaSubsession*
PassiveServerMediaSubsession::createNew(RTPSink& rtpSink,
RTCPInstance* rtcpInstance) {
return new PassiveServerMediaSubsession(rtpSink, rtcpInstance);
}
PassiveServerMediaSubsession
::PassiveServerMediaSubsession(RTPSink& rtpSink, RTCPInstance* rtcpInstance)
: ServerMediaSubsession(rtpSink.envir()),
fSDPLines(NULL), fRTPSink(rtpSink), fRTCPInstance(rtcpInstance) {
fClientRTCPSourceRecords = HashTable::create(ONE_WORD_HASH_KEYS);
}
class RTCPSourceRecord {
public:
RTCPSourceRecord(netAddressBits addr, Port const& port)
: addr(addr), port(port) {
}
netAddressBits addr;
Port port;
};
PassiveServerMediaSubsession::~PassiveServerMediaSubsession() {
delete[] fSDPLines;
// Clean out the RTCPSourceRecord table:
while (1) {
RTCPSourceRecord* source = (RTCPSourceRecord*)(fClientRTCPSourceRecords->RemoveNext());
if (source == NULL) break;
delete source;
}
delete fClientRTCPSourceRecords;
}
char const*
PassiveServerMediaSubsession::sdpLines() {
if (fSDPLines == NULL) {
// Construct a set of SDP lines that describe this subsession:
// Use the components from "rtpSink":
Groupsock const& gs = fRTPSink.groupsockBeingUsed();
AddressString groupAddressStr(gs.groupAddress());
unsigned short portNum = ntohs(gs.port().num());
unsigned char ttl = gs.ttl();
unsigned char rtpPayloadType = fRTPSink.rtpPayloadType();
char const* mediaType = fRTPSink.sdpMediaType();
unsigned estBitrate
= fRTCPInstance == NULL ? 50 : fRTCPInstance->totSessionBW();
char* rtpmapLine = fRTPSink.rtpmapLine();
char const* rangeLine = rangeSDPLine();
char const* auxSDPLine = fRTPSink.auxSDPLine();
if (auxSDPLine == NULL) auxSDPLine = "";
char const* const sdpFmt =
"m=%s %d RTP/AVP %d\r\n"
"c=IN IP4 %s/%d\r\n"
"b=AS:%u\r\n"
"%s"
"%s"
"%s"
"a=control:%s\r\n";
unsigned sdpFmtSize = strlen(sdpFmt)
+ strlen(mediaType) + 5 /* max short len */ + 3 /* max char len */
+ strlen(groupAddressStr.val()) + 3 /* max char len */
+ 20 /* max int len */
+ strlen(rtpmapLine)
+ strlen(rangeLine)
+ strlen(auxSDPLine)
+ strlen(trackId());
char* sdpLines = new char[sdpFmtSize];
sprintf(sdpLines, sdpFmt,
mediaType, // m=
portNum, // m=
rtpPayloadType, // m=
groupAddressStr.val(), // c=
ttl, // c= TTL
estBitrate, // b=AS:
rtpmapLine, // a=rtpmap:... (if present)
rangeLine, // a=range:... (if present)
auxSDPLine, // optional extra SDP line
trackId()); // a=control:
delete[] (char*)rangeLine; delete[] rtpmapLine;
fSDPLines = strDup(sdpLines);
delete[] sdpLines;
}
return fSDPLines;
}
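// For illustration, the resulting description might look like this (all values hypothetical):
//   m=video 18888 RTP/AVP 96
//   c=IN IP4 232.58.109.24/255
//   b=AS:500
//   a=rtpmap:96 H264/90000
//   a=control:track1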
void PassiveServerMediaSubsession
::getStreamParameters(unsigned clientSessionId,
netAddressBits clientAddress,
Port const& /*clientRTPPort*/,
Port const& clientRTCPPort,
int /*tcpSocketNum*/,
unsigned char /*rtpChannelId*/,
unsigned char /*rtcpChannelId*/,
netAddressBits& destinationAddress,
u_int8_t& destinationTTL,
Boolean& isMulticast,
Port& serverRTPPort,
Port& serverRTCPPort,
void*& streamToken) {
isMulticast = True;
Groupsock& gs = fRTPSink.groupsockBeingUsed();
if (destinationTTL == 255) destinationTTL = gs.ttl();
if (destinationAddress == 0) { // normal case
destinationAddress = gs.groupAddress().s_addr;
} else { // use the client-specified destination address instead:
struct in_addr destinationAddr; destinationAddr.s_addr = destinationAddress;
gs.changeDestinationParameters(destinationAddr, 0, destinationTTL);
if (fRTCPInstance != NULL) {
Groupsock* rtcpGS = fRTCPInstance->RTCPgs();
rtcpGS->changeDestinationParameters(destinationAddr, 0, destinationTTL);
}
}
serverRTPPort = gs.port();
if (fRTCPInstance != NULL) {
Groupsock* rtcpGS = fRTCPInstance->RTCPgs();
serverRTCPPort = rtcpGS->port();
}
streamToken = NULL; // not used
// Make a record of this client's source - for RTCP RR handling:
RTCPSourceRecord* source = new RTCPSourceRecord(clientAddress, clientRTCPPort);
fClientRTCPSourceRecords->Add((char const*)clientSessionId, source);
}
void PassiveServerMediaSubsession::startStream(unsigned clientSessionId,
void* /*streamToken*/,
TaskFunc* rtcpRRHandler,
void* rtcpRRHandlerClientData,
unsigned short& rtpSeqNum,
unsigned& rtpTimestamp,
ServerRequestAlternativeByteHandler* /*serverRequestAlternativeByteHandler*/,
void* /*serverRequestAlternativeByteHandlerClientData*/) {
rtpSeqNum = fRTPSink.currentSeqNo();
rtpTimestamp = fRTPSink.presetNextTimestamp();
// Try to use a big send buffer for RTP - at least 0.1 second of
// specified bandwidth and at least 50 KB
unsigned streamBitrate = fRTCPInstance == NULL ? 50 : fRTCPInstance->totSessionBW(); // in kbps
unsigned rtpBufSize = streamBitrate * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes
if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024;
increaseSendBufferTo(envir(), fRTPSink.groupsockBeingUsed().socketNum(), rtpBufSize);
if (fRTCPInstance != NULL) {
// Hack: Send a RTCP "SR" packet now, so that receivers will (likely) be able to
// get RTCP-synchronized presentation times immediately:
fRTCPInstance->sendReport();
// Set up the handler for incoming RTCP "RR" packets from this client:
RTCPSourceRecord* source = (RTCPSourceRecord*)(fClientRTCPSourceRecords->Lookup((char const*)clientSessionId));
if (source != NULL) {
fRTCPInstance->setSpecificRRHandler(source->addr, source->port,
rtcpRRHandler, rtcpRRHandlerClientData);
}
}
}
float PassiveServerMediaSubsession::getCurrentNPT(void* streamToken) {
// Return the elapsed time between our "RTPSink"s creation time, and the current time:
struct timeval const& creationTime = fRTPSink.creationTime(); // alias
struct timeval timeNow;
gettimeofday(&timeNow, NULL);
return (float)(timeNow.tv_sec - creationTime.tv_sec + (timeNow.tv_usec - creationTime.tv_usec)/1000000.0);
}
void PassiveServerMediaSubsession::deleteStream(unsigned clientSessionId, void*& /*streamToken*/) {
// Lookup and remove the 'RTCPSourceRecord' for this client. Also turn off RTCP "RR" handling:
RTCPSourceRecord* source = (RTCPSourceRecord*)(fClientRTCPSourceRecords->Lookup((char const*)clientSessionId));
if (source != NULL) {
if (fRTCPInstance != NULL) {
fRTCPInstance->unsetSpecificRRHandler(source->addr, source->port);
}
fClientRTCPSourceRecords->Remove((char const*)clientSessionId);
delete source;
}
}
live/liveMedia/AC3AudioRTPSource.cpp 000444 001751 000000 00000004313 12265042432 017442 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// AC3 Audio RTP Sources
// Implementation
#include "AC3AudioRTPSource.hh"
AC3AudioRTPSource*
AC3AudioRTPSource::createNew(UsageEnvironment& env,
Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency) {
return new AC3AudioRTPSource(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency);
}
AC3AudioRTPSource::AC3AudioRTPSource(UsageEnvironment& env,
Groupsock* rtpGS,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency)
: MultiFramedRTPSource(env, rtpGS,
rtpPayloadFormat, rtpTimestampFrequency) {
}
AC3AudioRTPSource::~AC3AudioRTPSource() {
}
Boolean AC3AudioRTPSource
::processSpecialHeader(BufferedPacket* packet,
unsigned& resultSpecialHeaderSize) {
unsigned char* headerStart = packet->data();
unsigned packetSize = packet->dataSize();
// There's a 2-byte payload header at the beginning:
if (packetSize < 2) return False;
resultSpecialHeaderSize = 2;
unsigned char FT = headerStart[0]&0x03;
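// (Per RFC 4184, FT == 0 means one or more complete frames, FT == 1 or 2 an initial
// fragment of a frame, and FT == 3 a non-initial fragment.)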
fCurrentPacketBeginsFrame = FT != 3;
// The RTP "M" (marker) bit indicates the last fragment of a frame.
// In case the sender did not set the "M" bit correctly, we also test for FT == 0:
fCurrentPacketCompletesFrame = packet->rtpMarkerBit() || FT == 0;
return True;
}
char const* AC3AudioRTPSource::MIMEtype() const {
return "audio/AC3";
}
live/liveMedia/GSMAudioRTPSink.cpp 000444 001751 000000 00000002657 12265042432 017177 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// RTP sink for GSM audio
// Implementation
#include "GSMAudioRTPSink.hh"
GSMAudioRTPSink::GSMAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs)
: AudioRTPSink(env, RTPgs, 3, 8000, "GSM") {
}
GSMAudioRTPSink::~GSMAudioRTPSink() {
}
GSMAudioRTPSink*
GSMAudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) {
return new GSMAudioRTPSink(env, RTPgs);
}
Boolean GSMAudioRTPSink
::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
unsigned /*numBytesInFrame*/) const {
// Allow at most 5 frames in a single packet:
return numFramesUsedSoFar() < 5;
}
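// (Each GSM 06.10 frame is 33 bytes, representing 20 ms of 8 kHz audio - see RFC 3551 -
// so a full packet carries at most 165 bytes, i.e., 100 ms, of audio.)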
live/liveMedia/BasicUDPSource.cpp 000444 001751 000000 00000005604 12265042432 017122 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A simple UDP source, where every UDP payload is a complete frame
// Implementation
#include "BasicUDPSource.hh"
#include <GroupsockHelper.hh>
BasicUDPSource* BasicUDPSource::createNew(UsageEnvironment& env,
Groupsock* inputGS) {
return new BasicUDPSource(env, inputGS);
}
BasicUDPSource::BasicUDPSource(UsageEnvironment& env, Groupsock* inputGS)
: FramedSource(env), fInputGS(inputGS), fHaveStartedReading(False) {
// Try to use a large receive buffer (in the OS):
increaseReceiveBufferTo(env, inputGS->socketNum(), 50*1024);
// Make the socket non-blocking, even though it will be read from only asynchronously, when packets arrive.
// The reason for this is that, in some OSs, reads on a blocking socket can (allegedly) sometimes block,
// even if the socket was previously reported (e.g., by "select()") as having data available.
// (This can supposedly happen if the UDP checksum fails, for example.)
makeSocketNonBlocking(fInputGS->socketNum());
}
BasicUDPSource::~BasicUDPSource(){
envir().taskScheduler().turnOffBackgroundReadHandling(fInputGS->socketNum());
}
void BasicUDPSource::doGetNextFrame() {
if (!fHaveStartedReading) {
// Await incoming packets:
envir().taskScheduler().turnOnBackgroundReadHandling(fInputGS->socketNum(),
(TaskScheduler::BackgroundHandlerProc*)&incomingPacketHandler, this);
fHaveStartedReading = True;
}
}
void BasicUDPSource::doStopGettingFrames() {
envir().taskScheduler().turnOffBackgroundReadHandling(fInputGS->socketNum());
fHaveStartedReading = False;
}
void BasicUDPSource::incomingPacketHandler(BasicUDPSource* source, int /*mask*/){
source->incomingPacketHandler1();
}
void BasicUDPSource::incomingPacketHandler1() {
if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet
// Read the packet into our desired destination:
struct sockaddr_in fromAddress;
if (!fInputGS->handleRead(fTo, fMaxSize, fFrameSize, fromAddress)) return;
// Tell our client that we have new data:
afterGetting(this); // we're preceded by a net read; no infinite recursion
}
live/liveMedia/H264VideoRTPSource.cpp 000444 001751 000000 00000014374 12265042432 017534 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// H.264 Video RTP Sources
// Implementation
#include "H264VideoRTPSource.hh"
#include "Base64.hh"
////////// H264BufferedPacket and H264BufferedPacketFactory //////////
class H264BufferedPacket: public BufferedPacket {
public:
H264BufferedPacket(H264VideoRTPSource& ourSource);
virtual ~H264BufferedPacket();
private: // redefined virtual functions
virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
unsigned dataSize);
private:
H264VideoRTPSource& fOurSource;
};
class H264BufferedPacketFactory: public BufferedPacketFactory {
private: // redefined virtual functions
virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
};
///////// H264VideoRTPSource implementation ////////
H264VideoRTPSource*
H264VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency) {
return new H264VideoRTPSource(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency);
}
H264VideoRTPSource
::H264VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency)
: MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency,
new H264BufferedPacketFactory) {
}
H264VideoRTPSource::~H264VideoRTPSource() {
}
Boolean H264VideoRTPSource
::processSpecialHeader(BufferedPacket* packet,
unsigned& resultSpecialHeaderSize) {
unsigned char* headerStart = packet->data();
unsigned packetSize = packet->dataSize();
// The header has a minimum size of 0, since the NAL header is used
// as a payload header
unsigned expectedHeaderSize = 0;
// Check if the type field is 28 (FU-A) or 29 (FU-B)
fCurPacketNALUnitType = (headerStart[0]&0x1F);
switch (fCurPacketNALUnitType) {
case 24: { // STAP-A
expectedHeaderSize = 1; // discard the type byte
break;
}
case 25: case 26: case 27: { // STAP-B, MTAP16, or MTAP24
expectedHeaderSize = 3; // discard the type byte, and the initial DON
break;
}
case 28: case 29: { // FU-A or FU-B
// For these NALUs, the first two bytes are the FU indicator and the FU header.
// If the start bit is set, we reconstruct the original NAL header:
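// The FU indicator is F|NRI|type (type == 28 or 29); the FU header is S|E|R|type, with
// "type" being the original NAL unit type. E.g., the first FU-A fragment of an IDR slice
// (type 5) might begin 0x7C 0x85; reconstructing gives (0x7C&0xE0)|(0x85&0x1F) == 0x65,
// the original NAL header.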
unsigned char startBit = headerStart[1]&0x80;
unsigned char endBit = headerStart[1]&0x40;
if (startBit) {
expectedHeaderSize = 1;
if (packetSize < expectedHeaderSize) return False;
headerStart[1] = (headerStart[0]&0xE0)+(headerStart[1]&0x1F);
fCurrentPacketBeginsFrame = True;
} else {
// If the startbit is not set, both the FU indicator and header
// can be discarded
expectedHeaderSize = 2;
if (packetSize < expectedHeaderSize) return False;
fCurrentPacketBeginsFrame = False;
}
fCurrentPacketCompletesFrame = (endBit != 0);
break;
}
default: {
// This packet contains one or more complete, decodable NAL units
fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame = True;
break;
}
}
resultSpecialHeaderSize = expectedHeaderSize;
return True;
}
char const* H264VideoRTPSource::MIMEtype() const {
return "video/H264";
}
SPropRecord* parseSPropParameterSets(char const* sPropParameterSetsStr,
// result parameter:
unsigned& numSPropRecords) {
// Make a copy of the input string, so we can replace the commas with '\0's:
char* inStr = strDup(sPropParameterSetsStr);
if (inStr == NULL) {
numSPropRecords = 0;
return NULL;
}
// Count the number of commas (and thus the number of parameter sets):
numSPropRecords = 1;
char* s;
for (s = inStr; *s != '\0'; ++s) {
if (*s == ',') {
++numSPropRecords;
*s = '\0';
}
}
// Allocate and fill in the result array:
SPropRecord* resultArray = new SPropRecord[numSPropRecords];
s = inStr;
for (unsigned i = 0; i < numSPropRecords; ++i) {
resultArray[i].sPropBytes = base64Decode(s, resultArray[i].sPropLength);
s += strlen(s) + 1;
}
delete[] inStr;
return resultArray;
}
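// Usage sketch (the sprop string - as taken from a SDP "a=fmtp:" line - is hypothetical):
//   unsigned numSPropRecords;
//   SPropRecord* records = parseSPropParameterSets("Z0IACpZTBYmI,aMljiA==", numSPropRecords);
//   // "numSPropRecords" is now 2: records[0] holds the decoded SPS, records[1] the PPS
//   delete[] records; // each record's "sPropBytes" is reclaimed by its destructor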
////////// H264BufferedPacket and H264BufferedPacketFactory implementation //////////
H264BufferedPacket::H264BufferedPacket(H264VideoRTPSource& ourSource)
: fOurSource(ourSource) {
}
H264BufferedPacket::~H264BufferedPacket() {
}
unsigned H264BufferedPacket
::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
unsigned resultNALUSize = 0; // if an error occurs
switch (fOurSource.fCurPacketNALUnitType) {
case 24: case 25: { // STAP-A or STAP-B
// The first two bytes are NALU size:
if (dataSize < 2) break;
resultNALUSize = (framePtr[0]<<8)|framePtr[1];
framePtr += 2;
break;
}
case 26: { // MTAP16
// The first two bytes are NALU size. The next three are the DOND and TS offset:
if (dataSize < 5) break;
resultNALUSize = (framePtr[0]<<8)|framePtr[1];
framePtr += 5;
break;
}
case 27: { // MTAP24
// The first two bytes are NALU size. The next four are the DOND and TS offset:
if (dataSize < 6) break;
resultNALUSize = (framePtr[0]<<8)|framePtr[1];
framePtr += 6;
break;
}
default: {
// Common case: We use the entire packet data:
return dataSize;
}
}
return (resultNALUSize <= dataSize) ? resultNALUSize : dataSize;
}
BufferedPacket* H264BufferedPacketFactory
::createNewPacket(MultiFramedRTPSource* ourSource) {
return new H264BufferedPacket((H264VideoRTPSource&)(*ourSource));
}
live/liveMedia/MPEG4VideoStreamDiscreteFramer.cpp 000444 001751 000000 00000022500 12265042432 022140 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A simplified version of "MPEG4VideoStreamFramer" that takes only complete,
// discrete frames (rather than an arbitrary byte stream) as input.
// This avoids the parsing and data copying overhead of the full
// "MPEG4VideoStreamFramer".
// Implementation
#include "MPEG4VideoStreamDiscreteFramer.hh"
MPEG4VideoStreamDiscreteFramer*
MPEG4VideoStreamDiscreteFramer::createNew(UsageEnvironment& env,
FramedSource* inputSource, Boolean leavePresentationTimesUnmodified) {
// Need to add source type checking here??? #####
return new MPEG4VideoStreamDiscreteFramer(env, inputSource, leavePresentationTimesUnmodified);
}
MPEG4VideoStreamDiscreteFramer
::MPEG4VideoStreamDiscreteFramer(UsageEnvironment& env,
FramedSource* inputSource, Boolean leavePresentationTimesUnmodified)
: MPEG4VideoStreamFramer(env, inputSource, False/*don't create a parser*/),
fLeavePresentationTimesUnmodified(leavePresentationTimesUnmodified), vop_time_increment_resolution(0), fNumVTIRBits(0),
fLastNonBFrameVop_time_increment(0) {
fLastNonBFramePresentationTime.tv_sec = 0;
fLastNonBFramePresentationTime.tv_usec = 0;
}
MPEG4VideoStreamDiscreteFramer::~MPEG4VideoStreamDiscreteFramer() {
}
void MPEG4VideoStreamDiscreteFramer::doGetNextFrame() {
// Arrange to read data (which should be a complete MPEG-4 video frame)
// from our data source, directly into the client's input buffer.
// After reading this, we'll do some parsing on the frame.
fInputSource->getNextFrame(fTo, fMaxSize,
afterGettingFrame, this,
FramedSource::handleClosure, this);
}
void MPEG4VideoStreamDiscreteFramer
::afterGettingFrame(void* clientData, unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
MPEG4VideoStreamDiscreteFramer* source = (MPEG4VideoStreamDiscreteFramer*)clientData;
source->afterGettingFrame1(frameSize, numTruncatedBytes,
presentationTime, durationInMicroseconds);
}
void MPEG4VideoStreamDiscreteFramer
::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
// Check that the frame begins with a 4-byte start code (00 00 01, followed by a code byte):
if (frameSize >= 4 && fTo[0] == 0 && fTo[1] == 0 && fTo[2] == 1) {
fPictureEndMarker = True; // Assume that we have a complete 'picture' here
unsigned i = 3;
if (fTo[i] == 0xB0) { // VISUAL_OBJECT_SEQUENCE_START_CODE
// The next byte is the "profile_and_level_indication":
if (frameSize >= 5) fProfileAndLevelIndication = fTo[4];
// The start of this frame - up to the first GROUP_VOP_START_CODE
// or VOP_START_CODE - is stream configuration information. Save this:
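// (The scan starts at i == 7, because bytes 0..3 hold the start code that
// we just checked, and the next 4-byte start code can end no earlier than byte 7:)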
for (i = 7; i < frameSize; ++i) {
if ((fTo[i] == 0xB3 /*GROUP_VOP_START_CODE*/ ||
fTo[i] == 0xB6 /*VOP_START_CODE*/)
&& fTo[i-1] == 1 && fTo[i-2] == 0 && fTo[i-3] == 0) {
break; // The configuration information ends here
}
}
fNumConfigBytes = i < frameSize ? i-3 : frameSize;
delete[] fConfigBytes; fConfigBytes = new unsigned char[fNumConfigBytes];
for (unsigned j = 0; j < fNumConfigBytes; ++j) fConfigBytes[j] = fTo[j];
// This information (should) also contain a VOL header, which we need
// to analyze, to get "vop_time_increment_resolution" (which we need
// - along with "vop_time_increment" - in order to generate accurate
// presentation times for "B" frames).
analyzeVOLHeader();
}
if (i < frameSize) {
u_int8_t nextCode = fTo[i];
if (nextCode == 0xB3 /*GROUP_VOP_START_CODE*/) {
// Skip to the following VOP_START_CODE (if any):
for (i += 4; i < frameSize; ++i) {
if (fTo[i] == 0xB6 /*VOP_START_CODE*/
&& fTo[i-1] == 1 && fTo[i-2] == 0 && fTo[i-3] == 0) {
nextCode = fTo[i];
break;
}
}
}
if (nextCode == 0xB6 /*VOP_START_CODE*/ && i+5 < frameSize) {
++i;
// Get the "vop_coding_type" from the next byte:
u_int8_t nextByte = fTo[i++];
u_int8_t vop_coding_type = nextByte>>6;
// Next, get the "modulo_time_base" by counting the '1' bits that
// follow. We look at the next 32 bits only.
// This should be enough in most cases.
u_int32_t next4Bytes
= (fTo[i]<<24)|(fTo[i+1]<<16)|(fTo[i+2]<<8)|fTo[i+3];
i += 4;
u_int32_t timeInfo = (nextByte<<(32-6))|(next4Bytes>>6);
unsigned modulo_time_base = 0;
u_int32_t mask = 0x80000000;
while ((timeInfo&mask) != 0) {
++modulo_time_base;
mask >>= 1;
}
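// Skip over the '0' bit that terminates "modulo_time_base", and over the
// "marker_bit" that follows it: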
mask >>= 2;
// Then, get the "vop_time_increment".
unsigned vop_time_increment = 0;
// First, make sure we have enough bits left for this:
// (Note: "fNumVTIRBits" is 0 if no VOL header was seen; guard against an
// undefined shift in that case:)
if (fNumVTIRBits > 0 && (mask>>(fNumVTIRBits-1)) != 0) {
for (unsigned j = 0; j < fNumVTIRBits; ++j) {
vop_time_increment |= timeInfo&mask;
mask >>= 1;
}
while (mask != 0) {
vop_time_increment >>= 1;
mask >>= 1;
}
}
// If this is a "B" frame, then we have to tweak "presentationTime":
if (!fLeavePresentationTimesUnmodified && vop_coding_type == 2/*B*/
&& (fLastNonBFramePresentationTime.tv_usec > 0 ||
fLastNonBFramePresentationTime.tv_sec > 0)) {
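// In decoding order, a "B" frame arrives after the reference frame that
// follows it in display order, so we derive its presentation time by
// stepping backwards from the last non-B frame's presentation time: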
int timeIncrement
= fLastNonBFrameVop_time_increment - vop_time_increment;
if (timeIncrement<0) timeIncrement += vop_time_increment_resolution;
unsigned const MILLION = 1000000;
double usIncrement = vop_time_increment_resolution == 0 ? 0.0
: ((double)timeIncrement*MILLION)/vop_time_increment_resolution;
unsigned secondsToSubtract = (unsigned)(usIncrement/MILLION);
unsigned uSecondsToSubtract = ((unsigned)usIncrement)%MILLION;
presentationTime = fLastNonBFramePresentationTime;
if ((unsigned)presentationTime.tv_usec < uSecondsToSubtract) {
presentationTime.tv_usec += MILLION;
if (presentationTime.tv_sec > 0) --presentationTime.tv_sec;
}
presentationTime.tv_usec -= uSecondsToSubtract;
if ((unsigned)presentationTime.tv_sec > secondsToSubtract) {
presentationTime.tv_sec -= secondsToSubtract;
} else {
presentationTime.tv_sec = presentationTime.tv_usec = 0;
}
} else {
fLastNonBFramePresentationTime = presentationTime;
fLastNonBFrameVop_time_increment = vop_time_increment;
}
}
}
}
// Complete delivery to the client:
fFrameSize = frameSize;
fNumTruncatedBytes = numTruncatedBytes;
fPresentationTime = presentationTime;
fDurationInMicroseconds = durationInMicroseconds;
afterGetting(this);
}
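// The following two routines read bits - most-significant bit first - from the
// saved configuration bytes ("fConfigBytes"), using "fNumBitsSeenSoFar" as a cursor: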
Boolean MPEG4VideoStreamDiscreteFramer::getNextFrameBit(u_int8_t& result) {
if (fNumBitsSeenSoFar/8 >= fNumConfigBytes) return False;
u_int8_t nextByte = fConfigBytes[fNumBitsSeenSoFar/8];
result = (nextByte>>(7-fNumBitsSeenSoFar%8))&1;
++fNumBitsSeenSoFar;
return True;
}
Boolean MPEG4VideoStreamDiscreteFramer::getNextFrameBits(unsigned numBits,
u_int32_t& result) {
result = 0;
for (unsigned i = 0; i < numBits; ++i) {
u_int8_t nextBit;
if (!getNextFrameBit(nextBit)) return False;
result = (result<<1)|nextBit;
}
return True;
}
void MPEG4VideoStreamDiscreteFramer::analyzeVOLHeader() {
// Begin by moving to the VOL header:
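// (A "video_object_layer_start_code" is 00 00 01, followed by a byte in the
// range 0x20..0x2F:)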
unsigned i;
for (i = 3; i < fNumConfigBytes; ++i) {
if (fConfigBytes[i] >= 0x20 && fConfigBytes[i] <= 0x2F
&& fConfigBytes[i-1] == 1
&& fConfigBytes[i-2] == 0 && fConfigBytes[i-3] == 0) {
++i;
break;
}
}
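// Skip over the 1-bit "random_accessible_vol" and the 8-bit
// "video_object_type_indication" fields that follow the start code: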
fNumBitsSeenSoFar = 8*i + 9;
do {
u_int8_t is_object_layer_identifier;
if (!getNextFrameBit(is_object_layer_identifier)) break;
if (is_object_layer_identifier) fNumBitsSeenSoFar += 7;
u_int32_t aspect_ratio_info;
if (!getNextFrameBits(4, aspect_ratio_info)) break;
if (aspect_ratio_info == 15 /*extended_PAR*/) fNumBitsSeenSoFar += 16;
u_int8_t vol_control_parameters;
if (!getNextFrameBit(vol_control_parameters)) break;
if (vol_control_parameters) {
fNumBitsSeenSoFar += 3; // 2-bit "chroma_format"; 1-bit "low_delay"
u_int8_t vbv_parameters;
if (!getNextFrameBit(vbv_parameters)) break;
if (vbv_parameters) fNumBitsSeenSoFar += 79;
}
fNumBitsSeenSoFar += 2; // video_object_layer_shape
u_int8_t marker_bit;
if (!getNextFrameBit(marker_bit)) break;
if (marker_bit != 1) break; // sanity check
if (!getNextFrameBits(16, vop_time_increment_resolution)) break;
if (vop_time_increment_resolution == 0) break; // shouldn't happen
// Compute how many bits are necessary to represent this:
fNumVTIRBits = 0;
for (unsigned test = vop_time_increment_resolution; test>0; test /= 2) {
++fNumVTIRBits;
}
} while (0);
}
live/liveMedia/MPEG2TransportStreamFromPESSource.cpp 000444 001751 000000 00000005347 12265042432 022633 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// A filter for converting a stream of MPEG PES packets to an MPEG-2 Transport Stream
// Implementation
#include "MPEG2TransportStreamFromPESSource.hh"
#define MAX_PES_PACKET_SIZE (6+65535)
MPEG2TransportStreamFromPESSource* MPEG2TransportStreamFromPESSource
::createNew(UsageEnvironment& env, MPEG1or2DemuxedElementaryStream* inputSource) {
return new MPEG2TransportStreamFromPESSource(env, inputSource);
}
MPEG2TransportStreamFromPESSource
::MPEG2TransportStreamFromPESSource(UsageEnvironment& env,
MPEG1or2DemuxedElementaryStream* inputSource)
: MPEG2TransportStreamMultiplexor(env),
fInputSource(inputSource) {
fInputBuffer = new unsigned char[MAX_PES_PACKET_SIZE];
}
MPEG2TransportStreamFromPESSource::~MPEG2TransportStreamFromPESSource() {
Medium::close(fInputSource);
delete[] fInputBuffer;
}
void MPEG2TransportStreamFromPESSource::doStopGettingFrames() {
fInputSource->stopGettingFrames();
}
void MPEG2TransportStreamFromPESSource
::awaitNewBuffer(unsigned char* /*oldBuffer*/) {
fInputSource->getNextFrame(fInputBuffer, MAX_PES_PACKET_SIZE,
afterGettingFrame, this,
FramedSource::handleClosure, this);
}
void MPEG2TransportStreamFromPESSource
::afterGettingFrame(void* clientData, unsigned frameSize,
unsigned numTruncatedBytes,
struct timeval presentationTime,
unsigned durationInMicroseconds) {
MPEG2TransportStreamFromPESSource* source
= (MPEG2TransportStreamFromPESSource*)clientData;
source->afterGettingFrame1(frameSize, numTruncatedBytes,
presentationTime, durationInMicroseconds);
}
void MPEG2TransportStreamFromPESSource
::afterGettingFrame1(unsigned frameSize,
unsigned /*numTruncatedBytes*/,
struct timeval /*presentationTime*/,
unsigned /*durationInMicroseconds*/) {
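// A PES packet begins with a 4-byte start code (00 00 01 <stream_id>), so
// ignore anything smaller: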
if (frameSize < 4) return;
handleNewBuffer(fInputBuffer, frameSize,
fInputSource->mpegVersion(), fInputSource->lastSeenSCR());
}
live/liveMedia/H263plusVideoRTPSource.cpp 000444 001751 000000 00000007221 12265042432 020430 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2014 Live Networks, Inc. All rights reserved.
// H.263+ Video RTP Sources
// Implementation
#include "H263plusVideoRTPSource.hh"
H263plusVideoRTPSource*
H263plusVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency) {
return new H263plusVideoRTPSource(env, RTPgs, rtpPayloadFormat,
rtpTimestampFrequency);
}
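// A minimal usage sketch (illustrative only - not part of the original file;
// "env" and "rtpGroupsock" are assumed to exist, 96 is a dynamic RTP payload
// type, and 90000 Hz is the usual video timestamp frequency):
//   H263plusVideoRTPSource* source = H263plusVideoRTPSource
//     ::createNew(env, rtpGroupsock, 96, 90000);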
H263plusVideoRTPSource
::H263plusVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
unsigned char rtpPayloadFormat,
unsigned rtpTimestampFrequency)
: MultiFramedRTPSource(env, RTPgs,
rtpPayloadFormat, rtpTimestampFrequency),
fNumSpecialHeaders(0), fSpecialHeaderBytesLength(0) {
}
H263plusVideoRTPSource::~H263plusVideoRTPSource() {
}
Boolean H263plusVideoRTPSource
::processSpecialHeader(BufferedPacket* packet,
unsigned& resultSpecialHeaderSize) {
unsigned char* headerStart = packet->data();
unsigned packetSize = packet->dataSize();
// The H.263+ payload header is at least 2 bytes in size.
// Extract the known fields from the first 2 bytes:
unsigned expectedHeaderSize = 2;
if (packetSize < expectedHeaderSize) return False;
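// The first 16 bits of the header are: RR (5 bits), P (1), V (1), PLEN (6),
// PEBIT (3) - see RFC 4629, section 5.1: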
//unsigned char RR = headerStart[0]>>3;
Boolean P = (headerStart[0]&0x4) != 0;
Boolean V = (headerStart[0]&0x2) != 0;
unsigned char PLEN = ((headerStart[0]&0x1)<<5)|(headerStart[1]>>3);
//unsigned char PEBIT = headerStart[1]&0x7;
if (V) {
// There's an extra VRC byte at the end of the header:
++expectedHeaderSize;
if (packetSize < expectedHeaderSize) return False;
}
if (PLEN > 0) {
// There's an extra picture header at the end:
expectedHeaderSize += PLEN;
if (packetSize < expectedHeaderSize) return False;
}
fCurrentPacketBeginsFrame = P;
if (fCurrentPacketBeginsFrame) {
fNumSpecialHeaders = fSpecialHeaderBytesLength = 0;
}
// Make a copy of the special header bytes, in case a reader
// can use them:
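// (Each saved entry consists of a 1-byte header length, followed by the
// header bytes themselves; "fPacketSizes[]" records each packet's size.)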
unsigned bytesAvailable
= SPECIAL_HEADER_BUFFER_SIZE - fSpecialHeaderBytesLength - 1;
if (expectedHeaderSize <= bytesAvailable) {
fSpecialHeaderBytes[fSpecialHeaderBytesLength++] = expectedHeaderSize;
for (unsigned i = 0; i < expectedHeaderSize; ++i) {
fSpecialHeaderBytes[fSpecialHeaderBytesLength++] = headerStart[i];
}
fPacketSizes[fNumSpecialHeaders++] = packetSize;
}
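// (When P is set, the first two - zero - bytes of the picture (or GOB/slice)
// start code were elided from the payload; see RFC 4629.)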
if (P) {
// Prepend two zero bytes to the start of the payload proper.
// Hack: Do this by shrinking this special header by 2 bytes:
expectedHeaderSize -= 2;
headerStart[expectedHeaderSize] = 0;
headerStart[expectedHeaderSize+1] = 0;
}
// The RTP "M" (marker) bit indicates the last fragment of a frame:
fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
resultSpecialHeaderSize = expectedHeaderSize;
return True;
}
char const* H263plusVideoRTPSource::MIMEtype() const {
return "video/H263-1998";
}
live/liveMedia/DVVideoRTPSource.cpp 000444 001751 000000 00000004267 12265042432 017422 0 ustar 00rsf wheel 000000 000000 /**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See