recutils-1.8/Makefile.am:
# Top-level Makefile.am
# Copyright (C) 2009-2015 Jose E. Marchesi
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ACLOCAL_AMFLAGS = -I m4
SUBDIRS = lib libcsv src utils bash doc po man torture etc
if BUILD_A68
SUBDIRS += algol68
endif
perf:
	$(MAKE) -C torture/utils perf
.PHONY: perf
# End of Makefile.am
recutils-1.8/doc/Makefile.am:
# doc/Makefile.am
# Copyright (C) 2009-2019 Jose E. Marchesi
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
info_TEXINFOS = recutils.texi rec-mode.texi
recutils_TEXINFOS = parse-datetime.texi fdl.texi
# End of Makefile.am
recutils-1.8/doc/fdl.texi:
@c The GNU Free Documentation License.
@center Version 1.3, 3 November 2008
@c This file is intended to be included within another document,
@c hence no sectioning command or @node.
@display
Copyright @copyright{} 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc.
@uref{http://fsf.org/}
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
@end display
@enumerate 0
@item
PREAMBLE
The purpose of this License is to make a manual, textbook, or other
functional and useful document @dfn{free} in the sense of freedom: to
assure everyone the effective freedom to copy and redistribute it,
with or without modifying it, either commercially or noncommercially.
Secondarily, this License preserves for the author and publisher a way
to get credit for their work, while not being considered responsible
for modifications made by others.
This License is a kind of ``copyleft'', which means that derivative
works of the document must themselves be free in the same sense. It
complements the GNU General Public License, which is a copyleft
license designed for free software.
We have designed this License in order to use it for manuals for free
software, because free software needs free documentation: a free
program should come with manuals providing the same freedoms that the
software does. But this License is not limited to software manuals;
it can be used for any textual work, regardless of subject matter or
whether it is published as a printed book. We recommend this License
principally for works whose purpose is instruction or reference.
@item
APPLICABILITY AND DEFINITIONS
This License applies to any manual or other work, in any medium, that
contains a notice placed by the copyright holder saying it can be
distributed under the terms of this License. Such a notice grants a
world-wide, royalty-free license, unlimited in duration, to use that
work under the conditions stated herein. The ``Document'', below,
refers to any such manual or work. Any member of the public is a
licensee, and is addressed as ``you''. You accept the license if you
copy, modify or distribute the work in a way requiring permission
under copyright law.
A ``Modified Version'' of the Document means any work containing the
Document or a portion of it, either copied verbatim, or with
modifications and/or translated into another language.
A ``Secondary Section'' is a named appendix or a front-matter section
of the Document that deals exclusively with the relationship of the
publishers or authors of the Document to the Document's overall
subject (or to related matters) and contains nothing that could fall
directly within that overall subject. (Thus, if the Document is in
part a textbook of mathematics, a Secondary Section may not explain
any mathematics.) The relationship could be a matter of historical
connection with the subject or with related matters, or of legal,
commercial, philosophical, ethical or political position regarding
them.
The ``Invariant Sections'' are certain Secondary Sections whose titles
are designated, as being those of Invariant Sections, in the notice
that says that the Document is released under this License. If a
section does not fit the above definition of Secondary then it is not
allowed to be designated as Invariant. The Document may contain zero
Invariant Sections. If the Document does not identify any Invariant
Sections then there are none.
The ``Cover Texts'' are certain short passages of text that are listed,
as Front-Cover Texts or Back-Cover Texts, in the notice that says that
the Document is released under this License. A Front-Cover Text may
be at most 5 words, and a Back-Cover Text may be at most 25 words.
A ``Transparent'' copy of the Document means a machine-readable copy,
represented in a format whose specification is available to the
general public, that is suitable for revising the document
straightforwardly with generic text editors or (for images composed of
pixels) generic paint programs or (for drawings) some widely available
drawing editor, and that is suitable for input to text formatters or
for automatic translation to a variety of formats suitable for input
to text formatters. A copy made in an otherwise Transparent file
format whose markup, or absence of markup, has been arranged to thwart
or discourage subsequent modification by readers is not Transparent.
An image format is not Transparent if used for any substantial amount
of text. A copy that is not ``Transparent'' is called ``Opaque''.
Examples of suitable formats for Transparent copies include plain
ASCII without markup, Texinfo input format, La@TeX{} input
format, SGML or XML using a publicly available
DTD, and standard-conforming simple HTML,
PostScript or PDF designed for human modification. Examples
of transparent image formats include PNG, XCF and
JPG. Opaque formats include proprietary formats that can be
read and edited only by proprietary word processors, SGML or
XML for which the DTD and/or processing tools are
not generally available, and the machine-generated HTML,
PostScript or PDF produced by some word processors for
output purposes only.
The ``Title Page'' means, for a printed book, the title page itself,
plus such following pages as are needed to hold, legibly, the material
this License requires to appear in the title page. For works in
formats which do not have any title page as such, ``Title Page'' means
the text near the most prominent appearance of the work's title,
preceding the beginning of the body of the text.
The ``publisher'' means any person or entity that distributes copies
of the Document to the public.
A section ``Entitled XYZ'' means a named subunit of the Document whose
title either is precisely XYZ or contains XYZ in parentheses following
text that translates XYZ in another language. (Here XYZ stands for a
specific section name mentioned below, such as ``Acknowledgements'',
``Dedications'', ``Endorsements'', or ``History''.) To ``Preserve the Title''
of such a section when you modify the Document means that it remains a
section ``Entitled XYZ'' according to this definition.
The Document may include Warranty Disclaimers next to the notice which
states that this License applies to the Document. These Warranty
Disclaimers are considered to be included by reference in this
License, but only as regards disclaiming warranties: any other
implication that these Warranty Disclaimers may have is void and has
no effect on the meaning of this License.
@item
VERBATIM COPYING
You may copy and distribute the Document in any medium, either
commercially or noncommercially, provided that this License, the
copyright notices, and the license notice saying this License applies
to the Document are reproduced in all copies, and that you add no other
conditions whatsoever to those of this License. You may not use
technical measures to obstruct or control the reading or further
copying of the copies you make or distribute. However, you may accept
compensation in exchange for copies. If you distribute a large enough
number of copies you must also follow the conditions in section 3.
You may also lend copies, under the same conditions stated above, and
you may publicly display copies.
@item
COPYING IN QUANTITY
If you publish printed copies (or copies in media that commonly have
printed covers) of the Document, numbering more than 100, and the
Document's license notice requires Cover Texts, you must enclose the
copies in covers that carry, clearly and legibly, all these Cover
Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
the back cover. Both covers must also clearly and legibly identify
you as the publisher of these copies. The front cover must present
the full title with all words of the title equally prominent and
visible. You may add other material on the covers in addition.
Copying with changes limited to the covers, as long as they preserve
the title of the Document and satisfy these conditions, can be treated
as verbatim copying in other respects.
If the required texts for either cover are too voluminous to fit
legibly, you should put the first ones listed (as many as fit
reasonably) on the actual cover, and continue the rest onto adjacent
pages.
If you publish or distribute Opaque copies of the Document numbering
more than 100, you must either include a machine-readable Transparent
copy along with each Opaque copy, or state in or with each Opaque copy
a computer-network location from which the general network-using
public has access to download using public-standard network protocols
a complete Transparent copy of the Document, free of added material.
If you use the latter option, you must take reasonably prudent steps,
when you begin distribution of Opaque copies in quantity, to ensure
that this Transparent copy will remain thus accessible at the stated
location until at least one year after the last time you distribute an
Opaque copy (directly or through your agents or retailers) of that
edition to the public.
It is requested, but not required, that you contact the authors of the
Document well before redistributing any large number of copies, to give
them a chance to provide you with an updated version of the Document.
@item
MODIFICATIONS
You may copy and distribute a Modified Version of the Document under
the conditions of sections 2 and 3 above, provided that you release
the Modified Version under precisely this License, with the Modified
Version filling the role of the Document, thus licensing distribution
and modification of the Modified Version to whoever possesses a copy
of it. In addition, you must do these things in the Modified Version:
@enumerate A
@item
Use in the Title Page (and on the covers, if any) a title distinct
from that of the Document, and from those of previous versions
(which should, if there were any, be listed in the History section
of the Document). You may use the same title as a previous version
if the original publisher of that version gives permission.
@item
List on the Title Page, as authors, one or more persons or entities
responsible for authorship of the modifications in the Modified
Version, together with at least five of the principal authors of the
Document (all of its principal authors, if it has fewer than five),
unless they release you from this requirement.
@item
State on the Title page the name of the publisher of the
Modified Version, as the publisher.
@item
Preserve all the copyright notices of the Document.
@item
Add an appropriate copyright notice for your modifications
adjacent to the other copyright notices.
@item
Include, immediately after the copyright notices, a license notice
giving the public permission to use the Modified Version under the
terms of this License, in the form shown in the Addendum below.
@item
Preserve in that license notice the full lists of Invariant Sections
and required Cover Texts given in the Document's license notice.
@item
Include an unaltered copy of this License.
@item
Preserve the section Entitled ``History'', Preserve its Title, and add
to it an item stating at least the title, year, new authors, and
publisher of the Modified Version as given on the Title Page. If
there is no section Entitled ``History'' in the Document, create one
stating the title, year, authors, and publisher of the Document as
given on its Title Page, then add an item describing the Modified
Version as stated in the previous sentence.
@item
Preserve the network location, if any, given in the Document for
public access to a Transparent copy of the Document, and likewise
the network locations given in the Document for previous versions
it was based on. These may be placed in the ``History'' section.
You may omit a network location for a work that was published at
least four years before the Document itself, or if the original
publisher of the version it refers to gives permission.
@item
For any section Entitled ``Acknowledgements'' or ``Dedications'', Preserve
the Title of the section, and preserve in the section all the
substance and tone of each of the contributor acknowledgements and/or
dedications given therein.
@item
Preserve all the Invariant Sections of the Document,
unaltered in their text and in their titles. Section numbers
or the equivalent are not considered part of the section titles.
@item
Delete any section Entitled ``Endorsements''. Such a section
may not be included in the Modified Version.
@item
Do not retitle any existing section to be Entitled ``Endorsements'' or
to conflict in title with any Invariant Section.
@item
Preserve any Warranty Disclaimers.
@end enumerate
If the Modified Version includes new front-matter sections or
appendices that qualify as Secondary Sections and contain no material
copied from the Document, you may at your option designate some or all
of these sections as invariant. To do this, add their titles to the
list of Invariant Sections in the Modified Version's license notice.
These titles must be distinct from any other section titles.
You may add a section Entitled ``Endorsements'', provided it contains
nothing but endorsements of your Modified Version by various
parties---for example, statements of peer review or that the text has
been approved by an organization as the authoritative definition of a
standard.
You may add a passage of up to five words as a Front-Cover Text, and a
passage of up to 25 words as a Back-Cover Text, to the end of the list
of Cover Texts in the Modified Version. Only one passage of
Front-Cover Text and one of Back-Cover Text may be added by (or
through arrangements made by) any one entity. If the Document already
includes a cover text for the same cover, previously added by you or
by arrangement made by the same entity you are acting on behalf of,
you may not add another; but you may replace the old one, on explicit
permission from the previous publisher that added the old one.
The author(s) and publisher(s) of the Document do not by this License
give permission to use their names for publicity for or to assert or
imply endorsement of any Modified Version.
@item
COMBINING DOCUMENTS
You may combine the Document with other documents released under this
License, under the terms defined in section 4 above for modified
versions, provided that you include in the combination all of the
Invariant Sections of all of the original documents, unmodified, and
list them all as Invariant Sections of your combined work in its
license notice, and that you preserve all their Warranty Disclaimers.
The combined work need only contain one copy of this License, and
multiple identical Invariant Sections may be replaced with a single
copy. If there are multiple Invariant Sections with the same name but
different contents, make the title of each such section unique by
adding at the end of it, in parentheses, the name of the original
author or publisher of that section if known, or else a unique number.
Make the same adjustment to the section titles in the list of
Invariant Sections in the license notice of the combined work.
In the combination, you must combine any sections Entitled ``History''
in the various original documents, forming one section Entitled
``History''; likewise combine any sections Entitled ``Acknowledgements'',
and any sections Entitled ``Dedications''. You must delete all
sections Entitled ``Endorsements.''
@item
COLLECTIONS OF DOCUMENTS
You may make a collection consisting of the Document and other documents
released under this License, and replace the individual copies of this
License in the various documents with a single copy that is included in
the collection, provided that you follow the rules of this License for
verbatim copying of each of the documents in all other respects.
You may extract a single document from such a collection, and distribute
it individually under this License, provided you insert a copy of this
License into the extracted document, and follow this License in all
other respects regarding verbatim copying of that document.
@item
AGGREGATION WITH INDEPENDENT WORKS
A compilation of the Document or its derivatives with other separate
and independent documents or works, in or on a volume of a storage or
distribution medium, is called an ``aggregate'' if the copyright
resulting from the compilation is not used to limit the legal rights
of the compilation's users beyond what the individual works permit.
When the Document is included in an aggregate, this License does not
apply to the other works in the aggregate which are not themselves
derivative works of the Document.
If the Cover Text requirement of section 3 is applicable to these
copies of the Document, then if the Document is less than one half of
the entire aggregate, the Document's Cover Texts may be placed on
covers that bracket the Document within the aggregate, or the
electronic equivalent of covers if the Document is in electronic form.
Otherwise they must appear on printed covers that bracket the whole
aggregate.
@item
TRANSLATION
Translation is considered a kind of modification, so you may
distribute translations of the Document under the terms of section 4.
Replacing Invariant Sections with translations requires special
permission from their copyright holders, but you may include
translations of some or all Invariant Sections in addition to the
original versions of these Invariant Sections. You may include a
translation of this License, and all the license notices in the
Document, and any Warranty Disclaimers, provided that you also include
the original English version of this License and the original versions
of those notices and disclaimers. In case of a disagreement between
the translation and the original version of this License or a notice
or disclaimer, the original version will prevail.
If a section in the Document is Entitled ``Acknowledgements'',
``Dedications'', or ``History'', the requirement (section 4) to Preserve
its Title (section 1) will typically require changing the actual
title.
@item
TERMINATION
You may not copy, modify, sublicense, or distribute the Document
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense, or distribute it is void, and
will automatically terminate your rights under this License.
However, if you cease all violation of this License, then your license
from a particular copyright holder is reinstated (a) provisionally,
unless and until the copyright holder explicitly and finally
terminates your license, and (b) permanently, if the copyright holder
fails to notify you of the violation by some reasonable means prior to
60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, receipt of a copy of some or all of the same material does
not give you any rights to use it.
@item
FUTURE REVISIONS OF THIS LICENSE
The Free Software Foundation may publish new, revised versions
of the GNU Free Documentation License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns. See
@uref{http://www.gnu.org/copyleft/}.
Each version of the License is given a distinguishing version number.
If the Document specifies that a particular numbered version of this
License ``or any later version'' applies to it, you have the option of
following the terms and conditions either of that specified version or
of any later version that has been published (not as a draft) by the
Free Software Foundation. If the Document does not specify a version
number of this License, you may choose any version ever published (not
as a draft) by the Free Software Foundation. If the Document
specifies that a proxy can decide which future versions of this
License can be used, that proxy's public statement of acceptance of a
version permanently authorizes you to choose that version for the
Document.
@item
RELICENSING
``Massive Multiauthor Collaboration Site'' (or ``MMC Site'') means any
World Wide Web server that publishes copyrightable works and also
provides prominent facilities for anybody to edit those works. A
public wiki that anybody can edit is an example of such a server. A
``Massive Multiauthor Collaboration'' (or ``MMC'') contained in the
site means any set of copyrightable works thus published on the MMC
site.
``CC-BY-SA'' means the Creative Commons Attribution-Share Alike 3.0
license published by Creative Commons Corporation, a not-for-profit
corporation with a principal place of business in San Francisco,
California, as well as future copyleft versions of that license
published by that same organization.
``Incorporate'' means to publish or republish a Document, in whole or
in part, as part of another Document.
An MMC is ``eligible for relicensing'' if it is licensed under this
License, and if all works that were first published under this License
somewhere other than this MMC, and subsequently incorporated in whole
or in part into the MMC, (1) had no cover texts or invariant sections,
and (2) were thus incorporated prior to November 1, 2008.
The operator of an MMC Site may republish an MMC contained in the site
under CC-BY-SA on the same site at any time before August 1, 2009,
provided the MMC is eligible for relicensing.
@end enumerate
@page
@heading ADDENDUM: How to use this License for your documents
To use this License in a document you have written, include a copy of
the License in the document and put the following copyright and
license notices just after the title page:
@smallexample
@group
Copyright (C) @var{year} @var{your name}.
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3
or any later version published by the Free Software Foundation;
with no Invariant Sections, no Front-Cover Texts, and no Back-Cover
Texts. A copy of the license is included in the section entitled ``GNU
Free Documentation License''.
@end group
@end smallexample
If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts,
replace the ``with@dots{}Texts.'' line with this:
@smallexample
@group
with the Invariant Sections being @var{list their titles}, with
the Front-Cover Texts being @var{list}, and with the Back-Cover Texts
being @var{list}.
@end group
@end smallexample
If you have Invariant Sections without Cover Texts, or some other
combination of the three, merge those two alternatives to suit the
situation.
If your document contains nontrivial examples of program code, we
recommend releasing these examples in parallel under your choice of
free software license, such as the GNU General Public License,
to permit their use in free software.
@c Local Variables:
@c ispell-local-pdict: "ispell-dict"
@c End:
recutils-1.8/doc/rec-mode.texi:
\input texinfo
@comment %**start of header
@setfilename rec-mode.info
@include version-rec-mode.texi
@settitle rec-mode for Emacs
@afourpaper
@comment %**end of header
@c Macros used in this file.
@macro ie
@i{i.e.@:}
@end macro
@macro reccmd{key,command}
@item @kbd{\key\} @tie{}@tie{}@tie{}@tie{}(@code{\command\})
@end macro
@copying
This manual is for rec-mode for Emacs, which is part of the GNU
recutils suite (version @value{VERSION}, @value{UPDATED}).
Copyright @copyright{} 2012-2019 Jose E. Marchesi
@quotation
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3 or
any later version published by the Free Software Foundation; with no
Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A
copy of the license is included in the section entitled ``GNU Free
Documentation License''.
@end quotation
@end copying
@dircategory Database
@direntry
* rec-mode: (rec-mode). Emacs mode for editing recfiles.
@end direntry
@titlepage
@title rec-mode: an Emacs mode for editing recfiles
@subtitle for version @value{VERSION}, @value{UPDATED}
@author by Jose E. Marchesi
@page
@vskip 0pt plus 1filll
@insertcopying
@end titlepage
@contents
@ifnottex
@node Top
@top rec-mode: an Emacs mode for editing recfiles
This manual documents version @value{VERSION} of rec-mode.
@insertcopying
@end ifnottex
@menu
* Introduction:: Getting started
* Navigation mode:: User-friendly interface for browsing recfiles
* Edition modes:: Edit recfiles in rec-format
* Configuration:: Adapting rec-mode to your needs
* GNU Free Documentation License:: Distribution terms for this document
@detailmenu
--- The Detailed Node Listing ---
Introduction
* Installation:: How to install rec-mode
* Activation:: How to activate rec-mode for certain buffers
* Feedback:: Bug reports, ideas, patches etc.
Navigation mode
* Record navigation:: Moving through records.
* Field navigation:: Moving through fields in a record.
* Field folding:: Hiding and showing the values of fields.
* Field edition:: Changing the values of fields.
* Searches:: Finding records fulfilling some criteria.
* Statistics:: Counting records.
* Data integrity:: Verifying the integrity of the recfile.
Edition modes
* Edition modes:: Edit recfiles in rec-format
Configuration
* Finding the recutils:: Specifying the location of the recutils.
* Records appearance:: Setting the way records are displayed.
@end detailmenu
@end menu
@node Introduction
@chapter Introduction
rec-mode is a mode for browsing and editing recfiles, which are text
files containing data structured in fields and records. It is part of
the GNU recutils@footnote{@url{http://www.gnu.org/software/recutils}}
suite.
Recfiles are text-based databases which are easy to read and write
manually using a text editor. At the same time they feature enough
structure so they can be read, edited and processed automatically by
programs.
@menu
* Installation:: How to install rec-mode
* Activation:: How to activate rec-mode for certain buffers
* Feedback:: Bug reports, ideas, patches etc.
@end menu
@node Installation
@section Installation
rec-mode is implemented in a self-contained elisp file called
@file{rec-mode.el}. It can be obtained in several ways:
@itemize @minus
@item As part of a released tarball of recutils. @file{rec-mode.el}
can be found in the @file{etc/} directory in the tarball contents.
@item As part of the source tree cloned from the development git
repo. @file{rec-mode.el} can be found in the @file{etc/} directory in
the recutils sources tree.
@item As a single file downloaded from some other location on the
internet.
@item It may be already installed as part of a binary package in some
distribution.
@end itemize
@noindent In the first three cases you need to tell Emacs where to locate the
@file{rec-mode.el} file and to load it. Add the following to your
@file{.emacs} file.
@lisp
(add-to-list 'load-path "~/path/to/recmode/")
(require 'rec-mode)
@end lisp
@noindent If @file{rec-mode.el} was installed as part of a binary
package in a distribution then you usually don't have to touch the
@code{load-path} variable. Depending on the specific case you may
have to @code{require} the package.
@node Activation
@section Activation
@cindex activation
To make sure files with extension @file{.rec} use rec-mode, add the
following line to your @file{.emacs} file.
@lisp
(add-to-list 'auto-mode-alist '("\\.rec\\'" . rec-mode))
@end lisp
@noindent rec-mode buffers need font-lock to be turned on - this is
the default in Emacs@footnote{If you don't use font-lock globally,
turn it on in the rec buffer with @code{(add-hook 'rec-mode-hook
'turn-on-font-lock)}}.
With this setup, all files with extension @samp{.rec} will be put into
rec mode. As an alternative, make the first line of a recfile look
like this:
@example
# -*- mode: rec -*-
@end example
@noindent which will select rec-mode for this buffer no matter what
the file's name is.
@node Feedback
@section Feedback
@cindex feedback
@cindex bug reports
@cindex maintainer
@cindex author
If you find problems with rec-mode, or if you have questions, remarks,
or ideas about it, please send mail to the recutils mailing list
@email{bug-recutils@@gnu.org}. If you are not a member of the mailing
list, your mail will be passed to the list after a moderator has
approved it@footnote{Please consider subscribing to the mailing list,
in order to minimize the work the mailing list moderators have to do.
The subscription can be done online at
@url{http://lists.gnu.org/mailman/listinfo/bug-recutils}.}.
@node Navigation mode
@chapter Navigation mode
When a recfile is visited in Emacs and rec-mode is activated, the
contents of the file are examined and parsed in order to determine if
it is a valid recfile and, in that case, to extract information like
the kind of records stored in the file.
If the file does not contain valid rec data then the buffer is put in
@code{fundamental-mode} and a description of the syntax error, along
with its location, is shown in the echo area.
If the file contains valid rec data, the mode sets itself in what is
known as ``navigation mode''. In this mode the buffer is made
read-only and it is narrowed to the first record present in the file.
Also, the presentation of the record contents is slightly changed in
order to improve the visualization of the data: continuation line
marks are replaced by indentation, big fields are folded, etc. The
modeline is changed in order to reflect the type of the records being
navigated.
At this point the user can navigate through the records and fields
contained in the file, and edit the contents of the fields and the
structure of the records, by using the commands described in the
following subsections.
@menu
* Record navigation:: Moving through records
* Field navigation:: Moving through fields in a record
* Field folding:: Hiding and showing the values of fields
* Field edition:: Changing the values of fields
* Searches:: Finding records fulfilling some criteria
* Statistics:: Counting records
* Data integrity:: Verifying the integrity of the recfile
@end menu
@node Record navigation
@section Record navigation
@cindex motion, between records
@cindex jumping, to records
@cindex record navigation
The following commands jump to other records in the buffer.
@table @asis
@reccmd{n,rec-cmd-goto-next-rec}
Display the next record of the same type in the buffer.
@*`C-u N n' will move forward N records.
@reccmd{p,rec-cmd-goto-previous-rec}
Display the previous record of the same type in the buffer.
@*`C-u N p' will move backwards N records.
@reccmd{d, rec-cmd-show-descriptor}
Display the record descriptor applicable to the current record. If
the current record is anonymous, @ie{} there is no record
descriptor, then this command does nothing.
@reccmd{b, rec-cmd-jump-back}
Display the record previously displayed in the buffer.
@reccmd{C-c t, rec-find-type}
Prompt the user for one of the record types present in the recfile and
display the first record of the selected type.
@end table
@node Field navigation
@section Field navigation
The following commands iterate through the fields in a record, and
get information about some of the properties of the fields.
@table @asis
@reccmd{TAB, rec-cmd-goto-next-field}
Move the cursor to the beginning of the name of the next field in the
current record. If the cursor is currently located at the last field
of the record then move it to the beginning of the first field.
@reccmd{t, rec-cmd-show-type}
Show information about the type of the field under the cursor, if it
is defined.
@end table
@node Field folding
@section Field folding
Fields in recfiles can contain data of any size, and sometimes it is
difficult to have an overview of the contents of the record. The
following commands fold and unfold the value of the field under the
cursor.
@table @asis
@reccmd{SPC, rec-cmd-toggle-field-visibility}
Toggle the visibility of the field under the cursor. When a field is
folded then three dots are displayed in the buffer instead of the
value of the field.
It is possible to automatically fold any field whose value exceeds a
certain limit which can be configured by the user. @xref{Records
appearance}.
@end table
@node Field edition
@section Field edition
The following commands change the value of the field under the cursor.
@table @asis
@reccmd{e, rec-cmd-edit-field}
Edit the value of the field under the cursor. The specific action
depends on the type of the field in the corresponding record
descriptor:
@itemize @minus
@item For @b{date} fields a calendar buffer is opened in another window
and the focus is moved there. The user can then select a date by
moving the cursor and pressing @kbd{RET} in order to set that date
as the value for the field. Alternatively the user can press @kbd{t}
in order to set the field to ``now'', or @kbd{q} to cancel the
operation. In the latter case the value of the field is left
untouched.
@item For @b{enumerated} and @b{bool} fields a fast-select buffer is opened in
another window, showing a list of labeled options. The labels are
single digits and letters. The user can then select any of the
options by pressing the corresponding label, or cancel the operation
by pressing @kbd{RET}. In the latter case the value of the field is
left untouched.
@item For any other kind of field an edition buffer is opened in
another window, showing the current contents of the field. The user
can then edit the buffer as desired. When done, the user can
press @kbd{C-c C-c} in order to set the new value of the field,
or just kill the buffer to cancel the operation.
@end itemize
@reccmd{m, rec-cmd-trim-field-value}
Trim the value of the field under the cursor, removing any sequence of
leading and trailing blank characters.
@end table
@node Searches
@section Searches
The following commands jump to the first record in the buffer
satisfying some criteria.
@table @asis
@reccmd{s q, rec-cmd-select-fast}
Display the first record having a field whose value matches a given
fixed pattern. This is equivalent to using the command line option
@option{-q} of @command{recsel}. If a prefix argument is specified
then the search is case-insensitive.
@reccmd{s s, rec-cmd-select-sex}
Display the first record in the buffer satisfying a given selection
expression. This is equivalent to using the command line option
@option{-e} of @command{recsel}. If a prefix argument is specified
then the search is case-insensitive.
@end table
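As an illustration, assuming a record set with @code{Age} and
@code{Location} fields (hypothetical names, not taken from this
manual's examples), pressing @kbd{s s} and entering a selection
expression such as the following would display the first matching
record:

@example
Age > 30 && Location = 'home'
@end example

@noindent This is the same selection expression language accepted by
the @option{-e} option of @command{recsel}.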
@node Statistics
@section Statistics
The following commands count records in the current buffer
based on some provided criteria.
@table @asis
@reccmd{I, rec-cmd-show-info}
Show the number of records in the buffer categorized by type.
@reccmd{#, rec-cmd-count}
Count the number of records in the buffer having the same type as the
current record. With a numeric prefix N, ask for a selection
expression and count the number of records in the buffer satisfying
the expression.
Note that rec-mode tries to guess a reasonable default for the
selection expression, depending on the type of the field and its
value. If the user presses @kbd{RET} then the provided default
selection expression is used.
@reccmd{%, rec-cmd-statistics}
If the field under the cursor contains an enumerated value, show the
percentages of records in the current record set having fields with
each of the possible values of the enumerated type.
@end table
@node Data integrity
@section Data integrity
The integrity of the rec data stored in the file can be checked using
the following commands.
@table @asis
@reccmd{c, rec-cmd-compile}
Compile the buffer with @command{recfix} and open a compilation window
showing the result of the command. In case some error or warning is
reported, the user can jump to the location triggering the error by
pressing @kbd{RET} in the compilation window.
@end table
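Outside Emacs, the same kind of check can be performed by running
@command{recfix} directly on the file; a minimal sketch, with a
hypothetical file name:

@example
$ recfix --check books.rec
@end example

@noindent If integrity problems are found they are reported as
diagnostics and @command{recfix} exits with an error status, which is
also handy in scripts.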
@node Edition modes
@chapter Edition modes
The navigation mode described in the previous chapter is mainly intended
for browsing rec data and making changes at the record level: editing
the contents of a field, adding or removing fields, etc. In order to
perform broader changes, such as adding/deleting record descriptors,
records or comment blocks, the user must enter into one of the
``edition modes''.
There are three edition modes, covering different areas of the
recfile: record, record type and buffer. When an edition mode is
entered the buffer is set in read/write mode, it is narrowed to the
desired area and any embellishment used in navigation mode is
removed@footnote{Except for font-lock}. As a general rule, the
commands available in navigation mode are also available in the
edition mode prefixed with @kbd{C-c}. Thus, @kbd{C-c n} would make
the cursor jump to the beginning of the next record.
The following commands are used to enter into one of the available
edition modes from the navigation mode.
@table @asis
@reccmd{R, rec-edit-record}
Edit the record being navigated.
@reccmd{T, rec-edit-type}
Edit the record set being navigated.
@reccmd{B, rec-edit-buffer}
Edit the buffer.
@end table
@noindent After doing modifications in the buffer, the user can go
back to navigation mode by using the following command.
@table @asis
@reccmd{C-c C-c, rec-finish-editing}
Finish the current edition and return to navigation mode. If a
syntactic error was introduced while editing then the error
is reported in the echo area and navigation mode is not entered.
@end table
@node Configuration
@chapter Configuration
TBC
@menu
* Finding the recutils:: Specifying the location of the recutils.
* Records appearance:: Setting the way records are displayed
@end menu
@node Finding the recutils
@section Finding the recutils
@code{rec-mode} makes use of several utilities which are part of
the recutils. The following variables tell the mode where to find the
utilities. The default values of these variables should work if the
recutils are installed system-wide.
@table @code
@item rec-recsel
Name of the @command{recsel} utility from the GNU recutils.
@item rec-recinf
Name of the @command{recinf} utility from the GNU recutils.
@item rec-recfix
Name of the @command{recfix} utility from the GNU recutils.
@end table
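For instance, if the utilities live in a non-standard location, the
variables can be pointed there from your @file{.emacs}; a minimal
sketch, with hypothetical paths:

@lisp
;; Tell rec-mode where to find the recutils programs.
;; The paths below are hypothetical; adjust them to your system.
(setq rec-recsel "/opt/recutils/bin/recsel"
      rec-recinf "/opt/recutils/bin/recinf"
      rec-recfix "/opt/recutils/bin/recfix")
@end lisp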
@node Records appearance
@section Records appearance
The appearance of the records in navigation mode can be customised by
tweaking the value of the following variables.
@table @code
@item rec-max-lines-in-fields
Values in fields having more than the specified number of lines will be
hidden by default in navigation mode. When hidden, an ellipsis is
shown instead of the value of the field. Default is @code{15}.
@end table
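For example, to fold any field value longer than five lines instead of
the default fifteen, you could add the following to your @file{.emacs}:

@lisp
;; Fold field values longer than 5 lines while in navigation mode.
(setq rec-max-lines-in-fields 5)
@end lisp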
@node GNU Free Documentation License
@appendix GNU Free Documentation License
@include fdl.texi
@bye
recutils-1.8/doc/stamp-1:
@set UPDATED 3 January 2019
@set UPDATED-MONTH January 2019
@set EDITION 1.8
@set VERSION 1.8
recutils-1.8/doc/recutils.info:
This is recutils.info, produced by makeinfo version 6.3 from
recutils.texi.
This manual is for GNU recutils (version 1.8, 3 January 2019).
Copyright (C) 2009-2019 Jose E. Marchesi
Copyright (C) 1994-2014 Free Software Foundation, Inc.
Permission is granted to copy, distribute and/or modify this
document under the terms of the GNU Free Documentation License,
Version 1.3 or any later version published by the Free Software
Foundation; with no Invariant Sections, no Front-Cover Texts, and
no Back-Cover Texts. A copy of the license is included in the
section entitled "GNU Free Documentation License".
INFO-DIR-SECTION Database
START-INFO-DIR-ENTRY
* recutils: (recutils). The GNU Recutils manual.
END-INFO-DIR-ENTRY
INFO-DIR-SECTION Individual utilities
START-INFO-DIR-ENTRY
* recinf: (recutils)Invoking recinf. Get info about recfiles.
* recsel: (recutils)Invoking recsel. Read records.
* recins: (recutils)Invoking recins. Insert records.
* recdel: (recutils)Invoking recdel. Delete records.
* recset: (recutils)Invoking recset. Manage fields.
* recfix: (recutils)Invoking recfix. Fix recfiles.
* csv2rec: (recutils)Invoking csv2rec. CSV to recfiles.
* rec2csv: (recutils)Invoking rec2csv. Recfiles to CSV.
* mdb2rec: (recutils)Invoking mdb2rec. MDB to recfiles.
END-INFO-DIR-ENTRY
File: recutils.info, Node: Top, Next: Introduction, Up: (dir)
GNU Recutils
************
This manual documents version 1.8 of the GNU recutils.
This manual is for GNU recutils (version 1.8, 3 January 2019).
Copyright (C) 2009-2019 Jose E. Marchesi
Copyright (C) 1994-2014 Free Software Foundation, Inc.
Permission is granted to copy, distribute and/or modify this
document under the terms of the GNU Free Documentation License,
Version 1.3 or any later version published by the Free Software
Foundation; with no Invariant Sections, no Front-Cover Texts, and
no Back-Cover Texts. A copy of the license is included in the
section entitled "GNU Free Documentation License".
* Menu:
The Basics
* Introduction:: Introducing recutils.
* The Rec Format:: Writing recfiles.
Using the Recutils
* Querying Recfiles:: Extracting data from recfiles.
* Editing Records:: Inserting and deleting records.
* Editing Fields:: Inserting, modifying and deleting fields.
Data Integrity
* Field Types:: Restrictions on the values of fields.
* Constraints on Record Sets:: Requiring or forbidding specific fields.
* Checking Recfiles:: Making sure the data is ok.
Advanced Topics
* Remote Descriptors:: Implementing distributed databases.
* Grouping and Aggregates:: Statistics.
* Queries which Join Records:: Crossing records of different types.
* Auto-Generated Fields:: Counters and time-stamps.
* Encryption:: Storing sensitive information.
* Generating Reports:: Formatted output with templates.
* Interoperability:: Importing and exporting to other formats.
* Bash Builtins:: Boosting the recutils in the shell.
Reference Material
* Invoking the Utilities:: Exhaustive list of command line arguments.
* Regular Expressions:: Flavor of regexps supported in recutils.
* Date input formats:: Specifying dates and times.
* GNU Free Documentation License:: Distribution terms for this document.
Indexes
* Concept Index::
-- The Detailed Node Listing --
----------------------
Here are some other nodes which are really subnodes of the ones
already listed, mentioned here so you can get to them in one step:
Introduction
* Purpose:: Why recutils.
* A Little Example:: Recutils in action.
The Rec Format
* Fields:: The key-value pairs which comprise the data.
* Records:: The main entities of a recfile.
* Comments:: Information for humans' benefit only.
* Record Descriptors:: Describing different types of records.
Querying Recfiles
* Simple Selections:: Introducing 'recsel'.
* Selecting by Type:: Get the records of some given type.
* Selecting by Position:: Get the record occupying some position.
* Random Records:: Get a set of random records.
* Selection Expressions:: Get the records satisfying some expression.
* Field Expressions:: Selecting a subset of fields.
* Sorted Output:: Get the records in a given order.
Editing Records
* Inserting Records:: Inserting data into recfiles.
* Deleting Records:: Removing entries.
* Sorting Records:: Physical reordering of records.
Editing Fields
* Setting Fields:: Editing field values.
* Adding Fields:: Adding new fields to records.
* Deleting Fields:: Removing or commenting-out fields.
Field Types
* Declaring Types:: Declaration of types in record descriptors.
* Types and Fields:: Associating fields with types.
* Scalar Field Types:: Numbers and ranges.
* String Field Types:: Lines, limited strings and regular expressions.
* Enumerated Field Types:: Enumerations and boolean values.
* Date and Time Types:: Dates and times.
* Other Field Types:: Emails, fields, UUIDs, ...
Constraints on Record Sets
* Mandatory Fields:: Requiring the presence of fields.
* Prohibited Fields:: Forbidding the presence of fields.
* Allowed Fields:: Restricting the presence of fields.
* Keys and Unique Fields:: Fields characterizing records.
* Size Constraints:: Limiting the size of a record set.
* Arbitrary Constraints:: Constraints records must comply with.
Checking Recfiles
* Syntactical Errors:: Fixing structure errors in recfiles.
* Semantic Errors:: Fixing semantic errors in recfiles.
Remote Descriptors
Grouping and Aggregates
* Grouping Records:: Combining records by fields.
* Aggregate Functions:: Statistics and more.
Joins
* Foreign Keys:: Referring to records from other records.
* Joining Records:: Performing cross-joins.
Auto-Generated Fields
* Counters:: Generating incremental Ids.
* Unique Identifiers:: Generating universally unique Ids.
* Time-Stamps:: Tracking the creation of records.
Encryption
* Confidential Fields:: Declaring fields as sensitive data.
* Encrypting Files:: Encrypt confidential fields.
* Decrypting Data:: Reading encrypted fields.
Generating Reports
* Templates:: Formatted output.
Interoperability
* CSV Files:: Converting recfiles to/from csv files.
* Importing MDB Files:: Importing MS Access Databases.
Bash Builtins
* readrec:: Exporting the contents of records to the shell.
Invoking the Utilities
* Invoking recinf:: Printing information about rec files.
* Invoking recsel:: Selecting records.
* Invoking recins:: Inserting records.
* Invoking recdel:: Deleting records.
* Invoking recset:: Managing fields.
* Invoking recfix:: Fixing broken rec files, and diagnostics.
* Invoking recfmt:: Formatting records using templates.
* Invoking csv2rec:: Converting csv data into rec data.
* Invoking rec2csv:: Converting rec data into csv data.
* Invoking mdb2rec:: Converting mdb files into rec files.
File: recutils.info, Node: Introduction, Next: The Rec Format, Prev: Top, Up: Top
1 Introduction
**************
* Menu:
* Purpose:: Why recutils.
* A Little Example:: Recutils in action.
File: recutils.info, Node: Purpose, Next: A Little Example, Up: Introduction
1.1 Purpose
===========
GNU recutils is a set of tools and libraries to access human-editable,
text-based databases called _recfiles_. The data is stored as a
sequence of records, each record containing an arbitrary number of named
fields. Advanced capabilities usually found in other data storage
systems are supported: data types, data integrity (keys, mandatory
fields, etc.) as well as the ability of records to refer to other
records (a sort of foreign key). Despite their simplicity, recfiles can be
used to store medium-sized databases.
So, yet another data storage system? The mere existence of this
package deserves an explanation. There is a rich set of already
available free data storage systems, covering a broad range of
requirements. Big systems having complex data storage requirements will
probably make use of some full-fledged relational system such as MySQL
or PostgreSQL. Less demanding applications, or applications with
special deployment requirements, may find it more convenient to use a
simpler system such as SQLite, where the data is stored in a single
binary file. XML files are often used to store configuration settings
for programs, and to encode data for transmission through networks.
So it looks like all the needs are covered by the existing solutions
... but consider the following characteristics of the data storage
systems mentioned in the previous paragraph:
- The stored data is not directly human readable.
- The stored data is definitely not directly writable by humans.
- They are program dependent.
- They are not easily managed by version control systems.
Regarding the first point (human readability), while it is clearly
true for the binary files, some may argue XML files are indeed human
readable... well... 'try to r&iamp;ead this'. YAML (1) is an example of a hierarchical data
storage format which is much more readable than XML. The problem with
YAML is that it was designed as a "data serialization language" and thus
to map the data constructs usually found in programming languages. That
makes it too complex for the simple task of storing plain lists of
items.
Recfiles are human-readable, human-writable and still easy to parse
and to manipulate automatically. Obviously they are not suitable for
any task (for example, it can be difficult to manage hierarchies in
recfiles) and performance is somewhat sacrificed in favor of
readability. But they are quite handy to store small to medium simple
databases.
The GNU recutils suite comprises:
- This Texinfo manual, describing the Rec format and the accompanying
software.
- A C library (librec) that provides a rich set of functions to
manipulate rec data.
- A set of utilities that can be used in shell scripts and on the
command line to operate on rec files.
- An emacs mode, 'rec-mode'.
---------- Footnotes ----------
(1) Yet Another Markup Language
File: recutils.info, Node: A Little Example, Prev: Purpose, Up: Introduction
1.2 A Little Example
====================
Everyone loves to grow a nice book collection at home. Unfortunately,
in most cases the management of our private books gets out of control:
some books get lost, some of them may be loaned to a friend, there
are some duplicated (or even triplicated!) titles because we forgot
about the existence of the previous copy, and so on.
In order to improve the management of our little book collection we
could make use of a complex data storage system such as a relational
database. The problem with that approach, as explained in the previous
section, is that the tool is too complicated for the simple task: we do
not need the full power of a relational database system to maintain a
simple collection of books.
With GNU recutils it is possible to maintain such a little database
in a text file. Let's call it 'books.rec'. The following list summarizes
the information items that we want to store for each title, along with
some common-sense restrictions.
- Every book has a title, even if it is "No Title".
- A book can have several titles.
- A book can have more than one author.
- For some books the author is not known.
- Sometimes we don't care about who the author of a book is.
- We usually store our books at home.
- Sometimes we loan books to friends.
- On occasions we lose track of the physical location of a book. Did
we loan it to anyone? Was it lost in the last move? Is it in some
hidden place at home?
The contents of the rec file follows:
# -*- mode: rec -*-
%rec: Book
%mandatory: Title
%type: Location enum loaned home unknown
%doc:
+ A book in my personal collection.
Title: GNU Emacs Manual
Author: Richard M. Stallman
Publisher: FSF
Location: home
Title: The Colour of Magic
Author: Terry Pratchett
Location: loaned
Title: Mio Cid
Author: Anonymous
Location: home
Title: chapters.gnu.org administration guide
Author: Nacho Gonzalez
Author: Jose E. Marchesi
Location: unknown
Title: Yeelong User Manual
Location: home
# End of books.rec
Simple. The file contains a set of records separated by blank lines.
Each record comprises a set of fields with a name and a value.
The GNU recutils can then be used to access the contents of the file.
For example, we could get a list of the names of loaned books by
invoking 'recsel' in the following way:
$ recsel -e "Location = 'loaned'" -P Title books.rec
The Colour of Magic
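Similarly, 'recsel' can print just the number of matching records
instead of the records themselves, using its '-c' option. A minimal
sketch based on the same 'books.rec' data, which holds three books
whose Location is 'home':

$ recsel -c -e "Location = 'home'" books.rec
3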
File: recutils.info, Node: The Rec Format, Next: Querying Recfiles, Prev: Introduction, Up: Top
2 The Rec Format
****************
A recfile is nothing but a text file which conforms to a few simple
rules. This chapter shows you how, by observing these rules, recfiles
of arbitrary complexity can be written.
* Menu:
* Fields:: The key-value pairs which comprise the data.
* Records:: The main entities of a recfile.
* Comments:: Information for humans' benefit only.
* Record Descriptors:: Describing different types of records.
File: recutils.info, Node: Fields, Next: Records, Up: The Rec Format
2.1 Fields
==========
A "field" is the written form of an association between a label and a
value. For example, if we wanted to associate the label 'Name' with the
value 'Ada Lovelace' we would write:
Name: Ada Lovelace
The separator between the field name and the field value is a colon
followed by a blank character (space and tabs, but not newlines). The
name of the field shall begin in the first column of the line.
A "field name" is a sequence of alphanumeric characters plus
underscores ('_'), starting with a letter or the character '%'. The
regular expression denoting a field name is:
[a-zA-Z%][a-zA-Z0-9_]*
Field names are case-sensitive. 'Foo' and 'foo' are different field
names.
The following list contains valid field names (the final colon is not
part of the names):
Foo:
foo:
A23:
ab1:
A_Field:
The "value of a field" is a sequence of characters terminated by a
single newline character ('\n').
Sometimes a value is too long to fit in the usual width of terminals
and screens. In that case, depending on the specific tool used to
access the file, the readability of the data would not be that good. It
is therefore possible to physically split a logical line by escaping a
newline with a backslash character, as in:
LongLine: This is a quite long value \
comprising a single unique logical line \
split in several physical lines.
The sequence '\n' (newline) '+' (PLUS) and an optional '_' (SPACE) is
interpreted as a newline when found in a field value. For example, the
C string '"bar1\nbar2\n bar3"' would be encoded in the following way in
a field value:
Foo: bar1
+ bar2
+ bar3
File: recutils.info, Node: Records, Next: Comments, Prev: Fields, Up: The Rec Format
2.2 Records
===========
A "record" is a group of one or more fields written one after the other:
Name1: Value1
Name2: Value2
Name2: Value3
It is possible for several fields in a record to share the same name
and/or the same value. The following is a valid record containing
three fields:
Name: John Smith
Email: john.smith@foomail.com
Email: john@smith.name
The "size of a record" is defined as the number of fields that it
contains. A record cannot be empty, so the minimum size for a record is
1. The maximum number of fields for a record is only limited by the
available physical resources. The size of the previous record is 3.
Records are separated by one or more blank lines. For instance, the
following example shows a file named 'personalities.rec' featuring three
records:
Name: Ada Lovelace
Age: 36

Name: Peter the Great
Age: 53

Name: Matusalem
Age: 969
File: recutils.info, Node: Comments, Next: Record Descriptors, Prev: Records, Up: The Rec Format
2.3 Comments
============
Any line having an '#' (ASCII 0x23) character in the first column is a
comment line.
Comments may be used to insert information that is not part of the
database but useful in other ways. They are completely ignored by
processing tools and can only be seen by looking at the recfile itself.
It is also quite convenient to comment out information from the
recfile without removing it permanently: you may want to recover the
data into the database later! Comment lines can be used to comment out
both whole records and individual fields:
Name: Jose E. Marchesi
# Occupation: Software Engineer
# Severe lack of brain capacity
# Fired on 02/01/2009 (without compensation)
Occupation: Unoccupied
Comments are also useful for headers, footers, comment blocks and all
kind of markers:
# -*- mode: rec -*-
#
# TODO
#
# This file contains the Bugs database of GNU recutils.
#
# Blah blah...
...
# End of TODO
Unlike some file formats, comments in recfiles must be complete
lines. You cannot start a comment in the middle of a line. For
example, in the following record, the '#' does _not_ start a comment:
Name: Peter the Great # Russian Tsar
Age: 53
File: recutils.info, Node: Record Descriptors, Prev: Comments, Up: The Rec Format
2.4 Record Descriptors
======================
Certain properties of a set of records can be specified by preceding
them with a "record descriptor". A record descriptor is itself a
record, and uses fields with some predefined names to store properties.
* Menu:
* Record Sets:: Defining different types of records.
* Naming Record Types:: Some conventions on naming record sets.
* Documenting Records:: Documenting your record sets.
* Record Sets Properties:: Introducing the special fields.
File: recutils.info, Node: Record Sets, Next: Naming Record Types, Up: Record Descriptors
2.4.1 Record Sets
-----------------
The most basic property that can be specified for a set of records is
their "type". The special field name '%rec' is used for that purpose:
%rec: Entry

Id: 1
Name: Entry 1

Id: 2
Name: Entry 2
The records following the descriptor are then identified as having
that type. So in the example above we would say there are two records of
type "Entry". Or in a more colloquial way we would say there are two
"Entries" in the database.
The effect of a record descriptor ends when another descriptor is
found in the stream of records. This allows you to store different
kinds of records in the same database. For example, suppose you are
maintaining a depot. You will need to keep track of both what items are
available and when they are sold or restocked.
The following example shows the usage of two record descriptors to
store both kinds of records: articles and stock movements.
%rec: Article

Id: 1
Title: Article 1

Id: 2
Title: Article 2

%rec: Stock

Id: 1
Type: sell
Date: 20 April 2011

Id: 2
Type: stock
Date: 21 April 2011
A collection of records having the same type in a recfile is known as
a "record set" in recutils jargon. In the example above two record sets
are defined: one containing articles and the other containing stock
movements.
Nothing prevents having empty record sets in databases. This is in
fact usually the case when a new recfile is written but no data exists
yet. In our depot example we could write a first version of the
database containing just the record descriptors:
%rec: Article

%rec: Stock
Special records are not required, and many recfiles do not have them.
This is because all the records contained in the file are of the same
type, and their nature can usually be inferred from both the file name
and their contents. For example, 'contacts.rec' could simply contain
records representing contacts without an explicit '%rec: Contact' record
descriptor. In this case we say that the type of the anonymous records
stored in the file is the "default record type".
Another possible situation, although not usual, is to have a recfile
containing both non-typed (default) records and typed records:
Id: 1
Title: Blah

Id: 2
Title: Bleh

%rec: Movement

Date: 13-Aug-2012
Concept: 20

Date: 24-Sept-2012
Concept: 12
In this case the records preceding the movements are of the "default"
type, whereas the records following the record descriptor are of type
'Movement'. Even though it is supported by the format and the
utilities, it is generally not recommended to mix non-typed and typed
records in a recfile.
File: recutils.info, Node: Naming Record Types, Next: Documenting Records, Prev: Record Sets, Up: Record Descriptors
2.4.2 Naming Record Types
-------------------------
It is up to you how to name your record sets. Any string comprising
only alphanumeric characters or underscores, and that starts with a
letter will be a legal name. However, it is recommended to use the
singular form of a noun in order to describe the "type" of the records
in the records set. Examples are 'Article', 'Contributor', 'Employee'
and 'Movement'.
The noun used should be specific enough to characterize the relevant
property of the records. For example, in a contributors
database it would be better to have a record set named 'Contributor'
than 'Person'.
The reason for using singular nouns instead of their plural forms is
that it works better with the utilities: it is more natural to read
'recsel -t Contributor' ('-t' is for "type") than 'recsel -t
Contributors'.
File: recutils.info, Node: Documenting Records, Next: Record Sets Properties, Prev: Naming Record Types, Up: Record Descriptors
2.4.3 Documenting Records
-------------------------
As well as a name, it is a good idea to provide a description of the
record set. This is sometimes called the record set's "documentation"
and is specified using the '%doc' field.
Whereas the name is usually short and can contain only alphanumeric
characters and underscores, no such restriction applies to the
documentation. The documentation is typically more verbose than the
name provided by the '%rec' field and may contain arbitrary characters
such as punctuation and parentheses. It is somewhat similar to a
comment (*note Comments::), but it can be managed more easily in a
programmatic way. Unlike a comment, the '%doc' field is recognized by
tools such as 'recinf' (*note Invoking recinf::) which processes record
descriptors. For example, you might have two record sets with '%rec'
and '%doc' fields as follows:
%rec: Contact
%doc: Family, friends and acquaintances (other than business).

Name: Granny
Phone: +12 23456677

Name: Edwina
Phone: +55 0923 8765

%rec: Associate
%doc: Colleagues and other business contacts

Name: Karl Schmidt
Phone: +49 88234566

Name: Genevieve Curie
Phone: +33 34 87 65
File: recutils.info, Node: Record Sets Properties, Prev: Documenting Records, Up: Record Descriptors
2.4.4 Record Sets Properties
----------------------------
Besides determining the type of record that follows in the stream,
record descriptors can be used to describe other properties of those
records. This can be done by using "special fields", which have special
names from a predefined set. Consider for example the following
database, where a record descriptor is used to specify an (optional)
numeric 'Id' and a mandatory 'Title' field:
%rec: Item
%type: Id int
%mandatory: Title

Id: 10
Title: Notebook (big)

Id: 11
Title: Fountain Pen
Note that the names of special fields always start with the character
'%'. Note that it is also possible to use non-special fields in a
record descriptor, but such fields will have no effect on the described
record set.
Every record set must contain one, and only one, field named '%rec'.
This field need not occupy the first position in the record.
However, it is considered good style to place it as the first
field in the record set, in order for the casual reader to easily
identify the type of the records.
The following list briefly describes the special fields defined in
the recutils format, along with references to the sections of this
manual describing their usage in depth.
'%rec'
Naming record types. It also allows using external and remote
descriptors. *Note Remote Descriptors::.
'%mandatory, %allowed and %prohibit'
Requiring or forbidding specific fields. *Note Mandatory Fields::.
*Note Prohibited Fields::. *Note Allowed Fields::.
'%unique and %key'
Working with keys. *Note Keys and Unique Fields::.
'%doc'
Documenting your database. *Note Documenting Records::.
'%typedef and %type'
Field types. *Note Field Types::.
'%auto'
Auto-counters and time-stamps. *Note Auto-Generated Fields::.
'%sort'
Keeping your record sets sorted. *Note Sorted Output::.
'%size'
Restricting the size of your database. *Note Size Constraints::.
'%constraint'
Enforcing arbitrary constraints. *Note Arbitrary Constraints::.
'%confidential'
Storing confidential information. *Note Encryption::.
File: recutils.info, Node: Querying Recfiles, Next: Editing Records, Prev: The Rec Format, Up: Top
3 Querying Recfiles
*******************
Since recfiles are always human readable, you could look up data simply
by opening an editor and searching for the desired information. Or you
could use a standard tool such as 'grep' to extract strings matching a
pattern. However, recutils provides a more powerful and flexible way to
look up data. The following sections explore how the recutils can be
used to extract data from recfiles, from very basic and simple queries
to quite complex examples.
* Menu:
* Simple Selections:: Introducing 'recsel'.
* Selecting by Type:: Get the records of some given type.
* Selecting by Position:: Get the record occupying some position.
* Random Records:: Get a set of random records.
* Selection Expressions:: Get the records satisfying some expression.
* Field Expressions:: Selecting a subset of fields.
* Sorted Output:: Get the records in a given order.
File: recutils.info, Node: Simple Selections, Next: Selecting by Type, Up: Querying Recfiles
3.1 Simple Selections
=====================
'recsel' is a utility whose primary purpose is to select records from a
recfile and print them on standard output. Consider the following
example record set, which we shall assume is saved in a recfile called
'acquaintances.rec':
# This database contains a list of both real and fictional people
# along with their age.

Name: Ada Lovelace
Age: 36

Name: Peter the Great
Age: 53

# Name: Matusalem
# Age: 969

Name: Bart Simpson
Age: 10

Name: Adrian Mole
Age: 13.75
If we invoke 'recsel acquaintances.rec' we will get a list of all the
records stored in the file in the terminal:
$ recsel acquaintances.rec
Name: Ada Lovelace
Age: 36

Name: Peter the Great
Age: 53

Name: Bart Simpson
Age: 10

Name: Adrian Mole
Age: 13.75
Note that the commented out parts of the file, in this case the
explanatory header and the record corresponding to Matusalem, are not
part of the output produced by 'recsel'. This is because 'recsel' is
concerned only with the data.
'recsel' will also "pack" the records so any extra empty lines that
may be between records are not echoed in the output:
*acquaintances.rec:*

Name: Peter the Great
Age: 53

# Note the extra empty lines.



Name: Bart Simpson
Age: 10

$ recsel acquaintances.rec
Name: Peter the Great
Age: 53

Name: Bart Simpson
Age: 10
It is common to store data gathered in several recfiles. For example,
we could have a 'contacts.rec' file containing general contact records,
and also a 'work-contacts.rec' file containing business contacts:
*contacts.rec:*

Name: Granny
Phone: +12 23456677

Name: Doctor
Phone: +12 58999222

*work-contacts.rec:*

Name: Yoyodyne Corp.
Email: sales@yoyod.com
Phone: +98 43434433

Name: Robert Harris
Email: robert.harris@yoyod.com
Note: Sales Department.
Both files can be passed to 'recsel' in the command line. In that
case 'recsel' will simply process them and output their records in the
same order they were specified:
$ recsel contacts.rec work-contacts.rec
Name: Granny
Phone: +12 23456677

Name: Doctor
Phone: +12 58999222

Name: Yoyodyne Corp.
Email: sales@yoyod.com
Phone: +98 43434433

Name: Robert Harris
Email: robert.harris@yoyod.com
Note: Sales Department.
As mentioned above, the output follows the ordering on the command line,
so 'recsel work-contacts.rec contacts.rec' would output the records of
'work-contacts.rec' first and then the ones from 'contacts.rec'.
Note however that 'recsel' will merge records from several files
specified in the command line only if they are anonymous. If the
contacts in our files were typed:
*contacts.rec:*

%rec: Contact

Name: Granny
Phone: +12 23456677

Name: Doctor
Phone: +12 58999222

*work-contacts.rec:*

%rec: Contact

Name: Yoyodyne Corp.
Email: sales@yoyod.com
Phone: +98 43434433

Name: Robert Harris
Email: robert.harris@yoyod.com
Note: Sales Department.
Then we would get the following error message:
$ recsel contacts.rec work-contacts.rec
recsel: error: duplicated record set 'Contact' from work-contacts.rec.
File: recutils.info, Node: Selecting by Type, Next: Selecting by Position, Prev: Simple Selections, Up: Querying Recfiles
3.2 Selecting by Type
=====================
As we saw in the section discussing record descriptors, it is possible
to have several different types of records in a single recfile.
Consider for example a 'gnu.rec' file containing information about
maintainers and packages in the GNU Project:
%rec: Maintainer

Name: Jose E. Marchesi
Email: jemarch@gnu.org

Name: Luca Saiu
Email: positron@gnu.org

%rec: Package

Name: GNU recutils
LastRelease: 12 February 2014

Name: GNU epsilon
LastRelease: 10 March 2013
If 'recsel' is invoked in that file it will complain:
$ recsel gnu.rec
recsel: error: several record types found. Please use -t to specify one.
This is because 'recsel' does not know which records to output: the
maintainers or the packages. This can be resolved by using the '-t'
command line option:
$ recsel -t Package gnu.rec
Name: GNU recutils
LastRelease: 12 February 2014

Name: GNU epsilon
LastRelease: 10 March 2013
By default 'recsel' never outputs record descriptors. This is because
most of the time the user is only interested in the data. However, with
the '-d' command line option, the record descriptor of the selected type
is printed preceding the data records:
$ recsel -d -t Maintainer gnu.rec
%rec: Maintainer

Name: Jose E. Marchesi
Email: jemarch@gnu.org

Name: Luca Saiu
Email: positron@gnu.org
Note that at the moment it is not possible to select non-typed (default)
records when other record sets are stored in the same file. This is one
of the reasons why mixing non-typed records and typed records in a
single recfile is not recommended.
Note also that if a nonexistent record type is specified in '-t' then
'recsel' does nothing.
File: recutils.info, Node: Selecting by Position, Next: Random Records, Prev: Selecting by Type, Up: Querying Recfiles
3.3 Selecting by Position
=========================
As was explained in the previous sections, 'recsel' outputs all the
records of some record set. The records are echoed in the same order
they are written in the recfile. However, often it is desirable to
select a subset of the records, determined by the position they occupy
in their record set.
The '-n' command line option to 'recsel' supports doing this in a
natural way. This is how we would retrieve the first contact listed in
a contacts database using 'recsel':
$ recsel -n 0 contacts.rec
Name: Granny
Phone: +12 23456677
Note that the index is zero-based. If we want to retrieve more records
we can specify several indexes to '-n' separated by commas. If a given
index is too big, it is simply ignored:
$ recsel -n 0,1,999 contacts.rec
Name: Granny
Phone: +12 23456677

Name: Doctor
Phone: +12 58999222
With '-n', the order in which the records are echoed does not depend on
the order of the indexes passed to '-n'. For example, the output of
'recsel -n 0,1' will be identical to the output of 'recsel -n 1,0'.
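For instance, using the 'contacts.rec' file shown above, reversing the
indexes produces exactly the same output (an illustrative sketch):
$ recsel -n 1,0 contacts.rec
Name: Granny
Phone: +12 23456677

Name: Doctor
Phone: +12 58999222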
Ranges of indexes can also be used to select a subset of the records.
For example, the following call would also select the first three
contacts of the database:
$ recsel -n 0-2 contacts.rec
Name: Granny
Phone: +12 23456677

Name: Doctor
Phone: +12 58999222

Name: Dad
Phone: +12 88229900
It is possible to mix single indexes and index ranges in the same call.
For example, 'recsel -n 0,5-6' would select the first, sixth and seventh
records.
File: recutils.info, Node: Random Records, Next: Selection Expressions, Prev: Selecting by Position, Up: Querying Recfiles
3.4 Random Records
==================
Consider a database in which each record is a cooking recipe. It is
always difficult to decide what to cook each day, so it would be nice if
we could ask 'recsel' to pick up a random recipe. This can be achieved
using the '-m' ('--random') command line option of 'recsel':
$ recsel -m 1 recipes.rec
Title: Curry chicken
Ingredient: A whole chicken
Ingredient: Curry
Preparation: ...
If we need two recipes, because we will be cooking at both lunch and
dinner, we can pass a different number to '-m':
$ recsel -m 2 recipes.rec
Title: Fabada Asturiana
Ingredient: 300 gr of fabes.
Ingredient: Chorizo
Ingredient: Morcilla
Preparation: ...

Title: Pasta with ragu
Ingredient: 500 gr of spaghetti.
Ingredient: 2 tomatoes.
Ingredient: Minced meat.
Preparation: ...
The algorithm used to implement '-m' guarantees that you will never get
multiple instances of the same record. This means that if a record set
has N records and you ask for N random records, you will get all the
records in a random order.
File: recutils.info, Node: Selection Expressions, Next: Field Expressions, Prev: Random Records, Up: Querying Recfiles
3.5 Selection Expressions
=========================
"Selection expressions", also known as "sexes" in recutils jargon, are
infix expressions that can be applied to a record. A "sex" is a
predicate which selects a subset of records within a recfile. They can
be simple expressions involving just one operator and a pair of
operands, or complex compound expressions with parenthetical
sub-expressions and many operators and operands. One of their most
common uses is to examine records matching a particular set of
conditions.
* Menu:
* Selecting by predicate:: Selecting records which satisfy conditions.
* SEX Operands:: Literal values, fields and sub-expressions.
* SEX Operators:: Arithmetic, logical and other operators.
* SEX Evaluation:: Selection expressions are like generators.
File: recutils.info, Node: Selecting by predicate, Next: SEX Operands, Up: Selection Expressions
3.5.1 Selecting by predicate
----------------------------
Consider the example recfile 'acquaintances.rec' introduced earlier. It
contains names of people along with their respective ages. Suppose we
want to get a list of the names of all the children. It would not be
easy to do this using 'grep'. Neither would it, for any reasonably
large recfile, be feasible to search manually for the children.
Fortunately the 'recsel' command provides an easy way to do such a
lookup:
$ recsel -e "Age < 18" -P Name acquaintances.rec
Bart Simpson
Adrian Mole
Let us look at each of the arguments to 'recsel' in turn. Firstly we
have '-e' which tells 'recsel' to lookup records matching the expression
'Age < 18' -- in other words all those people whose ages are less than
18. This is an example of a "selection expression". In this case it is
a simple test, but it can be as complex as needed.
Next, there is '-P' which tells 'recsel' to print out the value of
the 'Name' field -- because we want just the name, not the entire
record. The final argument is the name of the file from whence the
records are to come: 'acquaintances.rec'.
Rather than explicitly storing ages in the recfile, a more realistic
example might have the date of birth instead (otherwise it would be
necessary to update the people's ages in the recfile on every birthday).
# Date of Birth
%type: Dob date

Name: Alfred Nebel
Dob: 20 April 2010
Email: alf@example.com

Name: Bertram Worcester
Dob: 3 January 1966
Email: bert@example.com

Name: Charles Spencer
Dob: 4 July 1997
Email: charlie@example.com

Name: Dirk Hogart
Dob: 29 June 1945
Email: dirk@example.com

Name: Ernest Wright
Dob: 26 April 1978
Email: ernie@example.com
Now we can achieve a similar result as before, by looking up the names
of all those people who were born after a particular date:
$ recfix acquaintances.rec
$ recsel -e "Dob >> '31 July 1994'" -p Name acquaintances.rec
Name: Alfred Nebel

Name: Charles Spencer
The '>>' operator means "later than", and is used here to select a date
of birth after 31st July 1994. Note also that this example uses a lower
case '-p' whereas the preceding example used the upper case '-P'. The
difference is that '-p' prints the field name and field value, whereas
'-P' prints just the value.
'recsel' accepts more than one '-e' argument, each introducing a
selection expression, in which case the records which satisfy all
expressions are selected. You can provide more than one field label to
'-P' or '-p' in order to select additional fields to be displayed. For
example, if you wanted to send an email to all children 14 to 18 years
of age, and today's date were 1st August 2012, then you could use the
following command to get the name and email address of all such
children:
$ recfix acquaintances.rec
$ recsel -e "Dob >> '31 July 1994' && Dob << '01 August 1998'" \
-p Name,Email acquaintances.rec
Name: Charles Spencer
Email: charlie@example.com
As you can see, there is only one such child in our record set.
Note that the example command shown above contains both double quotes
'"' and single quotes '''. The double quotes are interpreted by the
shell (e.g. 'bash') and the single quotes are interpreted by 'recsel',
defining a string. (The backslash is also interpreted by the shell as
the usual line-continuation character, so that the command fits within
the width of this manual.)
File: recutils.info, Node: SEX Operands, Next: SEX Operators, Prev: Selecting by predicate, Up: Selection Expressions
3.5.2 SEX Operands
------------------
The supported operands are: numbers, strings, field names and
parenthesized expressions.
3.5.2.1 Numeric Literals
........................
The supported numeric literals are integer numbers and real numbers.
The usual sign character '-' is used to denote negative values. Integer
values can be denoted in base 10, base 16 using the '0x' prefix, and
base 8 using the '0' prefix. Examples are:
10000
0
0xFF
-0xa
012
-07
-1342
.12
-3.14
3.5.2.2 String Literals
.......................
String values are delimited by either the ''' character or the '"'
character. Whichever delimiter is used, the delimiter closing the
literal must be the same as the delimiter used to open it.
Newlines and tabs can be part of a string literal.
Examples are:
'Hello.'
'The following example is the empty string.'
''
The ''' and '"' characters can be part of a string if they are
escaped with a backslash, as in:
'This string contains an apostrophe: \'.'
"This one a double quote: \"."
3.5.2.3 Field Values
....................
The value of a field can be included in a selection expression by
writing its name. The field name is replaced by a string containing
the field value (see *note SEX Evaluation:: for how records having more
than one field with that name are handled). Examples:
Name
Email
long_field_name
It is possible to use the role part of a field if it is not empty.
So, for example, if we are searching for the issues opened by 'John
Smith' in a database of issues we could write:
$ recsel -e "OpenedBy = 'John Smith'"
instead of using a full field name:
$ recsel -e "Hacker:Name:OpenedBy = 'John Smith'"
When the name of a field appears in an expression, the expression is
applied to all the fields in the record featuring that name. So, for
example, the expression:
Email ~ "\\.org"
matches any record in which there is a field named 'Email' whose value
terminates in (the literal string) '.org'. If we are interested in the
value of some specific email, we can specify its relative position in
the containing record by using "subscripts". Consider, for example:
Email[0] ~ "\\.org"
Will match for:
Name: Mr. Foo
Email: foo@foo.org
Email: mr.foo@foo.com
But not for:
Name: Mr. Foo
Email: mr.foo@foo.com
Email: foo@foo.org
The regexp syntax supported in selection expressions is POSIX EREs,
with several GNU extensions. *Note Regular Expressions::.
3.5.2.4 Parenthesized Expressions
.................................
Parenthesis characters ('(' and ')') can be used to group
sub-expressions in the usual way.
File: recutils.info, Node: SEX Operators, Next: SEX Evaluation, Prev: SEX Operands, Up: Selection Expressions
3.5.3 Operators
---------------
The supported operators are arithmetic operators (addition, subtraction,
multiplication, division and modulus), logical operators, string
operators and field operators.
3.5.3.1 Arithmetic Operators
............................
Arithmetic operators for addition ('+'), subtraction ('-'),
multiplication ('*'), integer division ('/') and modulus ('%') are
supported with their usual meanings.
These operators require either numeric operands or string operands
whose value can be interpreted as numbers (integer or real).
3.5.3.2 Boolean Operators
.........................
The boolean operators *and* ('&&'), *or* ('||') and *not* ('!') are
supported with the same semantics as their C counterparts.
A compound boolean operator '=>' is also supported in order to ease
the elaboration of constraints in records: 'A => B', which can be read
as "A implies B", translates into '!A || (A && B)'.
The boolean operators expect integer operands, and will try to
convert any string operand to an integer value.
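As an illustration of the '=>' operator, the following record
descriptor (a sketch using hypothetical field names) requires that any
record containing a 'Closed' field also contains a 'ClosedDate' field,
while records having neither field still satisfy the constraint (*note
Arbitrary Constraints::):

%rec: Task
%constraint: #Closed => #ClosedDate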
3.5.3.3 Comparison Operators
............................
The compare operators *less than* ('<'), *greater than* ('>'), *less
than or equal* ('<='), *greater than or equal* ('>='), *equal* ('=') and
*unequal* ('!=') are supported with their usual meaning.
Strings can be compared with the equality operator ('=').
The match operator ('~') can be used to match a string with a given
regular expression (*note Regular Expressions::).
3.5.3.4 Date Comparison Operators
.................................
The compare operators *before* ('<<'), *after* ('>>') and *same time*
('==') can be used with fields and strings containing parseable dates.
*Note Date input formats::.
3.5.3.5 Field Operators
.......................
Field counters are replaced by the number of occurrences of a field with
the given name in the record. For example:
#Email
The previous expression is replaced with the number of fields named
'Email' in the record. It can be zero if the record does not have a
field with that name.
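For example, assuming a contacts database in which some records have
several email addresses (an illustrative sketch), the following command
lists the names of the contacts having more than one 'Email' field:

$ recsel -e "#Email > 1" -P Name contacts.rec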
3.5.3.6 String Operators
........................
The string concatenation operator ('&') can be used to concatenate any
number of strings and field values.
'foo' & Name & 'bar'
3.5.3.7 Conditional Operator
............................
The ternary conditional operator can be used to select alternatives
based on the value of some expression:
expr1 ? expr2 : expr3
If 'expr1' evaluates to true (i.e. it is an integer or the string
representation of an integer and its value is not zero) then the
operator yields 'expr2'. Otherwise it yields 'expr3'.
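For example, the following expression (a sketch using hypothetical
field names) yields the value of the 'Email' field when the record has
one, and the value of the 'Name' field otherwise:

#Email ? Email : Name

This kind of expression is mainly useful when selection expressions are
used to compute values rather than to select records, for example in
template slots (*note Templates::).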
File: recutils.info, Node: SEX Evaluation, Prev: SEX Operators, Up: Selection Expressions
3.5.4 Evaluation of Selection Expressions
-----------------------------------------
Given that:
- It is possible to refer to fields by name in selection expressions.
- Records can have several fields with the same name.
It is clear that some backtracking mechanism is needed in the evaluation
of the selection expressions. For example, consider the following
expression that is deciding whether a "registration" in a webpage should
be rejected:
((Email ~ "foomail\.com") || (Age <= 18)) && !#Fixed
The previous expression will be evaluated for every possible
permutation of the fields "Email", "Age" and "Fixed" present in the
record, until one of the combinations succeeds. At that point the
computation is interrupted.
When used to decide whether a record matches some criteria, the goal
of a selection expression is to act as a boolean expression. In that
case the final value of the expression depends on both the type and the
value of the result produced by the top-most subexpression:
- If the result is an integer, the expression is true if its value is
not zero.
- If the result is a real, or a string, the expression evaluates to
false.
Sometimes a selection expression is used to compute a result instead
of a boolean. In that case the returned value is converted to a string.
This is used when replacing the slots in templates (*note Templates::).
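As an example of this backtracking, consider again a record with
several 'Email' fields (a sketch based on the records shown earlier):

Name: Mr. Foo
Email: mr.foo@foo.com
Email: foo@foo.org

The expression 'Email ~ "\\.org"' selects this record: the evaluation
first tries the field value 'mr.foo@foo.com', which does not match, and
then tries 'foo@foo.org', which does, so the whole expression succeeds.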
File: recutils.info, Node: Field Expressions, Next: Sorted Output, Prev: Selection Expressions, Up: Querying Recfiles
3.6 Field Expressions
=====================
"Field expressions" (also known as "fexes") are a way to select fields
of a record. They also allow you to do certain transformations on the
selected fields, such as changing their names.
A FEX comprises a sequence of "elements" separated by commas:
ELEM_1,ELEM_2,...,ELEM_N
Each element makes a reference to one or more fields in a record
identified by a given name and an optional subscript:
FIELD_NAME[MIN-MAX]
MIN and MAX are zero-based indexes. It is possible to refer to a field
occupying a given position. For example, consider the following record:
Name: Mr. Foo
Email: foo@foo.com
Email: foo@foo.org
Email: mr.foo@foo.org
We would select all the emails of the record with:
Email
The first email with:
Email[0]
The third email with:
Email[2]
The second and the third email with:
Email[1-2]
And so on. It is possible to select the same field (or range of
fields) more than once just by repeating them in a field expression.
Thus, the field expression:
Email[0],Name,Email
will print the first email, the name, and then all the email fields
including the first one.
It is possible to include a "rewrite rule" in an element of a field
expression, which specifies an alias for the selected fields:
FIELD_NAME[MIN-MAX]:ALIAS
For example, the following field expression specifies an alias for the
fields named 'Email' in a record:
Name,Email:ElectronicMail
Since the rewrite rules only affect the fields selected in a single
element of the field expression, it is possible to define different
aliases to several fields having the same name but occupying different
positions:
Name,Email[0]:PrimaryEmail,Email[1]:SecondaryEmail
When that field expression is applied to the following record:
Name: Mr. Foo
Email: primary@email.com
Email: secondary@email.com
Email: other@email.com
the result will be:
Name: Mr. Foo
PrimaryEmail: primary@email.com
SecondaryEmail: secondary@email.com
Email: other@email.com
It is possible to use the dot notation in order to refer to field and
sub-fields. This is mainly used in the context of joins, where new
fields are created having compound names such as 'Foo_Bar'. A reference
to such a field can be done in the fex using dot notation as follows:
Foo.Bar
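For instance, assuming a hypothetical 'books.rec' database in which
'Book' records contain an 'Author' field declared as a foreign key into
an 'Author' record set, a join (*note Queries which Join Records::)
creates compound fields such as 'Author_Name', which can be selected
with dot notation (an illustrative sketch):

$ recsel -t Book -j Author -p Title,Author.Name books.rec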
File: recutils.info, Node: Sorted Output, Prev: Field Expressions, Up: Querying Recfiles
3.7 Sorted Output
=================
The '%sort' special field sets sorting criteria for the records
contained in a record set. Its usage is:
%sort: FIELD1 FIELD2 ...
Meaning that the desired order for the records will be determined by the
contents of the fields named in the '%sort' value. The sorting is
always done in ascending order, and there may be records that lack the
involved fields, i.e. the sorting fields need not be mandatory.
It is an error to have more than one '%sort' field in the same record
descriptor, as only one field list can be used as sorting criteria.
Consider for example that we want to keep the records in our
inventory system ordered by entry date. We could achieve that by using
the following record descriptor in the database:
%rec: Item
%type: Date date
%sort: Date

Id: 1
Title: Staplers
Date: 10 February 2011

Id: 2
Title: Ruler Pack 20
Date: 2 March 2009

...
As you can see in the example above, the fact that we use '%sort' in a
database does not mean that the database will always be physically
ordered. Unsorted record sets are not a data integrity problem, and
thus the diagnosis tools must not declare a recfile as invalid because
of this. The utility 'recfix' provides a way to physically order the
records in the file (*note Invoking recfix::).
On the other hand any program listing, presenting or processing data
extracted from the recfile must honor the '%sort' entry. For example,
when using the following 'recsel' program in the database above we would
get the output sorted by date:
$ recsel inventory.rec
Id: 2
Title: Ruler Pack 20
Date: 2 March 2009

Id: 1
Title: Staplers
Date: 10 February 2011
How the records are ordered depends on the type of the field used as
the sorting criterion:
- Numeric fields (integers, ranges, reals) are numerically ordered.
- Boolean fields are ordered considering that "false" values come
first.
- Dates are ordered chronologically.
- Any other kind of field is ordered using a lexicographic order.
It is possible to specify several fields as the sorting criteria. In
that case the records are sorted using a lexicographic order. Consider
for example the following unsorted database containing marks for several
students:
%rec: Marks
%type: Class enum A B C
%type: Score real

Name: Mr. One
Class: C
Score: 6.8

Name: Mr. Two
Class: A
Score: 6.8

Name: Mr. Three
Class: B
Score: 9.2

Name: Mr. Four
Class: A
Score: 2.1

Name: Mr. Five
Class: C
Score: 4
If we wanted to sort it by 'Class' and by 'Score' we would insert a
'%sort' special field in the descriptor, obtaining:
%rec: Marks
%type: Class enum A B C
%type: Score real
%sort: Class Score

Name: Mr. Four
Class: A
Score: 2.1

Name: Mr. Two
Class: A
Score: 6.8

Name: Mr. Three
Class: B
Score: 9.2

Name: Mr. Five
Class: C
Score: 4

Name: Mr. One
Class: C
Score: 6.8
The order of the fields in the '%sort' field is significant. If we
reverse the order in the example above then we get a different sorted
set:
%rec: Marks
%type: Class enum A B C
%type: Score real
%sort: Score Class

Name: Mr. Four
Class: A
Score: 2.1

Name: Mr. Five
Class: C
Score: 4

Name: Mr. Two
Class: A
Score: 6.8

Name: Mr. One
Class: C
Score: 6.8

Name: Mr. Three
Class: B
Score: 9.2
In this last case, 'Mr. One' comes after 'Mr. Two' because the class 'A'
comes before the class 'C' even though the score is the same ('6.8').
File: recutils.info, Node: Editing Records, Next: Editing Fields, Prev: Querying Recfiles, Up: Top
4 Editing Records
*****************
The simplest way of editing a recfile is to start your favourite text
editor and hack the contents of the file as desired. However, the rec
format is structured enough so recfiles can be updated automatically by
programs. This is useful for writing shell scripts or when there are
complex data integrity rules stored in the file that we want to be sure
to preserve.
The following sections discuss the usage of the recutils for altering
recfiles at the level of records: adding new records, deleting or
commenting them out, sorting them, etc.
* Menu:
* Inserting Records:: Inserting data into recfiles.
* Deleting Records:: Removing data.
* Sorting Records:: Physical reordering of records.
File: recutils.info, Node: Inserting Records, Next: Deleting Records, Up: Editing Records
4.1 Inserting Records
=====================
Adding new records to a recfile is pretty trivial: open it with your
text editor and just write down the fields comprising the records. This
is really the best way to add contents to a recfile containing simple
data. However, complex databases may introduce some difficulties:
_Multi-line values._
It can be tedious to manually encode values spanning several lines.
_Data integrity._
It is difficult to manually maintain the integrity of data stored
in the database.
_Counters and timestamps._
Some record sets feature auto-generated fields, which are commonly
used to implement counters and time-stamps. *Note Auto-Generated
Fields::.
Thus, to facilitate the insertion of new data, a command line utility
called 'recins' is included in the recutils. 'recins' is very simple
to use, and it can be invoked either directly from the command line or
from another program. The following subsections discuss several
aspects of using this utility.
* Menu:
* Adding Records With recins:: Basics of the 'recins' utility.
* Replacing Records With recins:: Substituting records in a file.
* Adding Anonymous Records:: Inserting or replacing records with no
type.
File: recutils.info, Node: Adding Records With recins, Next: Replacing Records With recins, Up: Inserting Records
4.1.1 Adding Records With recins
--------------------------------
Each invocation of 'recins' adds one record to the targeted database.
The fields comprising the records are specified using pairs of '-f' and
'-v' command line arguments. For example, this is how we would add the
first entry to a previously empty contacts database:
$ recins -f Name -v "Mr. Foo" -f Email -v foo@bar.baz contacts.rec
$ cat contacts.rec
Name: Mr. Foo
Email: foo@bar.baz
If we invoke 'recins' again on the same database we will be adding a
second record:
$ recins -f Name -v "Mr. Bar" -f Email -v bar@gnu.org contacts.rec
$ cat contacts.rec
Name: Mr. Foo
Email: foo@bar.baz

Name: Mr. Bar
Email: bar@gnu.org
There is no limit on the number of '-f' '-v' pairs that can be
specified to 'recins', other than any limit on command line arguments
which may be imposed by the shell.
The field values provided using '-v' are encoded to follow the rec
format conventions, including multi-line field values. Consider the
following example:
$ recins -f Name -v "Mr. Foo" -f Address -v '
Foostrs. 19
Frankfurt am Oder
Germany' contacts.rec
$ cat contacts.rec
Name: Mr. Foo
Address:
+ Foostrs. 19
+ Frankfurt am Oder
+ Germany
It is also possible to provide fields already encoded as rec data for
their addition, using the '-r' command line argument. This argument can
be intermixed with '-f' '-v'.
$ recins -f Name -v "Mr. Foo" -r "Email: foo@bar.baz" contacts.rec
$ cat contacts.rec
Name: Mr. Foo
Email: foo@bar.baz
If the string passed to '-r' is not valid rec data then 'recins' will
complain with an error and the operation will be aborted.
At this time, it is not possible to add new records containing
comments.
File: recutils.info, Node: Replacing Records With recins, Next: Adding Anonymous Records, Prev: Adding Records With recins, Up: Inserting Records
4.1.2 Replacing Records With recins
-----------------------------------
'recins' can also be used to replace existing records in a database with
a provided record. This is done by specifying some criteria selecting
the record (or records) to be replaced.
Consider for example the following command applied to our contacts
database:
$ recins -e "Email = 'foo@bar.baz'" -f Name -v "Mr. Foo" \
-f Email -v "new@bar.baz" contacts.rec
The contact featuring an email 'foo@bar.baz' gets replaced with the
following record:
Name: Mr. Foo
Email: new@bar.baz
The records to be replaced can also be specified by index, or a range
of indexes. For example, the following command replaces the first,
second and third records in a database with dummy records:
$ recins -n 0,1-2 -f Dummy -v XXX foo.rec
$ cat foo.rec
Dummy: XXX

Dummy: XXX

Dummy: XXX

... Other records ...
File: recutils.info, Node: Adding Anonymous Records, Prev: Replacing Records With recins, Up: Inserting Records
4.1.3 Adding Anonymous Records
------------------------------
In a previous chapter we noted that 'recsel' interprets the absence of a
'-t' argument depending on the actual contents of the file. If the
recfile contains records of just one type the command assumes that the
user is referring to these records.
'recins' does not follow this convention, and the absence of an
explicit type always means to insert (or replace) an anonymous record.
Consider for example the following database:
%rec: Marks
%type: Class enum A B C

Name: Alfred
Class: A

Name: Bertram
Class: B
If we want to insert a new mark we have to specify the type explicitly
using '-t':
$ cat marks.rec | recins -t Marks -f Name -v Xavier -f Class -v C
%rec: Marks
%type: Class enum A B C

Name: Alfred
Class: A

Name: Bertram
Class: B

Name: Xavier
Class: C
If we forget to specify the type then an anonymous record is created
instead:
$ cat marks.rec | recins -f Name -v Xavier -f Class -v C
Name: Xavier
Class: C

%rec: Marks
%type: Class enum A B C

Name: Alfred
Class: A

Name: Bertram
Class: B
File: recutils.info, Node: Deleting Records, Next: Sorting Records, Prev: Inserting Records, Up: Editing Records
4.2 Deleting Records
====================
Just as 'recins' inserts records, the utility 'recdel' deletes them.
Consider the following recfile 'stock.rec':
%rec: Item
%type: Expiry date
%sort: Title

Title: First Aid Kit
Expiry: 2 May 2009

Title: Emergency Rations
Expiry: 10 August 2009

Title: Life raft
Expiry: 2 March 2009
Suppose we wanted to delete all items with an 'Expiry' value before a
certain date. We could do this with the following command:
$ recdel -t Item -e 'Expiry << "5/12/2009"' stock.rec
After running this command, only one record will remain in the file
(viz: the one titled 'Emergency Rations') because all the others have
expiry dates prior to 12 May 2009. (1) The '-t' option can be omitted
if, and only if, there is no '%rec' field in the recfile.
'recdel' tries to warn you if you attempt to perform a delete
operation which it deems to be too pervasive. In such cases, it will
refuse to run, unless you give the '--force' flag. However, you should
not rely upon 'recdel' to protect you, because it cannot always
correctly guess that you might be deleting more records than intended.
For this reason, it may be wise to use the '-c' flag, which causes the
relevant records to be commented out, rather than deleted. (And of
course backups are always wise.)
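For example, the following command (a sketch using the 'stock.rec'
file from above) comments out the expired items instead of removing
them, so the data can be recovered later simply by deleting the '#'
characters:

$ recdel -c -t Item -e 'Expiry << "5/12/2009"' stock.rec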
The complete options available to the 'recdel' command are explained
later. *Note Invoking recdel::.
---------- Footnotes ----------
(1) '5/12/2009' means the 12th day of May 2009, _not_ the fifth day
of December, even if your 'LC_TIME' environment variable has been set to
suggest otherwise.
File: recutils.info, Node: Sorting Records, Prev: Deleting Records, Up: Editing Records
4.3 Sorting Records
===================
In the example above, note the existence of the '%sort: Title' line.
This field was discussed previously (*note Sorted Output::) and, as
mentioned, does not imply that the records need to be stored in the
recfile in any particular order.
However, if desired, you can automatically arrange the recfile in
that order using 'recfix' with the '--sort' flag. After running the
command
$ recfix --sort stock.rec
the file 'stock.rec' will have its records sorted in alphabetical order
of the 'Title' fields, thus:
%rec: Item
%type: Expiry date
%sort: Title

Title: Emergency Rations
Expiry: 10 August 2009

Title: First Aid Kit
Expiry: 2 May 2009

Title: Life raft
Expiry: 2 March 2009
File: recutils.info, Node: Editing Fields, Next: Field Types, Prev: Editing Records, Up: Top
5 Editing Fields
****************
Fields of a recfile can, of course, be edited manually using an editor
and this is often the easiest way when only a few fields need to be
changed or when the nature of the changes does not follow any particular
pattern. If, however, a large number of similar changes to several
records are required, the 'recset' command can make the job easier.
The formal description of 'recset' is presented later (*note Invoking
recset::). In this chapter some typical usage examples are discussed.
As with 'recdel', 'recset', if used erroneously, has the potential to
make very pervasive changes, which could result in a large loss of data. It
is prudent therefore to take a copy of a recfile before running such
commands.
* Menu:
* Adding Fields:: Adding new fields to records.
* Setting Fields:: Editing field values.
* Deleting Fields:: Removing or commenting-out fields.
* Renaming Fields:: Changing the name of a field.
File: recutils.info, Node: Adding Fields, Next: Setting Fields, Up: Editing Fields
5.1 Adding Fields
=================
As mentioned above, the command 'recins' adds new records to a recfile,
but it cannot add fields to an existing record. This task can be
achieved automatically using 'recset' with its '-a' flag.
Suppose that (after a stock inspection) you wanted to add an
'Inspected' field to all the items in the recfile. The following
command could be used.
$ recset -t Item -f Inspected -a 'Yes' stock.rec
Here, because no record selection flag was provided, the command
affected _all_ the records of type 'Item'. We could limit the effect of
the command using the '-e', '-q', '-n' or '-m' flags. For example to
add the 'Inspected' field to only the first item the following command
would work:
$ recset -t Item -n 0 -f Inspected -a 'Yes' stock.rec
Similarly, a selection expression could have been used with the '-e'
flag in order to add the field only to records which satisfy the
expression.
If you use 'recset' with the '-a' flag on a field that already
exists, a new field (in addition to those already present) will be
appended with the given value.
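For instance, running the command above twice on the first item would
leave that record with two 'Inspected' fields (an illustrative sketch):

$ recset -t Item -n 0 -f Inspected -a 'Yes' stock.rec
$ recset -t Item -n 0 -f Inspected -a 'Yes' stock.rec
$ recsel -t Item -n 0 -p Inspected stock.rec
Inspected: Yes
Inspected: Yes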
File: recutils.info, Node: Setting Fields, Next: Deleting Fields, Prev: Adding Fields, Up: Editing Fields
5.2 Setting Fields
==================
It is also possible to update the value of a field. This is done using
'recset' with its '-s' flag. In the previous example, an 'Inspected'
flag was added to certain records, with the value 'yes'. After
reflection, one might want to record the date of inspection, rather than
a simple yes/no flag. Records which have no such field will remain
unchanged.
$ recset -t Item -f Inspected -s '30 October 2006' stock.rec
Although the above command does not have any selection criteria, it
will only affect those records for which an 'Inspected' field exists.
This is because the '-s' flag only sets values of existing fields. It
will not create any fields.
If instead the '-S' flag is used, this will create the field (if it
does not already exist) _and_ set its value.
$ recset -t Item -f Inspected -S '30 October 2006' stock.rec
File: recutils.info, Node: Deleting Fields, Next: Renaming Fields, Prev: Setting Fields, Up: Editing Fields
5.3 Deleting Fields
===================
You can delete fields using 'recset''s '-d' flag. For example, if we
wanted to delete the 'Inspected' field which we introduced above, we
could do so as follows:
$ recset -t Item -f Inspected -d stock.rec
This would delete _all_ fields named 'Inspected' from _all_ records of
type 'Item'. It may be that we only wanted to delete the 'Inspected'
fields from records which satisfy a certain condition. The following
would delete the fields only from items whose 'Expiry' date was before 2
January 2010:
$ recset -t Item -e 'Expiry << "2 January 2010"' -f Inspected -d stock.rec
File: recutils.info, Node: Renaming Fields, Prev: Deleting Fields, Up: Editing Fields
5.4 Renaming Fields
===================
Another use of 'recset' is to rename existing fields. This is achieved
using the '-r' flag. To rename all instances of the 'Expiry' field
occurring in any record of type 'Item' to 'UseBy', the following command
suffices:
$ recset -t Item -f Expiry -r 'UseBy' stock.rec
As with most operations, this could be done selectively, using the '-e'
flag and a selection expression.
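For example, the following command (a sketch using the 'stock.rec'
file from the previous chapter) would rename the field only in the
items that have already expired:

$ recset -t Item -e 'Expiry << "1 January 2010"' -f Expiry -r 'UseBy' stock.rec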
File: recutils.info, Node: Field Types, Next: Constraints on Record Sets, Prev: Editing Fields, Up: Top
6 Field Types
*************
Field values are, by default, unrestricted text strings. However, it is
often useful to impose some restrictions on the values of certain
fields. For example, consider the following record:
Id: 111
Name: Jose E. Marchesi
Age: 30
MaritalStatus: single
Phone: +49 666 666 66
The values of the fields must clearly follow some structure in order
to make sense. 'Id' is a numeric identifier for a person. 'Name' will
never use several lines. 'Age' will typically be in the range '0..120',
and there are only a few valid values for 'MaritalStatus': single,
married, divorced, and widow(er). Phone numbers may also be restricted
to some standard format in order to be valid. All these restrictions (and many
others) can be enforced by using "field types".
There are two kinds of field types: "anonymous" and "named". These
are described in the following subsections.
* Menu:
* Declaring Types:: Declaration of types in record descriptors.
* Types and Fields:: Associating fields with types.
* Scalar Field Types:: Numbers and ranges.
* String Field Types:: Lines, limited strings and regular expressions.
* Enumerated Field Types:: Enumerations and boolean values.
* Date and Time Types:: Dates and times.
* Other Field Types:: Emails, fields, UUIDs, ...
File: recutils.info, Node: Declaring Types, Next: Types and Fields, Up: Field Types
6.1 Declaring Types
===================
A type can be declared in a record descriptor by using the '%typedef'
special field. The syntax is:
%typedef: TYPE_NAME TYPE_DESCRIPTION
Where TYPE_NAME is the name of the new type, and TYPE_DESCRIPTION a
description which varies depending on the kind of type. For example,
this is how a type 'Age_t' could be defined as numbers in the range
'0..120':
%typedef: Age_t range 0 120
Type names are identifiers having the following syntax:
[a-zA-Z][a-zA-Z0-9_]*
Even though any identifier with that syntax could be used for types, it
is a good idea to consistently follow some convention to help
distinguish type names from field names. For example, the '_t'
suffix could be used for types.
A type can be declared to be an alias for another type. The syntax
is:
%typedef: TYPE_NAME OTHER_TYPE_NAME
Where TYPE_NAME is declared to be a synonym of OTHER_TYPE_NAME. This is
useful to avoid duplicated type descriptions. For example, consider the
following example:
%typedef: Id_t int
%typedef: Item_t Id_t
%typedef: Transaction_t Id_t
Both 'Item_t' and 'Transaction_t' are aliases for the type 'Id_t'.
Which is in turn an alias for the type 'int'. So, they are both numeric
identifiers.
The order of the '%typedef' fields is not relevant. In particular, a
type definition can forward-reference another type that is defined
subsequently. The previous example could have been written as:
%typedef: Item_t Id_t
%typedef: Transaction_t Id_t
%typedef: Id_t int
The integrity checks will complain if undefined types are referenced,
as well as when an alias declaration ends up referencing itself
(looping back directly or indirectly). For example, the following set
of type declarations contains a loop and is thus invalid:
%typedef: A_t B_t
%typedef: B_t C_t
%typedef: C_t A_t
The scope of a type is the record descriptor where it is defined.
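For example, in the following sketch (using hypothetical record set
names) the 'Id_t' type declared for the 'Task' record set is not
visible in the 'Meeting' record set, which must provide its own
declaration:

%rec: Task
%typedef: Id_t int
%type: Id Id_t

%rec: Meeting
%typedef: Id_t range 0 999
%type: Id Id_t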
File: recutils.info, Node: Types and Fields, Next: Scalar Field Types, Prev: Declaring Types, Up: Field Types
6.2 Types and Fields
====================
Fields can be declared to have a given type by using the '%type' special
field in a record descriptor. The synopsis is:
%type: FIELD_LIST TYPE_NAME_OR_DESCRIPTION
Where FIELD_LIST is a list of field names separated by commas.
TYPE_NAME_OR_DESCRIPTION can be either a type name which has been
previously declared using '%typedef', or a type description. Type names
are useful when several fields are declared to be of the same type:
%typedef: Id_t int
%type: Id Id_t
%type: Product Id_t
Anonymous types can be specified by writing a type description instead
of a type name. They help to avoid superfluous type declarations in the
common case where a type is used by just one field. A record containing
a single 'Id' field, for example, can be defined without having to use a
'%typedef' in the following way:
%rec: Task
%type: Id int
File: recutils.info, Node: Scalar Field Types, Next: String Field Types, Prev: Types and Fields, Up: Field Types
6.3 Scalar Field Types
======================
The rec format supports the declaration of fields of the following
scalar types: integer numbers, ranges and real numbers.
Signed "integers" are supported by using the 'int' declaration:
%typedef: Id_t int
Given the declaration above, fields of type 'Id_t' must contain
integers, and they may be negative. Hexadecimal values can be written
using the '0x' prefix, and octal values using the '0' prefix. Valid
examples are:
%type: Id Id_t
Id: 100
Id: -23
Id: -0xFF
Id: 020
Sometimes it is desirable to reduce the "range" of integers allowed in a
field. This can be achieved by using a range type declaration:
%typedef: Interrupt_t range 0 15
Note that it is possible to omit the minimum index in ranges. In that
case it is implicitly zero:
%typedef: Interrupt_t range 15
It is possible to use the keywords 'MIN' and 'MAX' instead of a numeric
literal in one or both of the limits defining the range. They denote
the minimum and the maximum integer values supported by the
implementation, respectively. See the following examples:
%typedef: Negative range MIN -1
%typedef: Positive range 0 MAX
%typedef: AnyInt range MIN MAX
%typedef: Impossible range MAX MIN
Hexadecimal and octal numbers can be used to specify the limits in a
range. This helps to define scalar types whose natural base is not ten,
like for example:
%typedef: Address_t range 0x0000 0xFFFF
%typedef: Perms_t range 755
"Real" number fields can be declared with the 'real' type specifier. A
wide range of real numbers can be represented this way, only limited by
the underlying floating point representation. The decimal separator is
always the dot ('.') character regardless of the locale setting. For
example:
%typedef: Longitude_t real
Examples of fields of type real:
%rec: Rectangle
%typedef: Longitude_t real
%type: Width Longitude_t
%type: Height Longitude_t

Width: 25.01
Height: 10
File: recutils.info, Node: String Field Types, Next: Enumerated Field Types, Prev: Scalar Field Types, Up: Field Types
6.4 String Field Types
======================
The 'line' field type specifier can be used to restrict the value of a
field to a single line, i.e. no newline characters are allowed. For
example, a type for proper names could be declared as:
%typedef: Name_t line
Examples of fields of type line:
Name: Mr. Foo Bar
Name: Mrs. Bar Baz
Name: This is
+ invalid
Sometimes it is the maximum size of the field value that shall be
restricted. The 'size' field type specifier can be used to define the
maximum number of characters a field value can have. For example, if we
were collecting input that will eventually be written into a paper-based
forms system allowing entries of up to 25 characters in width, we could
declare the entries as:
%typedef: Address_t size 25
Note that hexadecimal and octal integer constants can also be used to
specify field sizes:
%typedef: Address_t size 0x18
Arbitrary restrictions can be defined by using regular expressions. The
"regexp" field type specifier introduces an ERE (extended regular
expression) that will be matched against fields having that name. The
synopsis is:
%typedef: TYPE_NAME regexp /RE/
where RE is the regular expression to match.
For example, consider the 'Id_t' type designed to represent the
encoding of the identifier of ID cards in some country:
%typedef: Id_t regexp /[0-9]{9}[a-zA-Z]/
Examples of fields of type 'Id_t' are:
IDCard: 123456789Z
IDCard: invalid id card
Note that the slashes delimiting the RE can be replaced with any other
character that is not itself used as part of the regexp. That is useful
in some cases such as:
%typedef: Path_t regexp |(/[^/]/?)+|
The regexp flavor supported in recfiles is POSIX ERE plus several
GNU extensions. *Note Regular Expressions::.
File: recutils.info, Node: Enumerated Field Types, Next: Date and Time Types, Prev: String Field Types, Up: Field Types
6.5 Enumerated Field Types
==========================
Fields of this type contain symbols taken from an enumeration.
The type is described by writing the sequence of symbols comprising
the enumeration. Enumeration symbols are strings described by the
following regexp:
[a-zA-Z0-9][a-zA-Z0-9_-]*
The symbols are separated by blank characters (including newlines). For
example:
%typedef: Status_t enum NEW STARTED DONE CLOSED
%typedef: Day_t enum Monday Tuesday Wednesday Thursday Friday
+ Saturday Sunday
It is possible to insert comments when describing an enum type. The
comments are delimited by parenthesis pairs. The contents of the
comments can be any character but parentheses. For example:
%typedef: TaskStatus_t enum
+ NEW (The task was just created)
+ IN_PROGRESS (Task started)
+ CLOSED (Task closed)
"Boolean" fields, declared with the type specifier 'bool', can be seen
as special enumerations holding the binary values true and false.
%typedef: Yesno_t bool
The literals allowed in boolean fields are 'yes/no', '0/1' and
'true/false'. Examples are:
SwitchedOn: 1
SwitchedOn: yes
SwitchedOn: false
File: recutils.info, Node: Date and Time Types, Next: Other Field Types, Prev: Enumerated Field Types, Up: Field Types
6.6 Date and Time Types
=======================
The "date" field type specifier can be used to declare dates and times.
The synopsis is:
%typedef: TYPE_NAME date
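For example, a type for appointment dates could be declared and used
as follows (the record and field names here are merely illustrative):
%rec: Appointment
%typedef: Appdate_t date
%type: When Appdate_t
When: 2001-1-10 12:09Z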
There are many permitted date formats, described in detail later in this
manual (*note Date input formats::). Of particular note are the
following:
- Dates and times read from recfiles are not affected by the locale
or the timezone. This means that the 'LC_TIME' and the 'TZ'
environment variables are ignored. If you wish, for example, to
specify a time which must be interpreted as UTC, you must
explicitly append the time zone correction: e.g. '2001-1-10
12:09Z'.
- The field value '1/10/2001' means January 10, 2001, *not* October
1, 2001.
- Relative times and dates (such as '1 day ago') are permitted but
are not particularly useful.
File: recutils.info, Node: Other Field Types, Prev: Date and Time Types, Up: Field Types
6.7 Other Field Types
=====================
The "Email" field type specifier is used to declare electronic
addresses. The synopsis is:
%typedef: Email_t email
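An example of a field of type email (the address shown is of course
illustrative):
Email: jdoe@example.org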
Sometimes it is useful to have fields whose values are field names.
For that purpose the "Field" field type specifier is supported. The
synopsis is:
%typedef: Field_t field
Universally Unique Identifiers (also known as UUIDs) are a way to assign
a globally unique label to some object. The "uuid" field type specifier
serves that purpose. The synopsis is:
%typedef: Id_t uuid
The format of the uuids is specified as 32 hexadecimal digits, displayed
in five groups separated by hyphens. For example:
550e8400-e29b-41d4-a716-446655440000
There is one other possible field type, viz: a foreign key. The
following example defines the type 'Maintainer_t' to be of type "record
'Hacker'"; in other words, a foreign key referring to a record in the
'Hacker' record set.
%typedef: Maintainer_t rec Hacker
This essentially means that the values to be stored in fields of type
'Maintainer_t' are of whatever type is defined for the primary key of
the 'Hacker' record set. Why this is useful is discussed later. *Note
Queries which Join Records::.
File: recutils.info, Node: Constraints on Record Sets, Next: Checking Recfiles, Prev: Field Types, Up: Top
7 Constraints on Record Sets
****************************
The records in a recfile are by default not restricted to any particular
structure except that they must contain one or more fields and optional
comments. This provides the format with huge expressive power; but in
many cases, it is also desirable to impose some restrictions in order to
reflect some of the properties of the data stored in the database. It
is also useful in order to preserve data integrity and thus avoid data
corruption.
The following sections describe the usage of some predefined special
fields whose purpose is to impose this kind of restriction on the
structure of the records.
* Menu:
* Mandatory Fields:: Requiring the presence of fields.
* Prohibited Fields:: Forbidding the presence of fields.
* Allowed Fields:: Restricting the presence of fields.
* Keys and Unique Fields:: Fields characterizing records.
* Size Constraints:: Constraints on the number of records in a set.
* Arbitrary Constraints:: Constraints records must comply with.
File: recutils.info, Node: Mandatory Fields, Next: Prohibited Fields, Up: Constraints on Record Sets
7.1 Mandatory Fields
====================
Sometimes, you want to make sure that _every_ record of a particular
type contains certain fields. To do this, use the special field
'%mandatory'. The usage is:
%mandatory: FIELD1 FIELD2 ... FIELDN
The field names are separated by one or more blank characters.
The fields listed in a '%mandatory' entry are non-optional; i.e. at
least one field with this name shall be present in any record of this
kind. Records violating this restriction are invalid and a checking
tool will report the situation as a data integrity failure.
Consider for example an "address book" database where each record
stores the information associated with a contact. The records will be
heterogeneous, in the sense that they won't all contain exactly the same
fields: the contact of an Internet shop will probably have a 'URL'
field, while the entry for our grandmother probably won't. We still
want to make sure that every entry has a field with the name of the
contact. In this case, we could use '%mandatory' as follows:
%rec: Contact
%mandatory: Name
Name: Granny
Phone: +12 23456677
Name: Yoyodyne Corp.
Email: sales@yoyod.com
Phone: +98 43434433
A word of caution, however: in many situations, especially in
day-to-day social interaction, it is common to find that certain
information is
simply unavailable. For example, although every person has a date of
birth, some people will refuse to provide that information.
It is probably wise therefore to avoid stipulating a field as
mandatory, unless it is essential to the enterprise. Otherwise, a data
entry clerk faced with this situation will have to make the choice
between dropping the entry entirely or entering some fake data to keep
the system happy.
File: recutils.info, Node: Prohibited Fields, Next: Allowed Fields, Prev: Mandatory Fields, Up: Constraints on Record Sets
7.2 Prohibited Fields
=====================
The inverse of '%mandatory' is '%prohibit'. Prohibited fields may not
occur in _any_ record of the given type. The usage is:
%prohibit: FIELD1 FIELD2 ... FIELDN
The field names are separated by one or more blank characters.
Fields listed in a '%prohibit' entry are forbidden; i.e. no field with
this name should be present in any record of this kind. Again, records
violating this restriction are invalid.
Several '%prohibit' fields can appear in the same record descriptor.
The set of prohibited fields is the union of all the entries. For
example, in the following database both 'Id' and 'id' are prohibited:
%rec: Entry
%prohibit: Id
%prohibit: id
One possible use case for prohibited fields arises when some field
name is reserved for some future use. For example, if we were
organizing a sports competition, we would want competitors to register
before the event. However a competitor's 'result' should not and cannot
be entered before the competition takes place. Initially then, we would
change the record descriptor as follows:
%rec: Contact
%mandatory: Name
%prohibit: result
At the start of the event, the '%prohibit' line can be deleted, to allow
results to be entered.
File: recutils.info, Node: Allowed Fields, Next: Keys and Unique Fields, Prev: Prohibited Fields, Up: Constraints on Record Sets
7.3 Allowed Fields
==================
In some cases we know the set of fields that may appear in the records
of a given type, even if they are not mandatory. The '%allowed' special
field is used to specify this restriction. The usage is:
%allowed: FIELD1 FIELD2 ... FIELDN
The field names are separated by one or more blank characters.
If one or more '%allowed' fields appear in a record descriptor, all
fields of all the records in the record set must be in the union of
'%allowed', '%mandatory' and '%key'. Otherwise an integrity error is
raised.
Several '%allowed' fields can appear in the same record descriptor. The
set of allowed fields is the union of all the entries.
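As an illustration, the following hypothetical descriptor admits only
the fields 'Id', 'Title' and 'Price' in the records of the set, 'Id'
being allowed by virtue of being the key:
%rec: Item
%key: Id
%allowed: Title Price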
File: recutils.info, Node: Keys and Unique Fields, Next: Size Constraints, Prev: Allowed Fields, Up: Constraints on Record Sets
7.4 Keys and Unique Fields
==========================
The '%unique' and '%key' special fields are used to avoid several
instances of the same field in a record, and to implement keys in record
sets. Their usage is:
%unique: FIELD1 FIELD2 ... FIELDN
%key: FIELD
The field names are separated by one or more blank characters.
Normally it is permitted for a record to contain two or more fields
of the same name. The '%unique' special field revokes this
permissiveness. A field declared "unique" cannot appear more than once
in a single record.
For example, an entry in an address book database could contain an
'Age' field. It does not make sense for a single person to be of
several ages. So, a field could be declared as "unique" in the
corresponding record descriptor as follows:
%rec: Contact
%mandatory: Name
%unique: Age
Several '%unique' fields can appear in the same record descriptor. The
set of unique fields is the union of all the entries.
'%key' makes the referenced field the primary key of the record set.
The primary key behaves as if both '%unique' and '%mandatory' had been
specified for that field. Additionally, there is a further
restriction, viz: a given value of a primary key field may appear no
more than once within a record set.
Consider for example a database of items in stock. Each item is
identified by a numerical 'Id' field. No item may have more than one
'Id', and no items may exist without an associated 'Id'. Additionally,
no two items may share the same 'Id'. This common situation can be
implemented by declaring 'Id' as the key in the record descriptor:
%rec: Item
%key: Id
%mandatory: Title
Id: 1
Title: Box
Id: 2
Title: Sticker big
It would not make sense to have several primary keys in a record set.
Thus, it is not allowed to have several '%key' fields in the same record
descriptor. It is also forbidden for two items to share the same 'Id'
value. Both of these situations would be data integrity violations, and
will be reported by a checking tool.
Elsewhere, we discuss how primary keys can be used to link one record
set to another using primary keys together with foreign keys. *Note
Queries which Join Records::.
File: recutils.info, Node: Size Constraints, Next: Arbitrary Constraints, Prev: Keys and Unique Fields, Up: Constraints on Record Sets
7.5 Size Constraints
====================
Sometimes it is desirable to place constraints on entire records. This
can be done with the '%size' special field which is used to limit the
number of records in a record set. Its usage is:
%size: [RELATIONAL_OPERATOR] NUMBER
If no operator is specified then NUMBER is interpreted as the exact
number of records of this type. The number can be any integer literal,
including hexadecimal and octal constants. For example:
%rec: Day
%size: 7
%type: Name enum
+ Monday Tuesday Wednesday Thursday Friday
+ Saturday Sunday
%doc: There should be exactly 7 days.
The optional RELATIONAL_OPERATOR shall be one of '<', '<=', '>' and
'>='. For example:
%rec: Item
%key: Id
%size: <= 100
%doc: We have at most 100 different articles.
It is valid to specify a size of '0', meaning that no records of this
type shall exist in the file.
Only one '%size' field shall appear in a record descriptor.
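As an illustrative sketch, a record set that is reserved for future
use but must remain empty for the time being could be declared as:
%rec: Reserved
%size: 0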
File: recutils.info, Node: Arbitrary Constraints, Prev: Size Constraints, Up: Constraints on Record Sets
7.6 Arbitrary Constraints
=========================
Occasionally, '%mandatory', '%prohibit' and '%size' are just not
flexible enough. We might, for instance, want to ensure that _if_ a
field is present, then it must have a certain relationship to other
fields. Or we might want to stipulate that under certain conditions
only, a record contains a particular field.
To this end, recutils provides a way for arbitrary field constraints
to be defined. These permit restrictions on the presence and/or value
of fields, based upon the value or presence of other fields within that
record. This is done using the '%constraint' special field. Its usage
is:
%constraint: EXPR
where EXPR is a selection expression (*note Selection Expressions::).
When a constraint is present in a record set it means that all the
records of that type must satisfy the selection expression, i.e. the
evaluation of the expression with the record returns 1. Otherwise an
integrity error is raised.
Consider for example a record type 'Task' featuring two fields of
type date called 'Start' and 'End'. We can use a constraint in the
record set to specify that the task cannot start after it finishes:
%rec: Task
%type: Start,End date
%constraint: Start << End
The "implies" operator '=>' is especially useful when defining
constraints, since it can be used to specify conditional constraints,
i.e. constraints applying only in certain records. For example, we
could specify that if a task is closed then it must have an 'End' date
in the following way:
%rec: Task
%type: Start,End date
%constraint: Start << End
%constraint: Status = 'CLOSED' => #End
It is acceptable to declare several constraints in the same record
set.
File: recutils.info, Node: Checking Recfiles, Next: Remote Descriptors, Prev: Constraints on Record Sets, Up: Top
8 Checking Recfiles
*******************
Sometimes, when creating a recfile by hand, typographical errors or
other mistakes will occur. If a recfile contains such mistakes, then
one cannot rely upon the results of queries or other operations.
Fortunately there is a tool called 'recfix' which can find these errors.
It is a good idea to get into the habit of running 'recfix' on a file
after editing it, and before trying other commands.
* Menu:
* Syntactical Errors:: Fixing structure errors in recfiles.
* Semantic Errors:: Fixing semantic errors in recfiles.
File: recutils.info, Node: Syntactical Errors, Next: Semantic Errors, Up: Checking Recfiles
8.1 Syntactical Errors
======================
One easy mistake is to forget the colon separating the field name from
its value.
%rec: Article
%key Id
Name: Thing
Id: 0
Running 'recfix' on this file will immediately tell us that there is a
problem:
$ recfix --check inventory.rec
inventory.rec: 2: error: expected a record
Here, 'recfix' has diagnosed a problem in the file 'inventory.rec' and
the problem lies at line 2. If, as in this case, 'recfix' shows there
is a problem with the recfile, you should attend to that problem before
trying to use any other recutils program on that file, otherwise strange
things could happen. The '--check' flag is optional; checking is the
default operation, so the flag is normally not required.
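In the example above the fix is to restore the missing colon after
'%key'; with that change the file passes the check:
%rec: Article
%key: Id
Name: Thing
Id: 0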
File: recutils.info, Node: Semantic Errors, Prev: Syntactical Errors, Up: Checking Recfiles
8.2 Semantic Errors
===================
However 'recfix' checks more than the syntactical integrity of the
recfile. It also checks certain semantics and that the data is
self-consistent. To do this, it uses the special fields of the record,
some of which were introduced above (*note Constraints on Record
Sets::). It is a good idea to use the special fields to stipulate the
"enterprise rules" of the data.
Errors will be reported if any of the following special keywords are
present and the data does not match the stipulated conditions:
'%mandatory'
The mandated fields are missing from a record.
'%prohibit'
The prohibited fields are present in a record.
'%unique'
There is more than one field in a single record of the given name.
'%key'
Two or more records share the same value of the field which is the
key field.
'%typedef and %type'
A field has a value which does not conform to the specified type.
'%size'
The number of records does not conform to the specified
restriction.
'%constraint'
A field does not conform to the specified constraint.
'%confidential'
An unencrypted value exists for a confidential field.
File: recutils.info, Node: Remote Descriptors, Next: Grouping and Aggregates, Prev: Checking Recfiles, Up: Top
9 Remote Descriptors
********************
The '%rec' special field is used for two main purposes: to identify a
record as a record descriptor, and to provide a name for the described
record set. The synopsis of the usage of the field is the following:
%rec: TYPE [URL_OR_FILE]
TYPE is the name of the kind of records described by the descriptor. It
is mandatory to specify it, and it follows the same lexical conventions
used by field names. *Note Fields::. There is a non-enforced
convention to use singular nouns, because the name makes reference to
the type of a single entity, even if it applies to all the records
contained in the record set. For example, the following record set
contains transactions, and the type specified in the record descriptor
is 'Transaction'.
%rec: Transaction
Id: 10
Title: House rent
Id: 11
Title: Loan
Only one '%rec' field should be in a record descriptor. If there are
more it is an integrity violation. It is highly recommended (but not
enforced) to place this field in the first position of the record
descriptor.
Sometimes it is convenient to store records of the same type in
different files. The duplication of record descriptors in this case
would surely lead to consistency problems. A possible solution would be
to keep the record descriptor in a separated file and then include it in
any operation by using pipes. For example:
$ cat descriptor.rec data.rec | recsel ...
For those cases it is more convenient to use an "external descriptor".
External descriptors can be built by appending a file path to the
'%rec' field value, like:
%rec: FSD_Entry /path/to/file.rec
The previous example indicates that a record descriptor describing
the 'FSD_Entry' records shall be read from the file '/path/to/file.rec'.
A record descriptor for 'FSD_Entry' may not exist in the external file.
Both relative and absolute paths can be specified there.
URLs can be used as sources for external descriptors as well. In
that case we talk about "remote descriptors". For example:
%rec: Department http://www.myorg.com/Org.rec
The URL shall point to a text file containing rec data. If there is a
record descriptor in the remote file documenting the 'Department' type,
it will be used.
Note that the local record descriptor can provide additional fields
to "expand" the record type. For example:
%rec: FSD_Entry http://www.jemarch.net/downloads/FSD.rec
%mandatory: Rating
The record descriptor above includes the contents of the 'FSD_Entry'
record descriptor from the URL and adds them to the local record
descriptor, which in this case contains just the '%mandatory' field.
If you are using GNU recutils (*note Invoking the Utilities::) to
process your recfiles, any URL scheme supported by 'libcurl' will work.
File: recutils.info, Node: Grouping and Aggregates, Next: Queries which Join Records, Prev: Remote Descriptors, Up: Top
10 Grouping and Aggregates
**************************
Grouping and aggregate functions are two related features which are
useful to extract statistics from a record set, or a subset of that
record set.
* Menu:
* Grouping Records:: Combining records by fields.
* Aggregate Functions:: Statistics and more.
File: recutils.info, Node: Grouping Records, Next: Aggregate Functions, Up: Grouping and Aggregates
10.1 Grouping Records
=====================
Consider a recfile containing a list of items in a shop inventory.
For each item the database stores its type, its category, its price,
the date on which an item of that type was last sold, and the number
of items currently available in stock. A sample of such a database
could be:
Type: EC Car
Category: Toy
Price: 12.2
LastSell: 20-April-2012
Available: 623
Type: Terria
Category: Food
Price: 0.60
LastSell: 22-April-2012
Available: 8239
Type: Typex
Category: Office
Price: 1.20
LastSell: 22-April-2012
Available: 10878
Type: Notebook
Category: Office
Price: 1.00
LastSell: 21-April-2012
Available: 77455
Type: Sexy Puzzle
Category: Toy
Price: 6.20
LastSell: 6.20
Available: 12
Now imagine we are interested in grouping the contents of the 'Items'
record set in groups of items of the same category. We can do it using
the '-G' command line argument for 'recsel'. This argument accepts a
list of fields separated by commas. The argument can be read as "group
by".
In this case we want to group by 'Category', so we would do:
$ recsel -G Category items.rec
Type: Terria
Category: Food
Price: 0.60
LastSell: 22-April-2012
Available: 8239
Type: Typex
Category: Office
Price: 1.20
LastSell: 22-April-2012
Available: 10878
Type: Notebook
Price: 1.00
LastSell: 21-April-2012
Available: 77455
Type: EC Car
Category: Toy
Price: 12.2
LastSell: 20-April-2012
Available: 623
Type: Sexy Puzzle
Price: 6.20
LastSell: 6.20
Available: 12
We can see that the output is three records, corresponding to the three
different categories of items present in the database. However, we are
only interested in the types of products in each category, so we can
remove unwanted information using '-p':
$ recsel -G Category -p Category,Type items.rec
Category: Food
Type: Terria
Category: Office
Type: Typex
Type: Notebook
Category: Toy
Type: EC Car
Type: Sexy Puzzle
It is also possible to group by several fields. We could group by both
'Category' and 'LastSell':
$ recsel -G Category,LastSell -p Category,LastSell,Type items.rec
Category: Food
LastSell: 22-April-2012
Type: Terria
Category: Office
LastSell: 21-April-2012
Type: Notebook
Category: Office
LastSell: 22-April-2012
Type: Typex
Category: Toy
LastSell: 20-April-2012
Type: EC Car
Category: Toy
LastSell: 6.20
Type: Sexy Puzzle
File: recutils.info, Node: Aggregate Functions, Prev: Grouping Records, Up: Grouping and Aggregates
10.2 Aggregate Functions
========================
recutils supports "aggregate functions". These are so called because
they accept a record set and a field name as inputs and generate a
single result. Usually this result is numerical.
The supported aggregate functions are the following:
'Count(FIELD)'
Counts the number of occurrences of a field.
'Avg(FIELD)'
Calculates the average (mean) of the numerical values of a field.
'Sum(FIELD)'
Calculates the sum of the numerical values of a field.
'Min(FIELD)'
Calculates the minimum numerical value of a field.
'Max(FIELD)'
Calculates the maximum numerical value of a field.
The aggregate functions are to be invoked in the field expressions in
'recsel'. By default they are applied to the totality of the records in
a record set, much as SQL aggregate functions apply to all the tuples
in a relation. For example, using the items database from the
previous section and the 'Count' aggregate function, we can calculate
the number of fields named 'Category' present in the record set as
follows:
$ recsel -p "Count(Category)" items.rec
Count_Category: 5
The result is a field whose name is derived from the function name and
the field passed as its parameter, separated by an underscore. This
name
scheme probably suffices for most purposes, but it is always possible to
use a rewrite rule to obtain something different:
$ recsel -p "Count(Category):NumCategories" items.rec
NumCategories: 5
You can use different letter case in writing the name of the aggregate,
and this will be reflected in the field name:
$ recsel -p "CoUnT(Category)" items.rec
CoUnT_Category: 5
It is possible to use more than one aggregate function in the field
expression. Suppose we are also interested in the average price of the
items we sell. We can use the 'Avg' aggregate:
$ recsel -p "Count(Category),Avg(Price)" items.rec
Count_Category: 5
Avg_Price: 4.240000
Now let's add a field along with an aggregate function to the field
expression and see what we get:
$ recsel -p "Type,Avg(Price)" items.rec
Type: EC Car
Avg_Price: 12.200000
Type: Terria
Avg_Price: 0.600000
Type: Typex
Avg_Price: 1.200000
Type: Notebook
Avg_Price: 1
Type: Sexy Puzzle
Avg_Price: 6.200000
We get five records! The reason is that when _only_ aggregate functions
are part of the field expression, they are applied to the single record
that would result from concatenating all the records in the record set
together. However, when a regular field appears in the field expression
the aggregate functions are applied to the individual records. This is
still useful in some cases, such as a database of maintainers:
Name: Jose E. Marchesi
Email: jemarch@gnu.org
Email: jemarch@es.gnu.org
Name: Luca Saiu
Email: positron@gnu.org
Let's see how many emails each maintainer has:
$ recsel -p "Name,Count(Email)" maintainers.rec
Name: Jose E. Marchesi
Count_Email: 2
Name: Luca Saiu
Count_Email: 1
Aggregate functions are most useful when we combine them with grouping.
This is when we are interested in some property of a subset of the
records in the database. For example, the average prices of each item
category stored in the database can be obtained by executing:
$ recsel -p "Category,Avg(Price)" -G Category items.rec
Category: Food
Avg_Price: 0.600000
Category: Office
Avg_Price: 1.100000
Category: Toy
Avg_Price: 9.200000
If we were interested in the actual prices that result in each average
we can do:
$ recsel -p "Category,Price,Avg(Price)" -G Category items.rec
Category: Food
Price: 0.60
Avg_Price: 0.600000
Category: Office
Price: 1.20
Price: 1.00
Avg_Price: 1.100000
Category: Toy
Price: 12.2
Price: 6.20
Avg_Price: 9.200000
File: recutils.info, Node: Queries which Join Records, Next: Auto-Generated Fields, Prev: Grouping and Aggregates, Up: Top
11 Queries which Join Records
*****************************
Suppose you wanted to add the residential address of the people in the
'acquaintances.rec' file from *note Simple Selections::.
One way to do this is as follows:
%type: Dob date
Name: Alfred Nebel
Dob: 20 April 2010
Email: alf@example.com
Address: 42 Abbeter Way, Inprooving, WORCS
Telephone: 01234 5676789
Name: Mandy Nebel
Dob: 21 February 1972
Email: mandy@example.com
Address: 42 Abbeter Way, Inprooving, WORCS
Telephone: 01234 5676789
Name: Bertram Nebel
Dob: 3 January 1966
Email: bert@example.com
Address: 42 Abbeter Way, Inprooving, WORCS
Telephone: 01234 5676789
Name: Charles Spencer
Dob: 4 July 1997
Email: charlie@example.com
Address: 2 Serpe Rise, Little Worning, SURREY
Telephone: 09876 5432109
Name: Dirk Spencer
Dob: 29 June 1945
Email: dirk@example.com
Address: 2 Serpe Rise, Little Worning, SURREY
Telephone: 09876 5432109
Name: Ernest Wright
Dob: 26 April 1978
Email: ernie@example.com
Address: 1 Wanter Rise, Greater Inncombe, BUCKS
This will work fine. However, you will notice that there are two
addresses at which more than one person lives (presumably they are
members of the same family). This has a number of disadvantages:
- You have to type (or copy) the same information several times.
- Should a family move house, then you would have to update the
addresses (and telephone number) of all the family members.
- A typing error in one of the addresses would lead an automatic
query to erroneously suggest that the people lived at different
addresses.
- It unnecessarily increases the size of the recfile.
* Menu:
* Foreign Keys:: Referring to records from other record sets.
* Joining Records:: Performing cross-joins.
File: recutils.info, Node: Foreign Keys, Next: Joining Records, Up: Queries which Join Records
11.1 Foreign Keys
=================
A better way would be to separate the addresses and people into
different record sets. The first record set might look like this:
%rec: Person
%type: Dob date
%type: Abode rec Residence
Name: Alfred Nebel
Dob: 20 April 2010
Email: alf@example.com
Abode: 42AbbeterWay
Name: Mandy Nebel
Dob: 21 February 1972
Email: mandy@example.com
Mobile: 0555 342123
Abode: 42AbbeterWay
Name: Bertram Nebel
Dob: 3 January 1966
Email: bert@example.com
Abode: 42AbbeterWay
Name: Charles Spencer
Dob: 4 July 1997
Email: charlie@example.com
Abode: 2SerpeRise
Name: Dirk Spencer
Dob: 29 June 1945
Email: dirk@example.com
Mobile: 0555 342123
Abode: 2SerpeRise
Name: Ernest Wright
Dob: 26 April 1978
Abode: ChezGrampa
and the second (following in the same file), like this:
%rec: Residence
%key: Id
Address: 42 Abbeter Way, Inprooving, WORCS
Telephone: 01234 5676789
Id: 42AbbeterWay
Address: 2 Serpe Rise, Little Worning, SURREY
Telephone: 09876 5432109
Id: 2SerpeRise
Address: 1 Wanter Rise, Greater Inncombe, BUCKS
Id: ChezGrampa
Here you can see that there are two record sets viz: 'Person' and
'Residence'. There are six people, but only three residences, because
some residences accommodate more than one person. Note also that the
'Residence' descriptor has the entry '%key: Id' whilst the 'Person'
descriptor has '%type: Abode rec Residence'. This is because 'Abode' is
the foreign key which identifies the residence where a person lives.
We could have declared the 'Id' field as '%auto'. This would have
had the advantage that we need not manually update it. However, we
decided that the 'Abode' field values in the 'Person' records are better
as alphanumeric fields, so that they can contain human readable values.
In this way, it is self-evident by reading a 'Person' record where that
person lives. Yet since the 'Id' field is declared using the '%key'
special field name, you can be sure that you don't accidentally reuse an
existing key.
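Had we opted for automatically generated numeric identifiers instead,
the 'Residence' descriptor could have been declared as in the
following sketch:
%rec: Residence
%key: Id
%auto: Id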
File: recutils.info, Node: Joining Records, Prev: Foreign Keys, Up: Queries which Join Records
11.2 Joining Records
====================
The above example has also added a new field to the 'Person' record set
to contain that person's mobile phone number. Note that the 'Telephone'
field belongs to the 'Residence' record set because that contains the
telephone number of the home, whereas 'Mobile' belongs to 'Person' since
mobile telephones are normally used exclusively by one individual.
If we want to look up the name and address of a person in our
recfile, we can use 'recsel' as before. Because we now have more than
one record set in the 'acquaintances.rec' file, we have to tell 'recsel'
in which record set we want to look up records. We do this with the
'-t' flag as follows:
$ recsel -t Person -P Name,Abode acquaintances.rec
Alfred Nebel
42AbbeterWay
Mandy Nebel
42AbbeterWay
Bertram Nebel
42AbbeterWay
Charles Spencer
2SerpeRise
Dirk Spencer
2SerpeRise
Ernest Wright
ChezGrampa
This result tells us the names of all the people in the recfile, as
well as giving a concise and hopefully effective reminder telling us
where they live. However these results would not be useful to someone
unacquainted with the individuals. They need a list of names and full
addresses. We can use 'recsel' to produce such a list:
$ recsel -t Person -j Abode acquaintances.rec
Name: Charles Spencer
Dob: 4 July 1997
Email: charlie@example.com
Abode_Address: 2 Serpe Rise, Little Worning, SURREY
Abode_Telephone: 09876 5432109
Abode_Id: 2SerpeRise
Name: Dirk Spencer
Dob: 29 June 1945
Email: dirk@example.com
Mobile: 0555 342123
Abode_Address: 2 Serpe Rise, Little Worning, SURREY
Abode_Telephone: 09876 5432109
Abode_Id: 2SerpeRise
Name: Ernest Wright
Dob: 26 April 1978
Abode_Address: 1 Wanter Rise, Greater Inncombe, BUCKS
Abode_Id: ChezGrampa
The '-t' flag we have seen before. It tells 'recsel' that we want to
extract records of type 'Person'. The '-j' flag is new. It says that
we want to perform a "join". Specifically we want to join the 'Person'
records according to their 'Abode' field.
In the above example, 'recsel' displays several field names which do
not appear anywhere in the input e.g. 'Abode_Address'. This is the
'Address' field in the record joined by the foreign key 'Abode'. In
this example probably only the name and address are of interest. The
other information such as date of birth is incidental. The foreign key
'Abode_Id' is certainly not wanted in the output since it is redundant.
As usual, you can use the '-P' or '-p' options to limit the fields which
will be displayed. However the full joined field name, if appropriate,
must be specified. So the names and addresses without the other
information can be retrieved thus:
$ recsel -t Person -j Abode -p Name,Abode_Address acquaintances.rec
Name: Charles Spencer
Abode_Address: 2 Serpe Rise, Little Worning, SURREY
Name: Dirk Spencer
Abode_Address: 2 Serpe Rise, Little Worning, SURREY
Name: Ernest Wright
Abode_Address: 1 Wanter Rise, Greater Inncombe, BUCKS
File: recutils.info, Node: Auto-Generated Fields, Next: Encryption, Prev: Queries which Join Records, Up: Top
12 Auto-Generated Fields
************************
Consider for example a list of articles in stock in a toy store:
%rec: Item
%key: Description
Description: 2cm metal soldier WWII
Amount: 2111
Description: Flying Helicopter Indoor Maxi
Amount: 8
...
It would be natural to identify the items by their descriptions, but
it is also error prone: was it "Flying Helicopter Indoor Maxi" or
"Flying Helicopter Maxi Indoor"? Was "Helicopter" in lower case or
upper case?
Thus it is quite common in databases to use some kind of numeric "Id"
to uniquely identify items like those ones, because numbers are easy to
increment and manipulate. So we could add a new numeric 'Id' field and
use it as the primary key:
%rec: Item
%key: Id
%mandatory: Description
Id: 0
Description: 2cm metal soldier WWII
Amount: 2111
Id: 1
Description: Flying Helicopter Indoor Maxi
Amount: 8
...
A problem with this approach is that we must be careful to not assign
already used ids when we introduce more articles in the database. Other
than its uniqueness, it is not important which number is associated with
which article.
To ease the management of those Ids, database systems usually provide
a facility called "auto-counters". Auto-counters can be implemented
in
recfiles using the '%auto' directive in the record descriptor. Its
usage is:
%auto: FIELD1 FIELD2 ... FIELDN
The field names are separated by one or more blank characters.
There can be several '%auto' fields in the same record descriptor, the
effective list of auto-generated fields being the union of all the
entries.
When 'recins' inserts a new record in the recfile, it looks for any
declared auto field. If any of these fields are not provided explicitly
in the command line then 'recins' generates them along with the
user-provided fields. Such auto fields are generated at the beginning
of the new records, in the same order they are found in the '%auto'
directives.
For example, consider a 'items.rec' database with an empty record
set:
%rec: Item
%key: Id
%auto: Id
%mandatory: Description
If we insert a new record and we do not specify an 'Id' then it will be
generated automatically by 'recins':
$ recins -t Item -f Description -v 'recutils t-shirts' \
-f Amount -v 200 \
items.rec
$ cat items.rec
%rec: Item
%key: Id
%auto: Id
%mandatory: Description
Id: 0
Description: recutils t-shirts
Amount: 200
The concrete effect of the '%auto' directive depends on the type of the
affected field. The following sections document how.
* Menu:
* Counters:: Generating incremental Ids.
* Unique Identifiers:: Generating universally unique Ids.
* Time-Stamps:: Tracking the creation of records.
File: recutils.info, Node: Counters, Next: Unique Identifiers, Up: Auto-Generated Fields
12.1 Counters
=============
If an auto field is of type 'integer' or 'range' then any newly
generated field will use the "next biggest" unused number in the record
set.
Consider the toy inventory database introduced above. We could
declare the 'Id' field to be generated automatically:
%rec: Item
%key: Id
%type: Id int
%mandatory: Description
%auto: Id
Id: 0
Description: 2cm metal soldier WWII
Amount: 2111
When the next new item is introduced in the database, 'recins' will note
the '%auto', and create a new 'Id' field for the new record with the
next-biggest unused integer, since 'Id' is declared to be of type 'int'.
In this example, the new record would have an Id of '1'. The user can
still provide an explicit Id for the new record; in that case the
field is not generated automatically.
Note that if no explicit type is defined for an auto-generated field
then it is assumed to be an integer.
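An invocation that lets 'recins' pick the next unused Id could look
like the following (the description and amount are illustrative
values):
$ recins -t Item -f Description -v 'Wooden blocks' -f Amount -v 12 items.rec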
File: recutils.info, Node: Unique Identifiers, Next: Time-Stamps, Prev: Counters, Up: Auto-Generated Fields
12.2 Unique Identifiers
=======================
Universally Unique Identifiers, often abbreviated as UUIDs, can also be
auto-generated using recutils. Suppose you maintain a database with
events featuring the following record descriptor:
%rec: Event
%key: Id
%mandatory: Title Date
What would be appropriate to identify each event? We could use an
integer and declare it as auto-generated. After adding two events the
database would look like this:
%rec: Event
%key: Id
%mandatory: Title Date
Id: 0
Title: Team meeting
Date: 12-08-2013
Id: 1
Title: Dave's birthday
Date: 20-12-2013
However, suppose that we want to share our events with other people,
i.e. to send them event records and to incorporate their records into
our own database. In this case the 'Id's would collide. A good
solution is to use 'uuids' and declare them as 'auto':
%rec: Event
%key: Id
%type: Id uuid
%auto: Id
%mandatory: Title Date
Id: f81d4fae-7dec-11d0-a765-00a0c91e6bf6
Title: Team meeting
Date: 12-08-2013
Id: f81d4fae-dc18-11d0-a765-a01328400a0c
Title: Dave's birthday
Date: 20-12-2013
File: recutils.info, Node: Time-Stamps, Prev: Unique Identifiers, Up: Auto-Generated Fields
12.3 Time-Stamps
================
Auto generated dates can be used to implement automatic timestamps.
Consider for example a "Transfer" record set registering bank transfers.
We want to save a timestamp every time a transfer is done, so we include
an '%auto' for the date:
%rec: Transfer
%key: Id
%type: Id int
%type: Date date
%auto: Id Date
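A transfer could then be inserted without specifying either the 'Id'
or the 'Date'; both get generated by 'recins'. In the following
sketch the 'From', 'To' and 'Amount' fields are hypothetical:
$ recins -t Transfer -f From -v 'Savings' -f To -v 'Checking' \
-f Amount -v 100 transfers.rec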
File: recutils.info, Node: Encryption, Next: Generating Reports, Prev: Auto-Generated Fields, Up: Top
13 Encryption
*************
For ethical or security reasons it is sometimes necessary that
information in a recfile should not be readable by unauthorized people.
One way to prevent a recfile from being read is to use the security
features of the operating system. A more secure way would be to encrypt
the entire recfile using a free strong encryption program such as GnuPG
(http://gnu.org/software/gnupg). The disadvantage of both these methods
is that the entire recfile has to be secured when it may well be the
case that only certain data need to be protected.
Recutils offers a way to encrypt specified fields in a record, whilst
leaving the rest in clear text.
* Menu:
* Confidential Fields:: Declaring fields as sensitive data.
* Encrypting Files:: Encrypt confidential fields.
* Decrypting Data:: Reading encrypted fields.
File: recutils.info, Node: Confidential Fields, Next: Encrypting Files, Up: Encryption
13.1 Confidential Fields
========================
To specify that a field should be encrypted, use the '%confidential'
special field. This special field declares a set of fields as
"confidential", meaning they contain secret data such as passwords or
personal information. Its usage is:
%confidential: FIELD1 FIELD2 ... FIELDN
The field names are separated by one or more blank characters. There
can be several '%confidential' fields in the same record descriptor, the
effective list of confidential fields being the union of all the
entries.
Declaring a field as confidential indicates that its contents must
not be stored in plain text, but encrypted with a password-based
mechanism. When the information is retrieved from the database the
confidential fields are unencrypted if the correct password is provided.
Likewise, when information is inserted in the database the confidential
fields are encrypted with some given password.
For example, consider a database of users of some service. For each
user we want to store a name, a login name, an email address and a
password. All this information is public with the obvious exception of
the password. Thus we declare the 'Password' field as confidential in
the corresponding record descriptor:
%rec: Account
%type: Name line
%type: Login line
%type: Email email
%confidential: Password
The rec format does not impose the usage of a specific encryption
algorithm, but requires that:
- The algorithm must be password-based.
- The value of any encrypted field shall begin with the string
'encrypted-' followed by the encrypted data.
- The encrypted data must be encoded in some ASCII encoding such as
base64.
The above rules ensure that it is possible to determine whether a
given field is encrypted. For example, the following is an excerpt from
the account database described above. It contains an entry with the
password encrypted and another with the password unencrypted:
Name: Mr. Foo
Login: foo
Email: foo@foo.com
Password: encrypted-AAABBBCCDDDEEEFFF
Name: Mr. Bar
Login: bar
Email: bar@bar.com
Password: secret
Unencrypted confidential fields are a data integrity error, and
utilities like 'recfix' will report it. The same utility can be used to
"fix" the database by massively encrypting any unencrypted field.
Nothing prevents the usage of several passwords in the same database.
This allows the establishment of several levels of security, or
security profiles. For example, we may want to store different
passwords for
different online services:
%rec: Account
%confidential: WebPassword ShellPassword
We could then encrypt WebPassword entries using a password shared among
all the webmasters, and the ShellPassword entries with a more restricted
password available only to the administrator of the machine.
Note that since the utilities accept only one password at a time,
different passwords cannot be used in a single decryption operation.
This
means that in the example above the administrator would need to run
'recsel' twice in order to decrypt all the encrypted data in the
recfile.
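For example, the administrator in the scenario above would run
something like the following two commands, one per password (the
passwords shown are of course hypothetical):
$ recsel -t Account -s webmasters-secret accounts.rec
$ recsel -t Account -s admin-secret accounts.rec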
The GNU recutils fully support encrypted fields. See the
documentation for 'recsel', 'recins' and 'recfix' for details on how to
operate on files containing confidential fields.
File: recutils.info, Node: Encrypting Files, Next: Decrypting Data, Prev: Confidential Fields, Up: Encryption
13.2 Encrypting Files
=====================
'recins' allows the insertion of encrypted fields in a database. When
the '-s' ('--password') command line option is specified, any field
declared as confidential in the record descriptor will
get encrypted using the given passphrase. If the command is executed
interactively and '-s' is not used then the user is asked to provide a
password using the terminal. For example, the invocation:
$ recins -t Account -s mypassword -f Login -v foo -f Password \
-v secret accounts.rec
will encrypt the value of the 'Password' field with 'mypassword' as long
as the field is declared as confidential. (*note Confidential Fields::
for details on confidential fields).
'recins' will issue a warning if a confidential field is inserted in
the database but no password was provided to encrypt it. This is to
avoid having unencrypted sensitive data in the recfiles.
File: recutils.info, Node: Decrypting Data, Prev: Encrypting Files, Up: Encryption
13.3 Decrypting Data
====================
The contents of confidential fields can be read using the '-s'
('--password') command line option to 'recsel'. When used, any selected
record containing encrypted fields will try to decrypt them with the
given password. If the operation succeeds then the output will include
the unencrypted data. Otherwise the ASCII-encoded encrypted data will
be emitted.
If 'recsel' is invoked interactively and no password is specified
with '-s', the user will be asked for a password in case one is needed.
No echo of the password will appear on the screen. The provided
password will be used to decrypt all confidential fields as if it had
been specified with '-s'.
For example, consider the following database storing information
about the user accounts of some online service. Each entry stores a
login, a full name, email and a password. The password is declared as
confidential:
%rec: Account
%key: Login
%confidential: Password
Login: foo
Name: Mr. Foo
Email: foo@foo.com
Password: encrypted-AAABBBCCCDDD
Login: bar
Name: Ms. Bar
Email: bar@bar.org
Password: encrypted-XXXYYYZZZUUU
If we use 'recsel' to get a list of records of type 'Account' without
specifying a password, or if the wrong password was specified in
interactive mode, then we would get the following output with the
encrypted values:
$ cat accounts.rec | recsel -t Account -p Login,Password
Login: foo
Password: encrypted-AAABBBCCCDDD
Login: bar
Password: encrypted-XXXYYYZZZUUU
If we specify a password and both entries were encrypted using that
password, we would get the unencrypted values:
$ recsel -t Account -s secret -p Login,Password accounts.rec
Login: foo
Password: foosecret
Login: bar
Password: barsecret
As mentioned above, a confidential field may be encrypted with
different passwords in different records (*note Confidential Fields::).
For example, we may have an entry in our database with data about the
account of the administrator of the online service. In that case we
might want to store the password associated with that account using a
different password than that for users. In that case the output of the
last command would have been:
$ recsel -t Account -s secret -p Login,Password accounts.rec
Login: foo
Password: foosecret
Login: bar
Password: barsecret
Login: admin
Password: encrypted-TTTVVVBBBNNN
We would need to invoke 'recsel' with the password used to encrypt the
admin entry in order to read it back unencrypted.
File: recutils.info, Node: Generating Reports, Next: Interoperability, Prev: Encryption, Up: Top
14 Generating Reports
*********************
Having a list of names and addresses, one might want to use this list to
address envelopes (say, to send annual greeting cards). Since addresses
are normally written on several lines, it would be appropriate then to
split the 'Address' field values across multiple lines as described in
*note Fields::. Suitable text can now be obtained thus:
$ recsel -t Person -j Abode -P Name,Abode_Address acquaintances.rec
Charles Spencer
2 Serpe Rise,
Little Worning,
SURREY
Dirk Spencer
2 Serpe Rise,
Little Worning,
SURREY
Ernest Wright
1 Wanter Rise,
Greater Inncombe,
BUCKS
A business enterprise might want to go one step further and generate
letters (such as an advertisement or a recall notice) to customers.
Since 'recsel' merely selects records and fields from record sets, on
its own it cannot do this; so there is another command designed for this
purpose, called 'recfmt'. This command uses a "template" which defines
the general form of the desired output. A letter template might look as
follows:
{{Name}}
{{Abode_Address}}
Dear {{Name}},
Re: Special offer for January
We are delighted to be able to offer you a 95% discount on all car and
truck hire contracts between 1 January and 2 February. Please call us
to take advantage of this offer.
Yours sincerely,
Karen van Rental (CEO)
^L
It is best to place such a template into a file, so that you can edit
it as you wish. Notice the instances of double braces enclosing a field
name, e.g. '{{Name}}'. These are called "spots" and indicate places
where the respective field's value should be placed. Let's assume this
template is in a file called 'offer.templ'. We can then pipe the output
from 'recsel' into 'recfmt' as follows:
$ recsel -t Person -j Abode acquaintances.rec | recfmt -f offer.templ
Charles Spencer
2 Serpe Rise,
Little Worning,
SURREY
Dear Charles Spencer,
Re: Special offer for January
We are delighted to be able to offer you a 95% discount on all car and
.
.
.
For each record that 'recsel' selects, one copy of 'offer.templ' will be
generated. Each spot will be replaced with the field value
corresponding to the field name in the spot.
* Menu:
* Templates:: Formatted output.
File: recutils.info, Node: Templates, Up: Generating Reports
14.1 Templates
==============
A recfmt template is a text string that may contain "template spots".
Those spots are substituted in the template using the information of a
given record. Any text that is not within a spot is copied literally to
the output.
Spots are written surrounded by double curly braces, like:
{{...}}
Spots contain selection expressions, that are executed every time the
template is applied to a record. The spot is then replaced by the
string representation of the value returned by the expression.
For example, consider the following template:
Task {{Id}}: {{Summary}}
------------------------
{{Description}}
--
Created at {{CreatedAt}}
When applied to the following record:
Id: 123
Summary: Fix recfmt.
CreatedAt: 12 December 2010
Description:
+ The recfmt tool shall be fixed, because right
+ now it is leaking 200 megabytes per processed record.
The result is:
Task 123: Fix recfmt.
------------------------
The recfmt tool shall be fixed, because right
now it is leaking 200 megabytes per processed record.
--
Created at 12 December 2010
You can use any selection expression in the spots, including
conditionals and string concatenation.
File: recutils.info, Node: Interoperability, Next: Bash Builtins, Prev: Generating Reports, Up: Top
15 Interoperability
*******************
Included in the recutils package are a number of utilities to assist in
the creation of recfiles using data which already exists in other
formats, and for exporting data from recfiles so that it can be used in
other applications.
* Menu:
* CSV Files:: Converting recfiles to/from csv files.
* Importing MDB Files:: Importing MS Access Databases.
File: recutils.info, Node: CSV Files, Next: Importing MDB Files, Up: Interoperability
15.1 CSV Files
==============
Many applications are able to read and write files containing so-called
"comma separated values". Such files generally contain tabular data
where the columns are separated by commas and the rows by line feed
and/or carriage return characters. Although record sets are not tables,
tables can be easily emulated using records having the same fields in
the same order. For example:
a: value
b: value
c: value
a: value
b: value
c: value
...
In several respects records are more flexible than tables:
- Fields can appear in a different order in several records.
- There can be several fields with the same name in a single record.
- Records can differ in the number of fields.
It is evident that records, such as those in recfiles, are a more
general structure than comma separated values. This means that when
converting from csv files to recfiles, certain decisions need to be
made. The 'rec2csv' utility (*note Invoking rec2csv::) implements an
algorithm to deal with this problem and generate a table that the user
expects.
The algorithm works as follows:
1. The utility first scans the specified record set, building a list
with the names that will become the table header.
2. For each field, a header is added with the form:
FIELDNAME[_N]
where N is a number in the range '2..inf' and is the "index" of the
field in its containing record plus one. For example, consider the
following record set:
a: a1
b: b11
b: b12
c: c1
a: a2
b: b2
d: d2
The corresponding list of headers being:
a b b_2 c a b d
3. Then duplicates are removed:
a b b_2 c d
4. The resulting list of headers is then used to build the table in
the generated csv file.
In the above example the result would be
"a","b","b_2","c","d"
"a1","b11","b12","c1",
"a2","b2",,,"d2"
As shown, missing fields are implemented as empty columns in the
generated csv.
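Such a table can be obtained by running 'rec2csv' on the recfile
(assuming here that the records above are stored in a file called
'data.rec'); the csv data is written to standard output:
$ rec2csv data.rec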
File: recutils.info, Node: Importing MDB Files, Prev: CSV Files, Up: Interoperability
15.2 Importing MDB Files
========================
Access files ("mdb files") are collections of several relations, also
known as tables. Tables can be either "user tables" storing user data,
or "system tables" storing information such as forms, queries or the
relationships between the tables.
It is possible to get a listing with the names of all tables stored
in a mdb file by calling 'mdb2rec' in the following way:
$ mdb2rec -l sales.mdb
Customers
Products
Orders
So 'sales.mdb' stores user information in the tables Customers,
Products and Orders. If we want to include system tables in the listing
we can use the '-s' command line option:
$ mdb2rec -s -l sales.mdb
MSysObjects
MSysACEs
MSysQueries
MSysRelationships
Customers
Products
Orders
The tables with names starting with 'MSys' are system tables. The
data stored in those tables is either not relevant to the recutils user
(used by the Access program to create forms and the like) or is used in
an indirect way by 'mdb2rec' (such as the information from
MSysRelationships).
Let's read some data from the 'mdb' file. We can get the relation of
Products in rec format:
$ mdb2rec sales.mdb Products
%rec: Products
%type: ProductID int
%type: ProductName size 80
%type: Discontinued bool
ProductID: 1
ProductName: GNU generation T-shirt
Discontinued: 0
...
A "record descriptor" is created for the record set containing the
generated records, called Products. As seen in the example, 'mdb2rec'
is able to generate type information for the fields. The list of
customers is similar:
$ mdb2rec sales.mdb Customers
%rec: Customers
%type: CustomerID size 4
%type: CompanyName size 80
%type: ContactName size 60
CustomerID: GSOFT
CompanyName: GNU Soft
ContactName: Jose E. Marchesi
...
If no table is specified in the invocation to 'mdb2rec' all the
tables in the file are processed, with the exception of the system
tables, which requires '-s' to be used:
$ mdb2rec sales.mdb
%rec: Products
...
%rec: Customers
...
%rec: Orders
...
File: recutils.info, Node: Bash Builtins, Next: Invoking the Utilities, Prev: Interoperability, Up: Top
16 Bash Builtins
****************
The command-line utilities described in *note Invoking the Utilities::
are designed to be used interactively in the shell. Together, and often
combined with the standard shell utilities, they provide a quite
complete user interface. However, the user's experience can be greatly
improved by a closer integration between the recutils and the shell.
The following sections describe several extensions for 'bash', the GNU
shell (*note (bash)Top::). These extensions make the shell "aware" of
the recutils.
As with any bash built-in, help is available in the command line
using the 'help' command. For example:
$ help readrec
If you installed recutils using a binary package in a GNU/Linux
distribution, odds are that the built-in commands described in this
chapter are already available to you. Otherwise (you get a "command not
found" or similar error) you may have to register the built-in commands
with your bash. This is very easy using the 'enable' bash command. The
registering command for readrec would be:
$ enable -f readrec.so readrec
Note however that some systems require the full path to 'readrec.so'
in order for this command to work.
* Menu:
* readrec:: Exporting the contents of records to the shell.
File: recutils.info, Node: readrec, Up: Bash Builtins
16.1 readrec
============
The bash built-in 'read', when invoked with no options, consumes one
line from standard input and makes it available in the predefined
'REPLY' environment variable, or any other variable whose name is passed
as an argument. This allows processing data structured in lines in a
quite natural way. For example, the following program prints the third
field of each line, with fields separated by commas, until standard
input is exhausted:
# Process one line at a time.
while read
do
echo "The third field is " `echo $REPLY | cut -d, -f 2`
done
However, 'read' is not very useful when it comes to processing
recutils records in the shell. Even though it is possible to customize
the character used by 'read' to split the input into records, we would
need to ignore the empty records in the likely case of more than one
empty line separating records. Also, we would need to use 'recsel' to
access the record fields. Too complicated!
Thus, the 'readrec' bash built-in is similar to 'read' with the
difference that it reads records instead of lines. It also "exports"
the contents of the record to the user as the values of several
environment variables:
- 'REPLY_REC' is set to the record read from standard input.
- For each field found in the input record, a variable named after
the field is set to its (decoded) value. When several fields with
the same name are found in the input record then a bash array is
created.
Consider for example the following simple database containing contact
information:
Name: Mr. Foo
Email: foo@bar.com
Email: bar@baz.net
Checked: no
Name: Mr. Bar
Email: bar@foo.com
Telephone: 999666000
Checked: yes
We would like to write some shell code to send an email to all the
contacts, but only if the contact has not been checked before, i.e. the
'Checked' field contains 'no'. The following code snippet would do the
job nicely using 'readrec':
recsel contacts.rec | while readrec
do
if [ $Checked = "no" ]
then
mail -s "You are being checked." ${Email[0]} < email.txt
recset -e "Email = '$Email'" -f Checked -S yes contacts.rec
sleep 1
fi
done
Note the usage of the bash array when accessing the primary email
address of each contact. Note also that we update each contact to
figure as "checked", using 'recset', so she won't get pestered again the
next time the script is run.
File: recutils.info, Node: Invoking the Utilities, Next: Regular Expressions, Prev: Bash Builtins, Up: Top
17 Invoking the Utilities
*************************
Certain options are available in all of these programs. Rather than
writing identical descriptions for each of the programs, they are listed
here.
'--version'
Print the version number, then exit successfully.
'--help'
Print a help message, then exit successfully.
'--'
Delimit the option list. Later arguments, if any, are treated as
operands even if they begin with '-'. For example, 'recsel -- -p'
reads from the file named '-p'.
* Menu:
* Invoking recinf:: Printing information about rec files.
* Invoking recsel:: Selecting records.
* Invoking recins:: Inserting records.
* Invoking recdel:: Deleting records.
* Invoking recset:: Managing fields.
* Invoking recfix:: Fixing broken rec files, and diagnostics.
* Invoking recfmt:: Formatting records using templates.
* Invoking csv2rec:: Converting csv data into rec data.
* Invoking rec2csv:: Converting rec data into csv data.
* Invoking mdb2rec:: Converting mdb files into rec files.
File: recutils.info, Node: Invoking recinf, Next: Invoking recsel, Up: Invoking the Utilities
17.1 Invoking recinf
====================
'recinf' reads the given rec files (or the data from standard input if
no file is specified) and prints a summary of the record types contained
in the input.
Synopsis:
recinf [OPTION]... [FILE]...
The default behavior is to emit a line per record type in the input
containing its name and the number of records of that type:
$ recinf hackers.rec tasks.rec
25 Hacker
102 Task
If the input contains anonymous records, i.e. records that are before
the first record descriptor, the corresponding output line won't have a
type name:
$ recinf data.rec
10
In addition to the common options described earlier the program
accepts the following options.
'-t TYPE'
'--type=TYPE'
Select records of a given type only.
'-d'
'--descriptor'
Print all the record descriptors present in the file.
'-n'
'--names-only'
Output just the names of the record types found in the input. If
the input contains only anonymous records then output nothing.
'-S'
'--print-sexps'
Print the data in the form of sexps (Lisp expressions) instead of
rec format. This option can be useful for, of course, Lisp
programs.
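For instance, assuming the same 'hackers.rec' and 'tasks.rec' files
used in the example above, '-n' would list just the type names:
$ recinf -n hackers.rec tasks.rec
Hacker
Task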
File: recutils.info, Node: Invoking recsel, Next: Invoking recins, Prev: Invoking recinf, Up: Invoking the Utilities
17.2 Invoking recsel
====================
'recsel' reads the given rec files (or the data in the standard input if
no file is specified) and prints out records (or part of records) based
upon some criteria specified by the user.
'recsel' searches rec files for records satisfying certain criteria.
Synopsis:
recsel [OPTION]... \
[-n INDEXES | -e RECORD_EXPR | -q STR | -m NUM] \
[-c | (-p|-P|-R) FIELD_EXPR] \
[FILE]...
If no FILE is specified then the command acts like a filter, getting
the data from standard input and writing the result to standard output.
In addition to the common options described earlier (*note Common
Options::) the program accepts the following options.
The following "global options" are available.
'-i'
'--case-insensitive'
Make string matching case-insensitive in selection expressions.
'-C'
'--collapse'
Collapse the output: do not separate the records in the result
with blank lines.
'-d'
'--include-descriptors'
Print record descriptors along with the matched records.
'-s SECRET'
'--password=SECRET'
Try to decrypt confidential fields with the given password.
'-S'
'--sort=FIELDS'
Sort the output by the comma-separated list of field names, FIELDS.
This option takes precedence over any sorting criteria specified in
the corresponding record descriptor with '%sort'.
'-U'
'--uniq'
Remove duplicated fields in the output records. Fields are
duplicated if they have the same field name and the same value.
'-G'
'--group-by=FIELDS'
Group the output records by the provided comma-separated list of
FIELDS. Grouping is performed before sorting.
The "selection options" are used to select a subset of the records in
the input.
'-n INDEXES'
'--number=INDEXES'
Match the records occupying the given positions in their record set.
INDEXES must be a comma-separated list of numbers or ranges, with
ranges being two numbers separated by a dash.  Indexes are
zero-based, so, for example, the following list denotes the first,
the third, and the fifth through the tenth records: '-n 0,2,4-9'.
'-e EXPR'
'--expression=EXPR'
A record selection expression (*note Selection Expressions::).
Only the records matched by the expression will be taken into
account to compute the output.
'-q STR'
'--quick=STR'
Select records having a field whose value contains the substring
STR.
'-m NUM'
'--random=NUM'
Select NUM random records. If NUM is zero then select all the
records.
'-t TYPE'
'--type=TYPE'
Select records of a given type only.
'-j FIELD'
'--field=FIELD'
Perform an inner join of the record set selected by '-t' and the
record set for which FIELD is a foreign key. FIELD must be a field
declared with type 'rec' and thus must be a foreign key. If a join
is performed then any selection expression and field expression
operate on the joined record sets.
The "output options" are used to determine what information about the
selected records to display to the user, and how to display it.
'-p NAME_LIST'
'--print=NAME_LIST'
List of fields to print for each record. NAME_LIST is a list of
field names separated by commas. For example:
-p Name,Email
means to print the Name and the Email of every matching record,
both the field names and values.
If this option is not specified then all the fields of the matching
records are printed to standard output.
'-P NAME_LIST'
'--print-values=NAME_LIST'
Same as '-p', but print only the values of the selected fields.
'-R NAME_LIST'
'--print-row=NAME_LIST'
Same as '-P', but print the values separated by single spaces
instead of newlines.
'-c'
'--count'
If this option is specified then 'recsel' will print the number of
matching records instead of the records themselves. This option is
incompatible with '-p', '-P' and '-R'.
This "special option" is available to ease the communication between
recutils and other programs, such as Lisp interpreters.  This option
is not intended to be used by human operators.
'--print-sexps'
Print the data using sexps instead of rec format.
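Putting some of these options together, and assuming a 'contacts.rec'
file like the one used in the chapter on bash builtins, the following
sketch counts the unchecked contacts and then prints their email
addresses:
$ recsel -e "Checked = 'no'" -c contacts.rec
1
$ recsel -e "Checked = 'no'" -P Email contacts.rec
foo@bar.com
bar@baz.net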
File: recutils.info, Node: Invoking recins, Next: Invoking recdel, Prev: Invoking recsel, Up: Invoking the Utilities
17.3 Invoking recins
====================
'recins' adds new records to a rec file or to rec data read from
standard input. Synopsis:
recins [OPTION]... [-t TYPE] \
[-n INDEXES | -e RECORD_EXPR | -q STR | -m NUM] \
[( -f STR -v STR ) | ( -r RECDATA )]... \
[FILE]
The new record to be inserted by the command is constructed by using
pairs of '-f' and '-v' options, or '-r'. Each pair defines a field.
The order of the parameters is significant.
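For example, the following sketch appends an anonymous record like the
ones in the contacts database used in the chapter on bash builtins
(note how repeating '-f Email' produces two 'Email' fields):
$ recins -f Name -v "Mr. Baz" -f Email -v baz@foo.org \
         -f Email -v baz@bar.org -f Checked -v no contacts.rec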
If no FILE is specified then the command acts like a filter, getting
the data from standard input and writing the result to standard output.
If the specified FILE does not exist, it is created.
In addition to the common options described earlier (*note Common
Options::) the program accepts the following options.
'-t'
'--type=TYPE'
The type of the new record. If there is a record set in the input
data matching this type then the new record is added there.
Otherwise a new record set is created. If this parameter is not
specified then the new record is anonymous.
'-f'
'--field=NAME'
Declares the name of a field. This option must be followed by a
'-v'.
'-v'
'--value=VALUE'
The value of the field being defined.
'-r'
'--record=VALUE'
Add the fields of the record in VALUE. This option can be
intermixed with '-f ... -v' pairs.
'-s'
'--password'
Encrypt confidential fields with the given password.
'--no-external'
Don't use external record descriptors.
'--verbose'
Be verbose when reporting integrity problems.
'--no-auto'
Don't generate "auto" fields. *Note Auto-Generated Fields::.
Record selection arguments are supported too. If they are used then
'recins' uses "replacement mode": instead of appending the new record,
matched records are replaced by copies of the provided record. The
selection arguments are the same as in 'recsel':
'-n INDEXES'
'--number=INDEXES'
Match the records occupying the given positions in their record set.
INDEXES must be a comma-separated list of numbers or ranges, the
ranges being two numbers separated by a dash.  For example, the
following list denotes the first, the third, and the fifth through
the tenth records: '-n 0,2,4-9'.
'-e RECORD_EXPR'
'--expression=EXPR'
A record selection expression (*note Selection Expressions::).
Matching records will get replaced.
'-q STR'
'--quick=STR'
Select records having a field whose value contains the substring
STR; the matching records will get replaced.
'-m NUM'
'--random=NUM'
Select NUM random records. If NUM is zero then all records are
selected, i.e. no replace mode is activated.
'-i'
'--case-insensitive'
Make strings case-insensitive in selection expressions.
'--force'
Insert the requested record even in potentially dangerous
situations, such as when the data integrity of the database is
compromised.
File: recutils.info, Node: Invoking recdel, Next: Invoking recset, Prev: Invoking recins, Up: Invoking the Utilities
17.4 Invoking recdel
====================
'recdel' removes records from a rec file, or from rec data read from
standard input. Synopsis:
recdel [OPTIONS]... [-t TYPE] \
[-n INDEXES | -e RECORD_EXPR | -q STR | -m NUM] \
[FILE]
If no FILE is specified then the command acts like a filter, getting
the data from standard input and writing the result to standard output.
In addition to the common options described earlier (*note Common
Options::) the program accepts the following options.
'-t'
'--type=TYPE'
Remove records of the given type. If this parameter is not
specified then records of any type will be removed.
'-n INDEXES'
'--number=INDEXES'
Match the records occupying the given positions in their record set.
INDEXES must be a comma-separated list of numbers or ranges, the
ranges being two numbers separated by a dash.  For example, the
following list denotes the first, the third, and the fifth through
the tenth records: '-n 0,2,4-9'.
'-e RECORD_EXPR'
'--expression=EXPR'
A record selection expression (*note Selection Expressions::).
Only the records matched by the expression will be removed from the
file.
'-q STR'
'--quick=STR'
Remove records having a field whose value contains the substring
STR.
'-m NUM'
'--random=NUM'
Remove NUM random records. If NUM is zero then remove all the
records.
'-c'
'--comment'
Comment the matching records out instead of removing them.
'--force'
Delete even in potentially dangerous situations, such as a request
to delete all the records of some type.
'--no-external'
Don't use external record descriptors.
'-i'
'--case-insensitive'
Make strings case-insensitive in selection expressions.
'--verbose'
Be verbose when reporting integrity problems.
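As a sketch using the same contacts database shown in the chapter on
bash builtins, the already-checked contacts could be commented out
instead of removed:
$ recdel -c -e "Checked = 'yes'" contacts.rec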
File: recutils.info, Node: Invoking recset, Next: Invoking recfix, Prev: Invoking recdel, Up: Invoking the Utilities
17.5 Invoking recset
====================
'recset' manipulates the fields of records in a rec file, or rec data
read from standard input. Synopsis:
recset [OPTION]... [FILE]...
If no FILE is specified then the command acts like a filter, getting
the data from standard input and writing the result to standard output.
In addition to the common options described earlier (*note Common
Options::) the program accepts the following options.
Record selection options:
'-i'
'--case-insensitive'
Make strings case-insensitive in selection expressions.
'-t'
'--type=TYPE'
Operate on the records of the given type. If this parameter is not
specified then records of any type will be affected.
'-n INDEXES'
'--number=INDEXES'
Operate on the records occupying the given positions in their record
set.  INDEXES must be a comma-separated list of numbers or ranges,
the ranges being two numbers separated by a dash.  For example, the
following list denotes the first, the third, and the fifth through
the tenth records: '-n 0,2,4-9'.
'-e EXPR'
'--expression=EXPR'
A record selection expression (*note Selection Expressions::).
Only the records matched by the expression will be processed.
'-q STR'
'--quick=STR'
Operate on records having a field whose value contains the
substring STR.
'-m NUM'
'--random=NUM'
Operate on NUM random records. If NUM is zero then operate on all
the records.
Field selection options:
'-f'
'--fields=FEX'
Field selection expression (*note Field Expressions::) to select
the fields to operate on.
Actions:
'-s'
'--set=VALUE'
Set the value of the selected fields to VALUE.
'-a'
'--add=VALUE'
Add a new field to the selected record with value VALUE.
'-S'
'--set-add=VALUE'
Set the value of the selected fields to VALUE.  If any of the
selected fields does not exist in a record, it is appended with the
specified value.
'-r'
'--rename=VALUE'
Rename a field; VALUE must be a valid field name. The field
expression associated with this action must contain a single field
name and an optional subscript. If an entire record set is
selected then the field is renamed in the record descriptor as
well.
'-d'
'--delete'
Delete the selected fields in the selected records.
'-c'
'--comment'
Comment out the selected fields in the selected records.
'--no-external'
Don't use external record descriptors.
'--verbose'
Be verbose when reporting integrity problems.
'--force'
Perform the requested operation even in potentially dangerous
situations, or when the integrity of the data stored in the file is
affected.
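For example, again using the sample contacts database, the following
commands mark every contact as checked and then rename the 'Checked'
field to the (hypothetical) name 'Contacted':
$ recset -f Checked -S yes contacts.rec
$ recset -f Checked -r Contacted contacts.rec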
File: recutils.info, Node: Invoking recfix, Next: Invoking recfmt, Prev: Invoking recset, Up: Invoking the Utilities
17.6 Invoking recfix
====================
'recfix' checks and fixes rec files. Synopsis:
recfix [OPTION]... [OPERATION] [OP_OPTION]... [FILE]
If no FILE is specified then the command acts like a filter, getting
the data from standard input and writing the result to standard output.
In addition to the common options described earlier (*note Common
Options::) the program accepts the following global options.
'--no-external'
Don't use external record descriptors.
The effect of running 'recfix' depends on the operation it performs.
The operation mode is selected by using one of the following options.
'--check'
Check the integrity of the database contained in the file, printing
diagnostics messages in case something is not right. This is the
default operation.
'--sort'
Perform a physical sort of all the records contained in the file
(or standard input) after checking for its integrity. The sorting
criteria are provided by the '%sort' special field, if any. If
there is an integrity failure the sorting is not performed.
This is a destructive operation.
'--decrypt'
'--encrypt'
Decrypt (encrypt) all the (non-)encrypted fields in the database
which are marked as confidential. This operation requires a
password. If no password is specified with '-s' and the program is
run in a terminal, a prompt is given to get the password from the
user.
If encryption is performed on a file having encrypted fields, the
operation will fail unless '--force' is used.
These are destructive operations.
'--auto'
Insert auto-generated fields as appropriate in the records which
are missing them.
This is a destructive operation.
As described above, some operations make use of these additional
options:
'-s SECRET'
'--password=SECRET'
Password used to encrypt or decrypt fields.
'--force'
Force potentially dangerous operations.
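A typical maintenance session might first check a file and then
physically sort it (a sketch using the 'hackers.rec' file mentioned
earlier):
$ recfix --check hackers.rec
$ recfix --sort hackers.rec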
File: recutils.info, Node: Invoking recfmt, Next: Invoking csv2rec, Prev: Invoking recfix, Up: Invoking the Utilities
17.7 Invoking recfmt
====================
'recfmt' formats records using templates. Synopsis:
recfmt [OPTION]... [TEMPLATE]
This program always works as a filter, getting the data from the
standard input and writing the result to standard output.
In addition to the common options described earlier (*note Common
Options::) the program accepts the following options.
'-f'
'--filename=PATH'
Read the template from the file in PATH instead of the command
line.
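For example, assuming a template file 'reminder.tmpl' written with the
slots described in the chapter on templates, reminders for the
unchecked contacts could be generated with:
$ recsel -e "Checked = 'no'" contacts.rec | recfmt -f reminder.tmpl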
File: recutils.info, Node: Invoking csv2rec, Next: Invoking rec2csv, Prev: Invoking recfmt, Up: Invoking the Utilities
17.8 Invoking csv2rec
=====================
'csv2rec' reads the given comma-separated-values file (or the data from
standard input if no file is specified) and prints out the converted rec
data, if possible. Synopsis:
csv2rec [OPTION]... [CSV_FILE]
In addition to the common options described earlier (*note Common
Options::) the program accepts the following options.
'-t TYPE'
'--type=TYPE'
Type of the converted records. If no type is specified then no
type is used.
'-s'
'--strict'
Be strict when parsing the csv file.
'-e'
'--omit-empty'
Omit empty fields.
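For example, a hypothetical 'contacts.csv' file exported from a
spreadsheet could be converted into typed rec data with:
$ csv2rec -t Contact contacts.csv > contacts.rec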
File: recutils.info, Node: Invoking rec2csv, Next: Invoking mdb2rec, Prev: Invoking csv2rec, Up: Invoking the Utilities
17.9 Invoking rec2csv
=====================
'rec2csv' reads the given rec files (or the data in the standard input
if no file is specified) and prints out the converted
comma-separated-values. Synopsis:
rec2csv [OPTION]... [REC_FILE]...
The rec data can be read from files specified in the command line, or
from standard input. The program writes the converted data to standard
output.
In addition to the common options described earlier (*note Common
Options::) the program accepts the following options.
'-t TYPE'
'--type=TYPE'
Type of the records to convert. If no type is specified then the
default records (with no name) are converted.
'-S'
'--sort=FIELDS'
Sort the output by the comma-separated list of field names FIELDS.
This option takes precedence over any sorting criteria specified in
the corresponding record descriptor with '%sort'.
'-d'
'--delim=CHAR'
Use CHAR as the delimiter character separating fields in the
output. Defaults to ','.
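Going the other way, the same (hypothetical) data could be exported as
semicolon-separated values:
$ rec2csv -t Contact -d ';' contacts.rec > contacts.csv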
File: recutils.info, Node: Invoking mdb2rec, Prev: Invoking rec2csv, Up: Invoking the Utilities
17.10 Invoking mdb2rec
======================
'mdb2rec' reads the given mdb file and prints out the converted rec
data, if possible. Synopsis:
mdb2rec [OPTION]... MDB_FILE [TABLE]
All the tables contained in the mdb file are exported unless a table
is specified in the command line.
In addition to the common options described earlier (*note Common
Options::) the program accepts the following options.
'-s'
'--system-tables'
Include system tables in the output.
'-l'
'--list-tables'
Dump a list of the table names contained in the mdb file, one per
line.
'-e'
'--keep-empty-fields'
Don't prune empty fields in the rec output.
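A session converting a single table from an Access database might look
like this ('sales.mdb' and 'Customers' are made-up names):
$ mdb2rec -l sales.mdb
Customers
Orders
$ mdb2rec sales.mdb Customers > customers.rec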
File: recutils.info, Node: Regular Expressions, Next: Date input formats, Prev: Invoking the Utilities, Up: Top
18 Regular Expressions
**********************
The character '.' matches any single character except the null
character.
'+'
match one or more occurrences of the previous atom or regexp.
'?'
match zero or one occurrences of the previous atom or regexp.
'\+'
matches a '+'.
'\?'
matches a '?'.
Bracket expressions are used to match ranges of characters. Bracket
expressions where the range is backward, for example '[z-a]', are
invalid. Within square brackets, '\' is taken literally. Character
classes are supported; for example '[[:digit:]]' matches a single
decimal digit.
GNU extensions are supported:
'\w'
matches a character within a word
'\W'
matches a character which is not within a word
'\<'
matches the beginning of a word
'\>'
matches the end of a word
'\b'
matches a word boundary
'\B'
matches characters which are not a word boundary
'\`'
matches the beginning of the whole input
'\''
matches the end of the whole input
Grouping is performed with parentheses '()'. An unmatched ')'
matches just itself. A backslash followed by a digit acts as a
back-reference and matches the same thing as the previous grouped
expression indicated by that number. For example, '\2' matches the
second group expression. The order of group expressions is determined
by the position of their opening parenthesis '('.
The alternation operator is '|'.
The characters '^' and '$' always represent the beginning and end of
a string respectively, except within square brackets. Within brackets,
an initial '^' inverts the character class being matched.
'*', '+' and '?' are special at any point in a regular expression
except the following places, where they are not allowed:
1. At the beginning of a regular expression
2. After an open-group, '('
3. After the alternation operator, '|'
Intervals are specified by '{' and '}'. Invalid intervals such as
'a{1z' are not accepted.
The longest possible match is returned; this applies to the regular
expression as a whole and (subject to this constraint) to
sub-expressions within groups.
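Within recutils these regular expressions appear most often in
selection expressions, where the '~' operator performs a regexp match
(*note Selection Expressions::).  For example, the following sketch
uses grouping and alternation to select the contacts whose email local
part is either 'foo' or 'bar':
$ recsel -e "Email ~ '(foo|bar)@'" contacts.rec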
File: recutils.info, Node: Date input formats, Next: GNU Free Documentation License, Prev: Regular Expressions, Up: Top
19 Date input formats
*********************
First, a quote:
Our units of temporal measurement, from seconds on up to months,
are so complicated, asymmetrical and disjunctive so as to make
coherent mental reckoning in time all but impossible. Indeed, had
some tyrannical god contrived to enslave our minds to time, to make
it all but impossible for us to escape subjection to sodden
routines and unpleasant surprises, he could hardly have done better
than handing down our present system. It is like a set of
trapezoidal building blocks, with no vertical or horizontal
surfaces, like a language in which the simplest thought demands
ornate constructions, useless particles and lengthy
circumlocutions. Unlike the more successful patterns of language
and science, which enable us to face experience boldly or at least
level-headedly, our system of temporal calculation silently and
persistently encourages our terror of time.
... It is as though architects had to measure length in feet, width
in meters and height in ells; as though basic instruction manuals
demanded a knowledge of five different languages. It is no wonder
then that we often look into our own immediate past or future, last
Tuesday or a week from Sunday, with feelings of helpless confusion.
...
--Robert Grudin, 'Time and the Art of Living'.
This section describes the textual date representations that GNU
programs accept. These are the strings you, as a user, can supply as
arguments to the various programs. The C interface (via the
'parse_datetime' function) is not described here.
* Menu:
* General date syntax:: Common rules.
* Calendar date items:: 19 Dec 1994.
* Time of day items:: 9:20pm.
* Time zone items:: EST, PDT, UTC, ...
* Combined date and time of day items:: 1972-09-24T20:02:00,000000-0500.
* Day of week items:: Monday and others.
* Relative items in date strings:: next tuesday, 2 years ago.
* Pure numbers in date strings:: 19931219, 1440.
* Seconds since the Epoch:: @1078100502.
* Specifying time zone rules:: TZ="America/New_York", TZ="UTC0".
* Authors of parse_datetime:: Bellovin, Eggert, Salz, Berets, et al.
File: recutils.info, Node: General date syntax, Next: Calendar date items, Up: Date input formats
19.1 General date syntax
========================
A "date" is a string, possibly empty, containing many items separated by
whitespace. The whitespace may be omitted when no ambiguity arises.
The empty string means the beginning of today (i.e., midnight). Order
of the items is immaterial. A date string may contain many flavors of
items:
* calendar date items
* time of day items
* time zone items
* combined date and time of day items
* day of the week items
* relative items
* pure numbers.
We describe each of these item types in turn, below.
A few ordinal numbers may be written out in words in some contexts.
This is most useful for specifying day of the week items or relative
items (see below). Among the most commonly used ordinal numbers, the
word 'last' stands for -1, 'this' stands for 0, and 'first' and 'next'
both stand for 1. Because the word 'second' stands for the unit of time
there is no way to write the ordinal number 2, but for convenience
'third' stands for 3, 'fourth' for 4, 'fifth' for 5, 'sixth' for 6,
'seventh' for 7, 'eighth' for 8, 'ninth' for 9, 'tenth' for 10,
'eleventh' for 11 and 'twelfth' for 12.
When a month is written this way, it is still considered to be
written numerically, instead of being "spelled in full"; this changes
the allowed strings.
In the current implementation, only English is supported for words
and abbreviations like 'AM', 'DST', 'EST', 'first', 'January', 'Sunday',
'tomorrow', and 'year'.
The output of the 'date' command is not always acceptable as a date
string, not only because of the language problem, but also because there
is no standard meaning for time zone items like 'IST'. When using
'date' to generate a date string intended to be parsed later, specify a
date format that is independent of language and that does not use time
zone items other than 'UTC' and 'Z'. Here are some ways to do this:
$ LC_ALL=C TZ=UTC0 date
Mon Mar 1 00:21:42 UTC 2004
$ TZ=UTC0 date +'%Y-%m-%d %H:%M:%SZ'
2004-03-01 00:21:42Z
$ date --rfc-3339=ns # --rfc-3339 is a GNU extension.
2004-02-29 16:21:42.692722128-08:00
$ date --rfc-2822 # a GNU extension
Sun, 29 Feb 2004 16:21:42 -0800
$ date +'%Y-%m-%d %H:%M:%S %z' # %z is a GNU extension.
2004-02-29 16:21:42 -0800
$ date +'@%s.%N' # %s and %N are GNU extensions.
@1078100502.692722128
Alphabetic case is completely ignored in dates. Comments may be
introduced between round parentheses, as long as included parentheses
are properly nested. Hyphens not followed by a digit are currently
ignored. Leading zeros on numbers are ignored.
Invalid dates like '2005-02-29' or times like '24:00' are rejected.
In the typical case of a host that does not support leap seconds, a time
like '23:59:60' is rejected even if it corresponds to a valid leap
second.
File: recutils.info, Node: Calendar date items, Next: Time of day items, Prev: General date syntax, Up: Date input formats
19.2 Calendar date items
========================
A "calendar date item" specifies a day of the year. It is specified
differently, depending on whether the month is specified numerically or
literally. All these strings specify the same calendar date:
1972-09-24 # ISO 8601.
72-9-24 # Assume 19xx for 69 through 99,
# 20xx for 00 through 68.
72-09-24 # Leading zeros are ignored.
9/24/72 # Common U.S. writing.
24 September 1972
24 Sept 72 # September has a special abbreviation.
24 Sep 72 # Three-letter abbreviations always allowed.
Sep 24, 1972
24-sep-72
24sep72
The year can also be omitted. In this case, the last specified year
is used, or the current year if none. For example:
9/24
sep 24
Here are the rules.
For numeric months, the ISO 8601 format 'YEAR-MONTH-DAY' is allowed,
where YEAR is any positive number, MONTH is a number between 01 and 12,
and DAY is a number between 01 and 31. A leading zero must be present
if a number is less than ten. If YEAR is 68 or smaller, then 2000 is
added to it; otherwise, if YEAR is less than 100, then 1900 is added to
it. The construct 'MONTH/DAY/YEAR', popular in the United States, is
accepted. Also 'MONTH/DAY', omitting the year.
Literal months may be spelled out in full: 'January', 'February',
'March', 'April', 'May', 'June', 'July', 'August', 'September',
'October', 'November' or 'December'. Literal months may be abbreviated
to their first three letters, possibly followed by an abbreviating dot.
It is also permitted to write 'Sept' instead of 'September'.
When months are written literally, the calendar date may be given as
any of the following:
DAY MONTH YEAR
DAY MONTH
MONTH DAY YEAR
DAY-MONTH-YEAR
Or, omitting the year:
MONTH DAY
File: recutils.info, Node: Time of day items, Next: Time zone items, Prev: Calendar date items, Up: Date input formats
19.3 Time of day items
======================
A "time of day item" in date strings specifies the time on a given day.
Here are some examples, all of which represent the same time:
20:02:00.000000
20:02
8:02pm
20:02-0500 # In EST (U.S. Eastern Standard Time).
More generally, the time of day may be given as 'HOUR:MINUTE:SECOND',
where HOUR is a number between 0 and 23, MINUTE is a number between 0
and 59, and SECOND is a number between 0 and 59 possibly followed by '.'
or ',' and a fraction containing one or more digits. Alternatively,
':SECOND' can be omitted, in which case it is taken to be zero. On the
rare hosts that support leap seconds, SECOND may be 60.
If the time is followed by 'am' or 'pm' (or 'a.m.' or 'p.m.'), HOUR
is restricted to run from 1 to 12, and ':MINUTE' may be omitted (taken
to be zero). 'am' indicates the first half of the day, 'pm' indicates
the second half of the day. In this notation, 12 is the predecessor of
1: midnight is '12am' while noon is '12pm'. (This is the zero-oriented
interpretation of '12am' and '12pm', as opposed to the old tradition
derived from Latin which uses '12m' for noon and '12pm' for midnight.)
The time may alternatively be followed by a time zone correction,
expressed as 'SHHMM', where S is '+' or '-', HH is a number of zone
hours and MM is a number of zone minutes. The zone minutes term, MM,
may be omitted, in which case the one- or two-digit correction is
interpreted as a number of hours. You can also separate HH from MM with
a colon. When a time zone correction is given this way, it forces
interpretation of the time relative to Coordinated Universal Time (UTC),
overriding any previous specification for the time zone or the local
time zone. For example, '+0530' and '+05:30' both stand for the time
zone 5.5 hours ahead of UTC (e.g., India). This is the best way to
specify a time zone correction by fractional parts of an hour. The
maximum zone correction is 24 hours.
Either 'am'/'pm' or a time zone correction may be specified, but not
both.
File: recutils.info, Node: Time zone items, Next: Combined date and time of day items, Prev: Time of day items, Up: Date input formats
19.4 Time zone items
====================
A "time zone item" specifies an international time zone, indicated by a
small set of letters, e.g., 'UTC' or 'Z' for Coordinated Universal Time.
Any included periods are ignored. By following a non-daylight-saving
time zone by the string 'DST' in a separate word (that is, separated by
some white space), the corresponding daylight saving time zone may be
specified. Alternatively, a non-daylight-saving time zone can be
followed by a time zone correction, to add the two values. This is
normally done only for 'UTC'; for example, 'UTC+05:30' is equivalent to
'+05:30'.
Time zone items other than 'UTC' and 'Z' are obsolescent and are not
recommended, because they are ambiguous; for example, 'EST' has a
different meaning in Australia than in the United States. Instead, it's
better to use unambiguous numeric time zone corrections like '-0500', as
described in the previous section.
If neither a time zone item nor a time zone correction is supplied,
timestamps are interpreted using the rules of the default time zone
(*note Specifying time zone rules::).
File: recutils.info, Node: Combined date and time of day items, Next: Day of week items, Prev: Time zone items, Up: Date input formats
19.5 Combined date and time of day items
========================================
The ISO 8601 date and time of day extended format consists of an ISO
8601 date, a 'T' character separator, and an ISO 8601 time of day. This
format is also recognized if the 'T' is replaced by a space.
In this format, the time of day should use 24-hour notation.
Fractional seconds are allowed, with either comma or period preceding
the fraction. ISO 8601 fractional minutes and hours are not supported.
Typically, hosts support nanosecond timestamp resolution; excess
precision is silently discarded.
Here are some examples:
2012-09-24T20:02:00.052-05:00
2012-12-31T23:59:59,999999999+11:00
1970-01-01 00:00Z
File: recutils.info, Node: Day of week items, Next: Relative items in date strings, Prev: Combined date and time of day items, Up: Date input formats
19.6 Day of week items
======================
The explicit mention of a day of the week will forward the date (only if
necessary) to reach that day of the week in the future.
Days of the week may be spelled out in full: 'Sunday', 'Monday',
'Tuesday', 'Wednesday', 'Thursday', 'Friday' or 'Saturday'. Days may be
abbreviated to their first three letters, optionally followed by a
period. The special abbreviations 'Tues' for 'Tuesday', 'Wednes' for
'Wednesday' and 'Thur' or 'Thurs' for 'Thursday' are also allowed.
A number may precede a day of the week item to move forward
supplementary weeks.  It is best used in expressions like 'third monday'.
In this context, 'last DAY' or 'next DAY' is also acceptable; they move
one week before or after the day that DAY by itself would represent.
A comma following a day of the week item is ignored.
File: recutils.info, Node: Relative items in date strings, Next: Pure numbers in date strings, Prev: Day of week items, Up: Date input formats
19.7 Relative items in date strings
===================================
"Relative items" adjust a date (or the current date if none) forward or
backward. The effects of relative items accumulate. Here are some
examples:
1 year
1 year ago
3 years
2 days
The unit of time displacement may be selected by the string 'year' or
'month' for moving by whole years or months. These are fuzzy units, as
years and months are not all of equal duration. More precise units are
'fortnight' which is worth 14 days, 'week' worth 7 days, 'day' worth 24
hours, 'hour' worth 60 minutes, 'minute' or 'min' worth 60 seconds, and
'second' or 'sec' worth one second. An 's' suffix on these units is
accepted and ignored.
The unit of time may be preceded by a multiplier, given as an
optionally signed number. Unsigned numbers are taken as positively
signed. No number at all implies 1 for a multiplier. Following a
relative item by the string 'ago' is equivalent to preceding the unit by
a multiplier with value -1.
The string 'tomorrow' is worth one day in the future (equivalent to
'day'), the string 'yesterday' is worth one day in the past (equivalent
to 'day ago').
The strings 'now' and 'today' are relative items corresponding to a
zero-valued time displacement; these strings come from the fact that a
zero-valued time displacement represents the current time when not
otherwise changed by previous items.  They may be used to stress other
items, like in '12:00 today'. The string 'this' also has the meaning of
a zero-valued time displacement, but is preferred in date strings like
'this thursday'.
When a relative item causes the resulting date to cross a boundary
where the clocks were adjusted, typically for daylight saving time, the
resulting date and time are adjusted accordingly.
The fuzz in units can cause problems with relative items. For
example, '2003-07-31 -1 month' might evaluate to 2003-07-01, because
2003-06-31 is an invalid date. To determine the previous month more
reliably, you can ask for the month before the 15th of the current
month. For example:
$ date -R
Thu, 31 Jul 2003 13:02:39 -0700
$ date --date='-1 month' +'Last month was %B?'
Last month was July?
$ date --date="$(date +%Y-%m-15) -1 month" +'Last month was %B!'
Last month was June!
Also, take care when manipulating dates around clock changes such as
daylight saving leaps. In a few cases these have added or subtracted as
much as 24 hours from the clock, so it is often wise to adopt universal
time by setting the 'TZ' environment variable to 'UTC0' before embarking
on calendrical calculations.
File: recutils.info, Node: Pure numbers in date strings, Next: Seconds since the Epoch, Prev: Relative items in date strings, Up: Date input formats
19.8 Pure numbers in date strings
=================================
The precise interpretation of a pure decimal number depends on the
context in the date string.
If the decimal number is of the form YYYYMMDD and no other calendar
date item (*note Calendar date items::) appears before it in the date
string, then YYYY is read as the year, MM as the month number and DD as
the day of the month, for the specified calendar date.
If the decimal number is of the form HHMM and no other time of day
item appears before it in the date string, then HH is read as the hour
of the day and MM as the minute of the hour, for the specified time of
day. MM can also be omitted.
If both a calendar date and a time of day appear to the left of a
number in the date string, but no relative item, then the number
overrides the year.
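For example, with the GNU 'date' command the pure numbers listed in
the menu above combine as expected:
$ date --date='19931219 1440' +'%Y-%m-%d %H:%M'
1993-12-19 14:40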
File: recutils.info, Node: Seconds since the Epoch, Next: Specifying time zone rules, Prev: Pure numbers in date strings, Up: Date input formats
19.9 Seconds since the Epoch
============================
If you precede a number with '@', it represents an internal timestamp as
a count of seconds. The number can contain an internal decimal point
(either '.' or ','); any excess precision not supported by the internal
representation is truncated toward minus infinity. Such a number cannot
be combined with any other date item, as it specifies a complete
timestamp.
Internally, computer times are represented as a count of seconds
since an epoch--a well-defined point of time. On GNU and POSIX systems,
the epoch is 1970-01-01 00:00:00 UTC, so '@0' represents this time, '@1'
represents 1970-01-01 00:00:01 UTC, and so forth. GNU and most other
POSIX-compliant systems support such times as an extension to POSIX,
using negative counts, so that '@-1' represents 1969-12-31 23:59:59 UTC.
Traditional Unix systems count seconds with 32-bit two's-complement
integers and can represent times from 1901-12-13 20:45:52 through
2038-01-19 03:14:07 UTC. More modern systems use 64-bit counts of
seconds with nanosecond subcounts, and can represent all the times in
the known lifetime of the universe to a resolution of 1 nanosecond.
On most hosts, these counts ignore the presence of leap seconds. For
example, on most hosts '@915148799' represents 1998-12-31 23:59:59 UTC,
'@915148800' represents 1999-01-01 00:00:00 UTC, and there is no way to
represent the intervening leap second 1998-12-31 23:59:60 UTC.
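For example, the counts mentioned above can be checked with the GNU
'date' command:
$ TZ=UTC0 date --date='@915148800' +'%Y-%m-%d %H:%M:%SZ'
1999-01-01 00:00:00Z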
File: recutils.info, Node: Specifying time zone rules, Next: Authors of parse_datetime, Prev: Seconds since the Epoch, Up: Date input formats
19.10 Specifying time zone rules
================================
Normally, dates are interpreted using the rules of the current time
zone, which in turn are specified by the 'TZ' environment variable, or
by a system default if 'TZ' is not set. To specify a different set of
default time zone rules that apply just to one date, start the date with
a string of the form 'TZ="RULE"'. The two quote characters ('"') must
be present in the date, and any quotes or backslashes within RULE must
be escaped by a backslash.
For example, with the GNU 'date' command you can answer the question
"What time is it in New York when a Paris clock shows 6:30am on October
31, 2004?" by using a date beginning with 'TZ="Europe/Paris"' as shown
in the following shell transcript:
$ export TZ="America/New_York"
$ date --date='TZ="Europe/Paris" 2004-10-31 06:30'
Sun Oct 31 01:30:00 EDT 2004
In this example, the '--date' operand begins with its own 'TZ'
setting, so the rest of that operand is processed according to
'Europe/Paris' rules, treating the string '2004-10-31 06:30' as if it
were in Paris. However, since the output of the 'date' command is
processed according to the overall time zone rules, it uses New York
time. (Paris was normally six hours ahead of New York in 2004, but this
example refers to a brief Halloween period when the gap was five hours.)
A 'TZ' value is a rule that typically names a location in the 'tz'
database (http://www.twinsun.com/tz/tz-link.htm). A recent catalog of
location names appears in the TWiki Date and Time Gateway
(http://twiki.org/cgi-bin/xtra/tzdate). A few non-GNU hosts require a
colon before a location name in a 'TZ' setting, e.g.,
'TZ=":America/New_York"'.
The 'tz' database includes a wide variety of locations ranging from
'Arctic/Longyearbyen' to 'Antarctica/South_Pole', but if you are at sea
and have your own private time zone, or if you are using a non-GNU host
that does not support the 'tz' database, you may need to use a POSIX
rule instead. Simple POSIX rules like 'UTC0' specify a time zone
without daylight saving time; other rules can specify simple daylight
saving regimes. *Note Specifying the Time Zone with 'TZ': (libc)TZ
Variable.
File: recutils.info, Node: Authors of parse_datetime, Prev: Specifying time zone rules, Up: Date input formats
19.11 Authors of 'parse_datetime'
=================================
'parse_datetime' started life as 'getdate', as originally implemented
by Steven M. Bellovin while at the University of North Carolina at
Chapel Hill.  The code was later tweaked by a couple of people on
Usenet, then completely overhauled by Rich $alz and Jim Berets in
August, 1990.  Various revisions for the GNU system were made by David
MacKenzie, Jim Meyering, Paul Eggert and others, including renaming it
to 'get_date' to avoid a conflict with the alternative Posix function
'getdate', and a later rename to 'parse_datetime'.  The Posix function
'getdate' can parse more locale-specific dates using 'strptime', but
relies on an environment variable and external file, and lacks the
thread-safety of 'parse_datetime'.
This chapter was originally produced by François Pinard from the
'parse_datetime.y' source code, and then edited by K. Berry.
File: recutils.info, Node: GNU Free Documentation License, Next: Concept Index, Prev: Date input formats, Up: Top
Appendix A GNU Free Documentation License
*****************************************
Version 1.3, 3 November 2008
Copyright (C) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
0. PREAMBLE
The purpose of this License is to make a manual, textbook, or other
functional and useful document "free" in the sense of freedom: to
assure everyone the effective freedom to copy and redistribute it,
with or without modifying it, either commercially or
noncommercially. Secondarily, this License preserves for the
author and publisher a way to get credit for their work, while not
being considered responsible for modifications made by others.
This License is a kind of "copyleft", which means that derivative
works of the document must themselves be free in the same sense.
It complements the GNU General Public License, which is a copyleft
license designed for free software.
We have designed this License in order to use it for manuals for
free software, because free software needs free documentation: a
free program should come with manuals providing the same freedoms
that the software does. But this License is not limited to
software manuals; it can be used for any textual work, regardless
of subject matter or whether it is published as a printed book. We
recommend this License principally for works whose purpose is
instruction or reference.
1. APPLICABILITY AND DEFINITIONS
This License applies to any manual or other work, in any medium,
that contains a notice placed by the copyright holder saying it can
be distributed under the terms of this License. Such a notice
grants a world-wide, royalty-free license, unlimited in duration,
to use that work under the conditions stated herein. The
"Document", below, refers to any such manual or work. Any member
of the public is a licensee, and is addressed as "you". You accept
the license if you copy, modify or distribute the work in a way
requiring permission under copyright law.
A "Modified Version" of the Document means any work containing the
Document or a portion of it, either copied verbatim, or with
modifications and/or translated into another language.
A "Secondary Section" is a named appendix or a front-matter section
of the Document that deals exclusively with the relationship of the
publishers or authors of the Document to the Document's overall
subject (or to related matters) and contains nothing that could
fall directly within that overall subject. (Thus, if the Document
is in part a textbook of mathematics, a Secondary Section may not
explain any mathematics.) The relationship could be a matter of
historical connection with the subject or with related matters, or
of legal, commercial, philosophical, ethical or political position
regarding them.
The "Invariant Sections" are certain Secondary Sections whose
titles are designated, as being those of Invariant Sections, in the
notice that says that the Document is released under this License.
If a section does not fit the above definition of Secondary then it
is not allowed to be designated as Invariant. The Document may
contain zero Invariant Sections. If the Document does not identify
any Invariant Sections then there are none.
The "Cover Texts" are certain short passages of text that are
listed, as Front-Cover Texts or Back-Cover Texts, in the notice
that says that the Document is released under this License. A
Front-Cover Text may be at most 5 words, and a Back-Cover Text may
be at most 25 words.
A "Transparent" copy of the Document means a machine-readable copy,
represented in a format whose specification is available to the
general public, that is suitable for revising the document
straightforwardly with generic text editors or (for images composed
of pixels) generic paint programs or (for drawings) some widely
available drawing editor, and that is suitable for input to text
formatters or for automatic translation to a variety of formats
suitable for input to text formatters. A copy made in an otherwise
Transparent file format whose markup, or absence of markup, has
been arranged to thwart or discourage subsequent modification by
readers is not Transparent. An image format is not Transparent if
used for any substantial amount of text. A copy that is not
"Transparent" is called "Opaque".
Examples of suitable formats for Transparent copies include plain
ASCII without markup, Texinfo input format, LaTeX input format,
SGML or XML using a publicly available DTD, and standard-conforming
simple HTML, PostScript or PDF designed for human modification.
Examples of transparent image formats include PNG, XCF and JPG.
Opaque formats include proprietary formats that can be read and
edited only by proprietary word processors, SGML or XML for which
the DTD and/or processing tools are not generally available, and
the machine-generated HTML, PostScript or PDF produced by some word
processors for output purposes only.
The "Title Page" means, for a printed book, the title page itself,
plus such following pages as are needed to hold, legibly, the
material this License requires to appear in the title page. For
works in formats which do not have any title page as such, "Title
Page" means the text near the most prominent appearance of the
work's title, preceding the beginning of the body of the text.
The "publisher" means any person or entity that distributes copies
of the Document to the public.
A section "Entitled XYZ" means a named subunit of the Document
whose title either is precisely XYZ or contains XYZ in parentheses
following text that translates XYZ in another language. (Here XYZ
stands for a specific section name mentioned below, such as
"Acknowledgements", "Dedications", "Endorsements", or "History".)
To "Preserve the Title" of such a section when you modify the
Document means that it remains a section "Entitled XYZ" according
to this definition.
The Document may include Warranty Disclaimers next to the notice
which states that this License applies to the Document. These
Warranty Disclaimers are considered to be included by reference in
this License, but only as regards disclaiming warranties: any other
implication that these Warranty Disclaimers may have is void and
has no effect on the meaning of this License.
2. VERBATIM COPYING
You may copy and distribute the Document in any medium, either
commercially or noncommercially, provided that this License, the
copyright notices, and the license notice saying this License
applies to the Document are reproduced in all copies, and that you
add no other conditions whatsoever to those of this License. You
may not use technical measures to obstruct or control the reading
or further copying of the copies you make or distribute. However,
you may accept compensation in exchange for copies. If you
distribute a large enough number of copies you must also follow the
conditions in section 3.
You may also lend copies, under the same conditions stated above,
and you may publicly display copies.
3. COPYING IN QUANTITY
If you publish printed copies (or copies in media that commonly
have printed covers) of the Document, numbering more than 100, and
the Document's license notice requires Cover Texts, you must
enclose the copies in covers that carry, clearly and legibly, all
these Cover Texts: Front-Cover Texts on the front cover, and
Back-Cover Texts on the back cover. Both covers must also clearly
and legibly identify you as the publisher of these copies. The
front cover must present the full title with all words of the title
equally prominent and visible. You may add other material on the
covers in addition. Copying with changes limited to the covers, as
long as they preserve the title of the Document and satisfy these
conditions, can be treated as verbatim copying in other respects.
If the required texts for either cover are too voluminous to fit
legibly, you should put the first ones listed (as many as fit
reasonably) on the actual cover, and continue the rest onto
adjacent pages.
If you publish or distribute Opaque copies of the Document
numbering more than 100, you must either include a machine-readable
Transparent copy along with each Opaque copy, or state in or with
each Opaque copy a computer-network location from which the general
network-using public has access to download using public-standard
network protocols a complete Transparent copy of the Document, free
of added material. If you use the latter option, you must take
reasonably prudent steps, when you begin distribution of Opaque
copies in quantity, to ensure that this Transparent copy will
remain thus accessible at the stated location until at least one
year after the last time you distribute an Opaque copy (directly or
through your agents or retailers) of that edition to the public.
It is requested, but not required, that you contact the authors of
the Document well before redistributing any large number of copies,
to give them a chance to provide you with an updated version of the
Document.
4. MODIFICATIONS
You may copy and distribute a Modified Version of the Document
under the conditions of sections 2 and 3 above, provided that you
release the Modified Version under precisely this License, with the
Modified Version filling the role of the Document, thus licensing
distribution and modification of the Modified Version to whoever
possesses a copy of it. In addition, you must do these things in
the Modified Version:
A. Use in the Title Page (and on the covers, if any) a title
distinct from that of the Document, and from those of previous
versions (which should, if there were any, be listed in the
History section of the Document). You may use the same title
as a previous version if the original publisher of that
version gives permission.
B. List on the Title Page, as authors, one or more persons or
entities responsible for authorship of the modifications in
the Modified Version, together with at least five of the
principal authors of the Document (all of its principal
authors, if it has fewer than five), unless they release you
from this requirement.
C. State on the Title page the name of the publisher of the
Modified Version, as the publisher.
D. Preserve all the copyright notices of the Document.
E. Add an appropriate copyright notice for your modifications
adjacent to the other copyright notices.
F. Include, immediately after the copyright notices, a license
notice giving the public permission to use the Modified
Version under the terms of this License, in the form shown in
the Addendum below.
G. Preserve in that license notice the full lists of Invariant
Sections and required Cover Texts given in the Document's
license notice.
H. Include an unaltered copy of this License.
I. Preserve the section Entitled "History", Preserve its Title,
and add to it an item stating at least the title, year, new
authors, and publisher of the Modified Version as given on the
Title Page. If there is no section Entitled "History" in the
Document, create one stating the title, year, authors, and
publisher of the Document as given on its Title Page, then add
an item describing the Modified Version as stated in the
previous sentence.
J. Preserve the network location, if any, given in the Document
for public access to a Transparent copy of the Document, and
likewise the network locations given in the Document for
previous versions it was based on. These may be placed in the
"History" section. You may omit a network location for a work
that was published at least four years before the Document
itself, or if the original publisher of the version it refers
to gives permission.
K. For any section Entitled "Acknowledgements" or "Dedications",
Preserve the Title of the section, and preserve in the section
all the substance and tone of each of the contributor
acknowledgements and/or dedications given therein.
L. Preserve all the Invariant Sections of the Document, unaltered
in their text and in their titles. Section numbers or the
equivalent are not considered part of the section titles.
M. Delete any section Entitled "Endorsements". Such a section
may not be included in the Modified Version.
N. Do not retitle any existing section to be Entitled
"Endorsements" or to conflict in title with any Invariant
Section.
O. Preserve any Warranty Disclaimers.
If the Modified Version includes new front-matter sections or
appendices that qualify as Secondary Sections and contain no
material copied from the Document, you may at your option designate
some or all of these sections as invariant. To do this, add their
titles to the list of Invariant Sections in the Modified Version's
license notice. These titles must be distinct from any other
section titles.
You may add a section Entitled "Endorsements", provided it contains
nothing but endorsements of your Modified Version by various
parties--for example, statements of peer review or that the text
has been approved by an organization as the authoritative
definition of a standard.
You may add a passage of up to five words as a Front-Cover Text,
and a passage of up to 25 words as a Back-Cover Text, to the end of
the list of Cover Texts in the Modified Version. Only one passage
of Front-Cover Text and one of Back-Cover Text may be added by (or
through arrangements made by) any one entity. If the Document
already includes a cover text for the same cover, previously added
by you or by arrangement made by the same entity you are acting on
behalf of, you may not add another; but you may replace the old
one, on explicit permission from the previous publisher that added
the old one.
The author(s) and publisher(s) of the Document do not by this
License give permission to use their names for publicity for or to
assert or imply endorsement of any Modified Version.
5. COMBINING DOCUMENTS
You may combine the Document with other documents released under
this License, under the terms defined in section 4 above for
modified versions, provided that you include in the combination all
of the Invariant Sections of all of the original documents,
unmodified, and list them all as Invariant Sections of your
combined work in its license notice, and that you preserve all
their Warranty Disclaimers.
The combined work need only contain one copy of this License, and
multiple identical Invariant Sections may be replaced with a single
copy. If there are multiple Invariant Sections with the same name
but different contents, make the title of each such section unique
by adding at the end of it, in parentheses, the name of the
original author or publisher of that section if known, or else a
unique number. Make the same adjustment to the section titles in
the list of Invariant Sections in the license notice of the
combined work.
In the combination, you must combine any sections Entitled
"History" in the various original documents, forming one section
Entitled "History"; likewise combine any sections Entitled
"Acknowledgements", and any sections Entitled "Dedications". You
must delete all sections Entitled "Endorsements."
6. COLLECTIONS OF DOCUMENTS
You may make a collection consisting of the Document and other
documents released under this License, and replace the individual
copies of this License in the various documents with a single copy
that is included in the collection, provided that you follow the
rules of this License for verbatim copying of each of the documents
in all other respects.
You may extract a single document from such a collection, and
distribute it individually under this License, provided you insert
a copy of this License into the extracted document, and follow this
License in all other respects regarding verbatim copying of that
document.
7. AGGREGATION WITH INDEPENDENT WORKS
A compilation of the Document or its derivatives with other
separate and independent documents or works, in or on a volume of a
storage or distribution medium, is called an "aggregate" if the
copyright resulting from the compilation is not used to limit the
legal rights of the compilation's users beyond what the individual
works permit. When the Document is included in an aggregate, this
License does not apply to the other works in the aggregate which
are not themselves derivative works of the Document.
If the Cover Text requirement of section 3 is applicable to these
copies of the Document, then if the Document is less than one half
of the entire aggregate, the Document's Cover Texts may be placed
on covers that bracket the Document within the aggregate, or the
electronic equivalent of covers if the Document is in electronic
form. Otherwise they must appear on printed covers that bracket
the whole aggregate.
8. TRANSLATION
Translation is considered a kind of modification, so you may
distribute translations of the Document under the terms of section
4. Replacing Invariant Sections with translations requires special
permission from their copyright holders, but you may include
translations of some or all Invariant Sections in addition to the
original versions of these Invariant Sections. You may include a
translation of this License, and all the license notices in the
Document, and any Warranty Disclaimers, provided that you also
include the original English version of this License and the
original versions of those notices and disclaimers. In case of a
disagreement between the translation and the original version of
this License or a notice or disclaimer, the original version will
prevail.
If a section in the Document is Entitled "Acknowledgements",
"Dedications", or "History", the requirement (section 4) to
Preserve its Title (section 1) will typically require changing the
actual title.
9. TERMINATION
You may not copy, modify, sublicense, or distribute the Document
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense, or distribute it is void,
and will automatically terminate your rights under this License.
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the
copyright holder fails to notify you of the violation by some
reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from
that copyright holder, and you cure the violation prior to 30 days
after your receipt of the notice.
Termination of your rights under this section does not terminate
the licenses of parties who have received copies or rights from you
under this License. If your rights have been terminated and not
permanently reinstated, receipt of a copy of some or all of the
same material does not give you any rights to use it.
10. FUTURE REVISIONS OF THIS LICENSE
The Free Software Foundation may publish new, revised versions of
the GNU Free Documentation License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns. See
.
Each version of the License is given a distinguishing version
number. If the Document specifies that a particular numbered
version of this License "or any later version" applies to it, you
have the option of following the terms and conditions either of
that specified version or of any later version that has been
published (not as a draft) by the Free Software Foundation. If the
Document does not specify a version number of this License, you may
choose any version ever published (not as a draft) by the Free
Software Foundation. If the Document specifies that a proxy can
decide which future versions of this License can be used, that
proxy's public statement of acceptance of a version permanently
authorizes you to choose that version for the Document.
11. RELICENSING
"Massive Multiauthor Collaboration Site" (or "MMC Site") means any
World Wide Web server that publishes copyrightable works and also
provides prominent facilities for anybody to edit those works. A
public wiki that anybody can edit is an example of such a server.
A "Massive Multiauthor Collaboration" (or "MMC") contained in the
site means any set of copyrightable works thus published on the MMC
site.
"CC-BY-SA" means the Creative Commons Attribution-Share Alike 3.0
license published by Creative Commons Corporation, a not-for-profit
corporation with a principal place of business in San Francisco,
California, as well as future copyleft versions of that license
published by that same organization.
"Incorporate" means to publish or republish a Document, in whole or
in part, as part of another Document.
An MMC is "eligible for relicensing" if it is licensed under this
License, and if all works that were first published under this
License somewhere other than this MMC, and subsequently
incorporated in whole or in part into the MMC, (1) had no cover
texts or invariant sections, and (2) were thus incorporated prior
to November 1, 2008.
The operator of an MMC Site may republish an MMC contained in the
site under CC-BY-SA on the same site at any time before August 1,
2009, provided the MMC is eligible for relicensing.
ADDENDUM: How to use this License for your documents
====================================================
To use this License in a document you have written, include a copy of
the License in the document and put the following copyright and license
notices just after the title page:
Copyright (C) YEAR YOUR NAME.
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3
or any later version published by the Free Software Foundation;
with no Invariant Sections, no Front-Cover Texts, and no Back-Cover
Texts. A copy of the license is included in the section entitled ``GNU
Free Documentation License''.
If you have Invariant Sections, Front-Cover Texts and Back-Cover
Texts, replace the "with...Texts." line with this:
with the Invariant Sections being LIST THEIR TITLES, with
the Front-Cover Texts being LIST, and with the Back-Cover Texts
being LIST.
If you have Invariant Sections without Cover Texts, or some other
combination of the three, merge those two alternatives to suit the
situation.
If your document contains nontrivial examples of program code, we
recommend releasing these examples in parallel under your choice of free
software license, such as the GNU General Public License, to permit
their use in free software.
File: recutils.info, Node: Concept Index, Prev: GNU Free Documentation License, Up: Top
Concept Index
*************
[index ]
* Menu:
* %allowed: Allowed Fields. (line 6)
* %auto: Auto-Generated Fields.
(line 6)
* %confidential: Confidential Fields. (line 6)
* %constraint: Arbitrary Constraints.
(line 6)
* %doc: Documenting Records. (line 6)
* %key: Keys and Unique Fields.
(line 6)
* %key <1>: Foreign Keys. (line 65)
* %key <2>: Auto-Generated Fields.
(line 24)
* %mandatory: Record Sets Properties.
(line 13)
* %mandatory <1>: Mandatory Fields. (line 6)
* %prohibit: Prohibited Fields. (line 6)
* %rec: Record Sets. (line 9)
* %rec <1>: Remote Descriptors. (line 6)
* %size: Size Constraints. (line 6)
* %sort: Sorted Output. (line 6)
* %type: Types and Fields. (line 6)
* %typedef: Types and Fields. (line 6)
* %unique: Keys and Unique Fields.
(line 6)
* abbreviations for months: Calendar date items. (line 38)
* adding fields: Adding Fields. (line 6)
* aggregate function: Aggregate Functions. (line 6)
* aliasing, field name aliasing: Field Expressions. (line 52)
* allowed fields: Allowed Fields. (line 6)
* anonymous types: Types and Fields. (line 20)
* arithmetic operators: SEX Operators. (line 13)
* authors of parse_datetime: Authors of parse_datetime.
(line 6)
* automatically generated values: Auto-Generated Fields.
(line 6)
* bash: Bash Builtins. (line 6)
* beginning of time, for POSIX: Seconds since the Epoch.
(line 13)
* Bellovin, Steven M.: Authors of parse_datetime.
(line 6)
* Berets, Jim: Authors of parse_datetime.
(line 6)
* Berry, K.: Authors of parse_datetime.
(line 19)
* books: A Little Example. (line 6)
* boolean operators: SEX Operators. (line 23)
* boolean types: Enumerated Field Types.
(line 30)
* calendar date item: Calendar date items. (line 6)
* case, ignored in dates: General date syntax. (line 60)
* case, in field names: Fields. (line 22)
* case, in selection expressions: Invoking recsel. (line 28)
* case, in selection expressions <1>: Invoking recins. (line 79)
* checking recfiles: Invoking recfix. (line 6)
* combined date and time of day item: Combined date and time of day items.
(line 6)
* comma separated values: CSV Files. (line 6)
* comma separated values <1>: Invoking csv2rec. (line 6)
* comma separated values <2>: Invoking rec2csv. (line 6)
* comments: Comments. (line 6)
* comments, in dates: General date syntax. (line 60)
* comments, in enumerated types: Enumerated Field Types.
(line 21)
* comparison: SEX Operators. (line 36)
* compulsory fields: Mandatory Fields. (line 6)
* conditional operator: SEX Operators. (line 76)
* confidential data: Confidential Fields. (line 6)
* constraints: Arbitrary Constraints.
(line 34)
* counters: Counters. (line 6)
* counting occurrences of a field: SEX Operators. (line 56)
* csv: CSV Files. (line 6)
* csv <1>: Invoking csv2rec. (line 6)
* csv <2>: Invoking rec2csv. (line 6)
* csv2rec: Invoking csv2rec. (line 6)
* date and time of day format, ISO 8601: Combined date and time of day items.
(line 6)
* date comparison: Selecting by predicate.
(line 62)
* date comparison <1>: Selecting by predicate.
(line 86)
* date comparison <2>: SEX Operators. (line 48)
* date format, ISO 8601: Calendar date items. (line 30)
* date input formats: Date input formats. (line 6)
* date, fields containing dates: Date and Time Types. (line 6)
* day of week: Enumerated Field Types.
(line 17)
* day of week item: Day of week items. (line 6)
* decimal separator: Scalar Field Types. (line 54)
* default record types: Record Sets. (line 63)
* deleting fields: Deleting Fields. (line 6)
* deleting records: Deleting Records. (line 6)
* deleting records <1>: Invoking recdel. (line 6)
* description of record sets: Documenting Records. (line 6)
* descriptor: Record Descriptors. (line 6)
* descriptor, external descriptor: Remote Descriptors. (line 42)
* displacement of dates: Relative items in date strings.
(line 6)
* documentation fields: Documenting Records. (line 6)
* duplication, avoiding: Foreign Keys. (line 7)
* editing fields: Invoking recset. (line 6)
* Eggert, Paul: Authors of parse_datetime.
(line 6)
* email: Other Field Types. (line 6)
* encrypted fields: Confidential Fields. (line 18)
* encryption: Encryption. (line 6)
* enumerated types: Enumerated Field Types.
(line 6)
* epoch, for POSIX: Seconds since the Epoch.
(line 13)
* evaluation, of selection expressions: SEX Evaluation. (line 6)
* external descriptor: Remote Descriptors. (line 42)
* FEX: Field Expressions. (line 6)
* field: Fields. (line 6)
* field expressions: Field Expressions. (line 6)
* field name: Fields. (line 16)
* field operators: SEX Operators. (line 56)
* field size: String Field Types. (line 19)
* field types,: Types and Fields. (line 6)
* field values: Fields. (line 34)
* field values, in selection expressions: SEX Operands. (line 51)
* field, allowed fields: Allowed Fields. (line 6)
* field, compulsory fields: Mandatory Fields. (line 13)
* field, forbidden fields: Prohibited Fields. (line 6)
* field, mandatory fields: Mandatory Fields. (line 13)
* field, special fields: Record Sets Properties.
(line 6)
* floating point numbers: Scalar Field Types. (line 52)
* foreign key: Other Field Types. (line 27)
* foreign key <1>: Foreign Keys. (line 67)
* formatted output: Invoking recfmt. (line 6)
* fractions: Scalar Field Types. (line 52)
* general date syntax: General date syntax. (line 6)
* grouping: Grouping Records. (line 6)
* grouping, within regular expressions: Regular Expressions. (line 42)
* hexadecimal: Scalar Field Types. (line 13)
* ID numbers: Auto-Generated Fields.
(line 24)
* implies, logical implication: Arbitrary Constraints.
(line 34)
* inserting new records: Invoking recins. (line 6)
* integers: Scalar Field Types. (line 9)
* integrity problems: Declaring Types. (line 52)
* integrity problems <1>: Mandatory Fields. (line 15)
* integrity problems <2>: Keys and Unique Fields.
(line 57)
* integrity problems <3>: Arbitrary Constraints.
(line 24)
* integrity problems <4>: Remote Descriptors. (line 35)
* integrity problems <5>: Confidential Fields. (line 62)
* integrity, checking: Checking Recfiles. (line 6)
* integrity, checking <1>: Invoking recfix. (line 6)
* interactive use: Bash Builtins. (line 6)
* ISO 8601 date and time of day format: Combined date and time of day items.
(line 6)
* ISO 8601 date format: Calendar date items. (line 30)
* items in date strings: General date syntax. (line 6)
* join: Joining Records. (line 65)
* key, foreign key: Foreign Keys. (line 67)
* key, primary key: Auto-Generated Fields.
(line 24)
* language, in dates: General date syntax. (line 36)
* language, in dates <1>: General date syntax. (line 40)
* leap seconds: General date syntax. (line 65)
* leap seconds <1>: Time of day items. (line 14)
* leap seconds <2>: Seconds since the Epoch.
(line 26)
* license, GNU Free Documentation License: GNU Free Documentation License.
(line 6)
* literals, numeric literals: SEX Operands. (line 12)
* literals, string literals: SEX Operands. (line 30)
* locale: Scalar Field Types. (line 54)
* locale <1>: Date and Time Types. (line 11)
* looking up data: Selecting by predicate.
(line 6)
* MacKenzie, David: Authors of parse_datetime.
(line 6)
* mandatory fields: Record Sets Properties.
(line 13)
* mandatory fields <1>: Mandatory Fields. (line 6)
* mdb: Invoking mdb2rec. (line 6)
* mdb2rec: Invoking mdb2rec. (line 6)
* Meyering, Jim: Authors of parse_datetime.
(line 6)
* minutes, time zone correction by: Time of day items. (line 29)
* month names in date strings: Calendar date items. (line 38)
* months, written-out: General date syntax. (line 32)
* MS Access: Invoking mdb2rec. (line 6)
* multiline field values: Fields. (line 37)
* multiline field values <1>: String Field Types. (line 14)
* mutating field values: Setting Fields. (line 6)
* numbers, written-out: General date syntax. (line 22)
* octal: Scalar Field Types. (line 13)
* operands, SEX operands: SEX Operands. (line 6)
* operators: Size Constraints. (line 23)
* operators, arithmetic operators: SEX Operators. (line 13)
* operators, boolean operators: SEX Operators. (line 23)
* operators, comparison operators: SEX Operators. (line 36)
* operators, conditional operator: SEX Operators. (line 76)
* operators, in selection expressions: SEX Operators. (line 6)
* operators, string operators: SEX Operators. (line 68)
* order of fields: Sorted Output. (line 58)
* ordinal numbers: General date syntax. (line 22)
* parentheses, in selection expressions.: SEX Operands. (line 101)
* passwords: Confidential Fields. (line 6)
* Pinard, F.: Authors of parse_datetime.
(line 19)
* primary key: Keys and Unique Fields.
(line 32)
* primary key <1>: Auto-Generated Fields.
(line 24)
* prohibited fields: Prohibited Fields. (line 6)
* pure numbers in date strings: Pure numbers in date strings.
(line 6)
* quotation marks: Selecting by predicate.
(line 85)
* quotation marks <1>: SEX Operands. (line 42)
* range, type description: Declaring Types. (line 12)
* ranges: Scalar Field Types. (line 25)
* readability: Purpose. (line 35)
* readability <1>: Foreign Keys. (line 70)
* reals: Scalar Field Types. (line 52)
* rec, type description: Foreign Keys. (line 67)
* rec2csv: Invoking rec2csv. (line 6)
* recdel: Invoking recdel. (line 6)
* recfix: Syntactical Errors. (line 14)
* recfix <1>: Invoking recfix. (line 6)
* recfmt: Generating Reports. (line 32)
* recfmt <1>: Invoking recfmt. (line 6)
* recinf: Invoking recinf. (line 6)
* recins: Invoking recins. (line 6)
* record: Records. (line 6)
* record sets: Record Sets. (line 6)
* record sets <1>: Foreign Keys. (line 6)
* record size: Records. (line 20)
* record size <1>: Size Constraints. (line 6)
* recsel: Selecting by predicate.
(line 12)
* recsel <1>: Invoking recsel. (line 6)
* recset: Invoking recset. (line 6)
* regexp, type description: String Field Types. (line 33)
* regular expressions: Regular Expressions. (line 6)
* relative items in date strings: Relative items in date strings.
(line 6)
* remote descriptors: Remote Descriptors. (line 53)
* renaming fields: Renaming Fields. (line 6)
* reports: Generating Reports. (line 6)
* requiring certain fields in records: Mandatory Fields. (line 6)
* restricting fields from records: Prohibited Fields. (line 6)
* restricting fields from records <1>: Allowed Fields. (line 6)
* restricting values of fields: String Field Types. (line 33)
* restricting values of fields <1>: Arbitrary Constraints.
(line 6)
* retrieving data: Selecting by predicate.
(line 6)
* Salz, Rich: Authors of parse_datetime.
(line 6)
* selecting records: Selecting by predicate.
(line 6)
* selecting records <1>: Invoking recsel. (line 6)
* selection expressions: Selection Expressions.
(line 6)
* selection expressions <1>: Selecting by predicate.
(line 20)
* shell: Bash Builtins. (line 6)
* size, field size: String Field Types. (line 19)
* size, record size: Records. (line 20)
* size, record size <1>: Size Constraints. (line 6)
* size, type description: String Field Types. (line 19)
* sorting: Sorted Output. (line 6)
* sorting <1>: Sorting Records. (line 6)
* sorting <2>: Invoking recsel. (line 40)
* sorting <3>: Invoking recfix. (line 30)
* sorting, physically: Sorting Records. (line 6)
* special fields: Record Sets Properties.
(line 6)
* special fields <1>: Semantic Errors. (line 6)
* special fields, list of: Record Sets Properties.
(line 38)
* spots: Generating Reports. (line 55)
* string operators: SEX Operators. (line 68)
* strings: String Field Types. (line 6)
* subscripts, in selection expressions: SEX Operands. (line 79)
* templates: Generating Reports. (line 32)
* templates <1>: Templates. (line 6)
* time of day item: Time of day items. (line 6)
* time zone correction: Date and Time Types. (line 11)
* time zone correction <1>: Time of day items. (line 29)
* time zone item: General date syntax. (line 40)
* time zone item <1>: Time zone items. (line 6)
* time, fields containing time values: Date and Time Types. (line 6)
* timestamps: Time-Stamps. (line 6)
* types: Types and Fields. (line 6)
* unique fields: Keys and Unique Fields.
(line 15)
* unique identifiers: Unique Identifiers. (line 6)
* URL: Remote Descriptors. (line 53)
* UUID: Other Field Types. (line 16)
* uuid: Unique Identifiers. (line 6)
Tag Table:
Node: Top1413
Node: Introduction7569
Node: Purpose7793
Ref: Purpose-Footnote-110820
Node: A Little Example10856
Node: The Rec Format13522
Node: Fields14124
Node: Records15906
Node: Comments16947
Node: Record Descriptors18338
Node: Record Sets18950
Node: Naming Record Types21795
Node: Documenting Records22781
Node: Record Sets Properties24155
Node: Querying Recfiles26455
Node: Simple Selections27521
Node: Selecting by Type31623
Node: Selecting by Position33547
Node: Random Records35284
Node: Selection Expressions36540
Node: Selecting by predicate37477
Node: SEX Operands41115
Node: SEX Operators43958
Node: SEX Evaluation46725
Node: Field Expressions48229
Node: Sorted Output50760
Node: Editing Records54574
Node: Inserting Records55439
Node: Adding Records With recins56790
Node: Replacing Records With recins58744
Node: Adding Anonymous Records59826
Node: Deleting Records61144
Ref: Deleting Records-Footnote-162755
Node: Sorting Records62919
Node: Editing Fields63788
Node: Adding Fields64881
Node: Setting Fields66073
Node: Deleting Fields67072
Node: Renaming Fields67819
Node: Field Types68334
Node: Declaring Types69796
Node: Types and Fields71887
Node: Scalar Field Types72933
Node: String Field Types75088
Node: Enumerated Field Types77028
Node: Date and Time Types78378
Node: Other Field Types79362
Node: Constraints on Record Sets80689
Node: Mandatory Fields81876
Node: Prohibited Fields83778
Node: Allowed Fields85186
Node: Keys and Unique Fields86014
Node: Size Constraints88414
Node: Arbitrary Constraints89559
Node: Checking Recfiles91433
Node: Syntactical Errors92125
Node: Semantic Errors92999
Node: Remote Descriptors94278
Node: Grouping and Aggregates97240
Node: Grouping Records97689
Node: Aggregate Functions100499
Node: Queries which Join Records104649
Node: Foreign Keys106698
Node: Joining Records108987
Node: Auto-Generated Fields112264
Node: Counters115287
Node: Unique Identifiers116346
Node: Time-Stamps117644
Node: Encryption118113
Node: Confidential Fields119086
Node: Encrypting Files122565
Node: Decrypting Data123624
Node: Generating Reports126337
Node: Templates128875
Node: Interoperability130223
Node: CSV Files130740
Node: Importing MDB Files132920
Node: Bash Builtins135226
Node: readrec136637
Node: Invoking the Utilities139254
Ref: Common Options139570
Node: Invoking recinf140479
Node: Invoking recsel141793
Node: Invoking recins146108
Node: Invoking recdel149151
Node: Invoking recset151119
Node: Invoking recfix153938
Node: Invoking recfmt156030
Node: Invoking csv2rec156642
Node: Invoking rec2csv157360
Node: Invoking mdb2rec158501
Node: Regular Expressions159268
Node: Date input formats161519
Node: General date syntax163956
Node: Calendar date items166940
Node: Time of day items168944
Node: Time zone items171147
Node: Combined date and time of day items172404
Node: Day of week items173266
Node: Relative items in date strings174281
Node: Pure numbers in date strings177090
Node: Seconds since the Epoch178078
Node: Specifying time zone rules179705
Node: Authors of parse_datetime182085
Ref: Authors of get_date182271
Node: GNU Free Documentation License183234
Node: Concept Index208397
End Tag Table
recutils-1.8/doc/version-rec-mode.texi 0000644 0000000 0000000 00000000136 13413353273 014725 0000000 0000000 @set UPDATED 3 January 2019
@set UPDATED-MONTH January 2019
@set EDITION 1.8
@set VERSION 1.8
recutils-1.8/doc/texinfo.tex 0000644 0000000 0000000 00001070111 13253164723 013055 0000000 0000000 % texinfo.tex -- TeX macros to handle Texinfo files.
%
% Load plain if necessary, i.e., if running under initex.
\expandafter\ifx\csname fmtname\endcsname\relax\input plain\fi
%
\def\texinfoversion{2008-11-17.21}
%
% Copyright (C) 1985, 1986, 1988, 1990, 1991, 1992, 1993, 1994, 1995,
% 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
% 2007, 2008 Free Software Foundation, Inc.
%
% This texinfo.tex file is free software: you can redistribute it and/or
% modify it under the terms of the GNU General Public License as
% published by the Free Software Foundation, either version 3 of the
% License, or (at your option) any later version.
%
% This texinfo.tex file is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
%
% As a special exception, when this file is read by TeX when processing
% a Texinfo source document, you may use the result without
% restriction. (This has been our intent since Texinfo was invented.)
%
% Please try the latest version of texinfo.tex before submitting bug
% reports; you can get the latest version from:
% http://www.gnu.org/software/texinfo/ (the Texinfo home page), or
% ftp://tug.org/tex/texinfo.tex
% (and all CTAN mirrors, see http://www.ctan.org).
% The texinfo.tex in any given distribution could well be out
% of date, so if that's what you're using, please check.
%
% Send bug reports to bug-texinfo@gnu.org. Please include a
% complete document in each bug report with which we can reproduce the
% problem. Patches are, of course, greatly appreciated.
%
% To process a Texinfo manual with TeX, it's most reliable to use the
% texi2dvi shell script that comes with the distribution. For a simple
% manual foo.texi, however, you can get away with this:
% tex foo.texi
% texindex foo.??
% tex foo.texi
% tex foo.texi
% dvips foo.dvi -o # or whatever; this makes foo.ps.
% The extra TeX runs get the cross-reference information correct.
% Sometimes one run after texindex suffices, and sometimes you need more
% than two; texi2dvi does it as many times as necessary.
%
% It is possible to adapt texinfo.tex for other languages, to some
% extent. You can get the existing language-specific files from the
% full Texinfo distribution.
%
% The GNU Texinfo home page is http://www.gnu.org/software/texinfo.
\message{Loading texinfo [version \texinfoversion]:}
% If in a .fmt file, print the version number
% and turn on active characters that we couldn't do earlier because
% they might have appeared in the input file name.
\everyjob{\message{[Texinfo version \texinfoversion]}%
\catcode`+=\active \catcode`\_=\active}
\chardef\other=12
% We never want plain's \outer definition of \+ in Texinfo.
% For @tex, we can use \tabalign.
\let\+ = \relax
% Save some plain tex macros whose names we will redefine.
\let\ptexb=\b
\let\ptexbullet=\bullet
\let\ptexc=\c
\let\ptexcomma=\,
\let\ptexdot=\.
\let\ptexdots=\dots
\let\ptexend=\end
\let\ptexequiv=\equiv
\let\ptexexclam=\!
\let\ptexfootnote=\footnote
\let\ptexgtr=>
\let\ptexhat=^
\let\ptexi=\i
\let\ptexindent=\indent
\let\ptexinsert=\insert
\let\ptexlbrace=\{
\let\ptexless=<
\let\ptexnewwrite\newwrite
\let\ptexnoindent=\noindent
\let\ptexplus=+
\let\ptexrbrace=\}
\let\ptexslash=\/
\let\ptexstar=\*
\let\ptext=\t
\let\ptextop=\top
{\catcode`\'=\active
\global\let\ptexquoteright'}% Math-mode def from plain.tex.
% If this character appears in an error message or help string, it
% starts a new line in the output.
\newlinechar = `^^J
% Use TeX 3.0's \inputlineno to get the line number, for better error
% messages, but if we're using an old version of TeX, don't do anything.
%
\ifx\inputlineno\thisisundefined
\let\linenumber = \empty % Pre-3.0.
\else
\def\linenumber{l.\the\inputlineno:\space}
\fi
% Set up fixed words for English if not already set.
\ifx\putwordAppendix\undefined \gdef\putwordAppendix{Appendix}\fi
\ifx\putwordChapter\undefined \gdef\putwordChapter{Chapter}\fi
\ifx\putwordfile\undefined \gdef\putwordfile{file}\fi
\ifx\putwordin\undefined \gdef\putwordin{in}\fi
\ifx\putwordIndexIsEmpty\undefined \gdef\putwordIndexIsEmpty{(Index is empty)}\fi
\ifx\putwordIndexNonexistent\undefined \gdef\putwordIndexNonexistent{(Index is nonexistent)}\fi
\ifx\putwordInfo\undefined \gdef\putwordInfo{Info}\fi
\ifx\putwordInstanceVariableof\undefined \gdef\putwordInstanceVariableof{Instance Variable of}\fi
\ifx\putwordMethodon\undefined \gdef\putwordMethodon{Method on}\fi
\ifx\putwordNoTitle\undefined \gdef\putwordNoTitle{No Title}\fi
\ifx\putwordof\undefined \gdef\putwordof{of}\fi
\ifx\putwordon\undefined \gdef\putwordon{on}\fi
\ifx\putwordpage\undefined \gdef\putwordpage{page}\fi
\ifx\putwordsection\undefined \gdef\putwordsection{section}\fi
\ifx\putwordSection\undefined \gdef\putwordSection{Section}\fi
\ifx\putwordsee\undefined \gdef\putwordsee{see}\fi
\ifx\putwordSee\undefined \gdef\putwordSee{See}\fi
\ifx\putwordShortTOC\undefined \gdef\putwordShortTOC{Short Contents}\fi
\ifx\putwordTOC\undefined \gdef\putwordTOC{Table of Contents}\fi
%
\ifx\putwordMJan\undefined \gdef\putwordMJan{January}\fi
\ifx\putwordMFeb\undefined \gdef\putwordMFeb{February}\fi
\ifx\putwordMMar\undefined \gdef\putwordMMar{March}\fi
\ifx\putwordMApr\undefined \gdef\putwordMApr{April}\fi
\ifx\putwordMMay\undefined \gdef\putwordMMay{May}\fi
\ifx\putwordMJun\undefined \gdef\putwordMJun{June}\fi
\ifx\putwordMJul\undefined \gdef\putwordMJul{July}\fi
\ifx\putwordMAug\undefined \gdef\putwordMAug{August}\fi
\ifx\putwordMSep\undefined \gdef\putwordMSep{September}\fi
\ifx\putwordMOct\undefined \gdef\putwordMOct{October}\fi
\ifx\putwordMNov\undefined \gdef\putwordMNov{November}\fi
\ifx\putwordMDec\undefined \gdef\putwordMDec{December}\fi
%
\ifx\putwordDefmac\undefined \gdef\putwordDefmac{Macro}\fi
\ifx\putwordDefspec\undefined \gdef\putwordDefspec{Special Form}\fi
\ifx\putwordDefvar\undefined \gdef\putwordDefvar{Variable}\fi
\ifx\putwordDefopt\undefined \gdef\putwordDefopt{User Option}\fi
\ifx\putwordDeffunc\undefined \gdef\putwordDeffunc{Function}\fi
% Since the category of space is not known, we have to be careful.
\chardef\spacecat = 10
\def\spaceisspace{\catcode`\ =\spacecat}
% sometimes characters are active, so we need control sequences.
\chardef\colonChar = `\:
\chardef\commaChar = `\,
\chardef\dashChar = `\-
\chardef\dotChar = `\.
\chardef\exclamChar= `\!
\chardef\lquoteChar= `\`
\chardef\questChar = `\?
\chardef\rquoteChar= `\'
\chardef\semiChar = `\;
\chardef\underChar = `\_
% Ignore a token.
%
\def\gobble#1{}
% The following is used inside several \edef's.
\def\makecsname#1{\expandafter\noexpand\csname#1\endcsname}
% Hyphenation fixes.
\hyphenation{
Flor-i-da Ghost-script Ghost-view Mac-OS Post-Script
ap-pen-dix bit-map bit-maps
data-base data-bases eshell fall-ing half-way long-est man-u-script
man-u-scripts mini-buf-fer mini-buf-fers over-view par-a-digm
par-a-digms rath-er rec-tan-gu-lar ro-bot-ics se-vere-ly set-up spa-ces
spell-ing spell-ings
stand-alone strong-est time-stamp time-stamps which-ever white-space
wide-spread wrap-around
}
% Margin to add to right of even pages, to left of odd pages.
\newdimen\bindingoffset
\newdimen\normaloffset
\newdimen\pagewidth \newdimen\pageheight
% For a final copy, take out the rectangles
% that mark overfull boxes (in case you have decided
% that the text looks ok even though it passes the margin).
%
\def\finalout{\overfullrule=0pt}
% @| inserts a changebar to the left of the current line. It should
% surround any changed text. This approach does *not* work if the
% change spans more than two lines of output. To handle that, we would
% have to adopt a much more difficult approach (putting marks into the main
% vertical list for the beginning and end of each change).
%
\def\|{%
% \vadjust can only be used in horizontal mode.
\leavevmode
%
% Append this vertical mode material after the current line in the output.
\vadjust{%
% We want to insert a rule with the height and depth of the current
% leading; that is exactly what \strutbox is supposed to record.
\vskip-\baselineskip
%
% \vadjust-items are inserted at the left edge of the type. So
% the \llap here moves out into the left-hand margin.
\llap{%
%
% For a thicker or thinner bar, change the `1pt'.
\vrule height\baselineskip width1pt
%
% This is the space between the bar and the text.
\hskip 12pt
}%
}%
}
% Sometimes it is convenient to have everything in the transcript file
% and nothing on the terminal. We don't just call \tracingall here,
% since that produces some useless output on the terminal. We also make
% some effort to order the tracing commands to reduce output in the log
% file; cf. trace.sty in LaTeX.
%
\def\gloggingall{\begingroup \globaldefs = 1 \loggingall \endgroup}%
\def\loggingall{%
\tracingstats2
\tracingpages1
\tracinglostchars2 % 2 gives us more in etex
\tracingparagraphs1
\tracingoutput1
\tracingmacros2
\tracingrestores1
\showboxbreadth\maxdimen \showboxdepth\maxdimen
\ifx\eTeXversion\undefined\else % etex gives us more logging
\tracingscantokens1
\tracingifs1
\tracinggroups1
\tracingnesting2
\tracingassigns1
\fi
\tracingcommands3 % 3 gives us more in etex
\errorcontextlines16
}%
% add check for \lastpenalty to plain's definitions. If the last thing
% we did was a \nobreak, we don't want to insert more space.
%
\def\smallbreak{\ifnum\lastpenalty<10000\par\ifdim\lastskip<\smallskipamount
\removelastskip\penalty-50\smallskip\fi\fi}
\def\medbreak{\ifnum\lastpenalty<10000\par\ifdim\lastskip<\medskipamount
\removelastskip\penalty-100\medskip\fi\fi}
\def\bigbreak{\ifnum\lastpenalty<10000\par\ifdim\lastskip<\bigskipamount
\removelastskip\penalty-200\bigskip\fi\fi}
% For @cropmarks command.
% Do @cropmarks to get crop marks.
%
\newif\ifcropmarks
\let\cropmarks = \cropmarkstrue
%
% Dimensions to add cropmarks at corners.
% Added by P. A. MacKay, 12 Nov. 1986
%
\newdimen\outerhsize \newdimen\outervsize % set by the paper size routines
\newdimen\cornerlong \cornerlong=1pc
\newdimen\cornerthick \cornerthick=.3pt
\newdimen\topandbottommargin \topandbottommargin=.75in
% Output a mark which sets \thischapter, \thissection and \thiscolor.
% We dump everything together because we only have one kind of mark.
% This works because we only use \botmark / \topmark, not \firstmark.
%
% A mark contains a subexpression of the \ifcase ... \fi construct.
% \get*marks macros below extract the needed part using \ifcase.
%
% Another complication is to let the user choose whether \thischapter
% (\thissection) refers to the chapter (section) in effect at the top
% of a page, or that at the bottom of a page. The solution is
% described on page 260 of The TeXbook. It involves outputting two
% marks for the sectioning macros, one before the section break, and
% one after. I won't pretend I can describe this better than DEK...
\def\domark{%
\toks0=\expandafter{\lastchapterdefs}%
\toks2=\expandafter{\lastsectiondefs}%
\toks4=\expandafter{\prevchapterdefs}%
\toks6=\expandafter{\prevsectiondefs}%
\toks8=\expandafter{\lastcolordefs}%
\mark{%
\the\toks0 \the\toks2
\noexpand\or \the\toks4 \the\toks6
\noexpand\else \the\toks8
}%
}
% \topmark doesn't work for the very first chapter (after the title
% page or the contents), so we use \firstmark there -- this gets us
% the mark with the chapter defs, unless the user sneaks in, e.g.,
% @setcolor (or @url, or @link, etc.) between @contents and the very
% first @chapter.
\def\gettopheadingmarks{%
\ifcase0\topmark\fi
\ifx\thischapter\empty \ifcase0\firstmark\fi \fi
}
\def\getbottomheadingmarks{\ifcase1\botmark\fi}
\def\getcolormarks{\ifcase2\topmark\fi}
% Avoid "undefined control sequence" errors.
\def\lastchapterdefs{}
\def\lastsectiondefs{}
\def\prevchapterdefs{}
\def\prevsectiondefs{}
\def\lastcolordefs{}
% Main output routine.
\chardef\PAGE = 255
\output = {\onepageout{\pagecontents\PAGE}}
\newbox\headlinebox
\newbox\footlinebox
% \onepageout takes a vbox as an argument. Note that \pagecontents
% does insertions, but you have to call it yourself.
\def\onepageout#1{%
\ifcropmarks \hoffset=0pt \else \hoffset=\normaloffset \fi
%
\ifodd\pageno \advance\hoffset by \bindingoffset
\else \advance\hoffset by -\bindingoffset\fi
%
% Do this outside of the \shipout so @code etc. will be expanded in
% the headline as they should be, not taken literally (outputting ''code).
\ifodd\pageno \getoddheadingmarks \else \getevenheadingmarks \fi
\setbox\headlinebox = \vbox{\let\hsize=\pagewidth \makeheadline}%
\ifodd\pageno \getoddfootingmarks \else \getevenfootingmarks \fi
\setbox\footlinebox = \vbox{\let\hsize=\pagewidth \makefootline}%
%
{%
% Have to do this stuff outside the \shipout because we want it to
% take effect in \write's, yet the group defined by the \vbox ends
% before the \shipout runs.
%
\indexdummies % don't expand commands in the output.
\normalturnoffactive % \ in index entries must not stay \, e.g., if
% the page break happens to be in the middle of an example.
% We don't want .vr (or whatever) entries like this:
% \entry{{\tt \indexbackslash }acronym}{32}{\code {\acronym}}
% "\acronym" won't work when it's read back in;
% it needs to be
% {\code {{\tt \backslashcurfont }acronym}
\shipout\vbox{%
% Do this early so pdf references go to the beginning of the page.
\ifpdfmakepagedest \pdfdest name{\the\pageno} xyz\fi
%
\ifcropmarks \vbox to \outervsize\bgroup
\hsize = \outerhsize
\vskip-\topandbottommargin
\vtop to0pt{%
\line{\ewtop\hfil\ewtop}%
\nointerlineskip
\line{%
\vbox{\moveleft\cornerthick\nstop}%
\hfill
\vbox{\moveright\cornerthick\nstop}%
}%
\vss}%
\vskip\topandbottommargin
\line\bgroup
\hfil % center the page within the outer (page) hsize.
\ifodd\pageno\hskip\bindingoffset\fi
\vbox\bgroup
\fi
%
\unvbox\headlinebox
\pagebody{#1}%
\ifdim\ht\footlinebox > 0pt
% Only leave this space if the footline is nonempty.
% (We lessened \vsize for it in \oddfootingyyy.)
% The \baselineskip=24pt in plain's \makefootline has no effect.
\vskip 24pt
\unvbox\footlinebox
\fi
%
\ifcropmarks
\egroup % end of \vbox\bgroup
\hfil\egroup % end of (centering) \line\bgroup
\vskip\topandbottommargin plus1fill minus1fill
\boxmaxdepth = \cornerthick
\vbox to0pt{\vss
\line{%
\vbox{\moveleft\cornerthick\nsbot}%
\hfill
\vbox{\moveright\cornerthick\nsbot}%
}%
\nointerlineskip
\line{\ewbot\hfil\ewbot}%
}%
\egroup % \vbox from first cropmarks clause
\fi
}% end of \shipout\vbox
}% end of group with \indexdummies
\advancepageno
\ifnum\outputpenalty>-20000 \else\dosupereject\fi
}
\newinsert\margin \dimen\margin=\maxdimen
\def\pagebody#1{\vbox to\pageheight{\boxmaxdepth=\maxdepth #1}}
{\catcode`\@ =11
\gdef\pagecontents#1{\ifvoid\topins\else\unvbox\topins\fi
% marginal hacks, juha@viisa.uucp (Juha Takala)
\ifvoid\margin\else % marginal info is present
\rlap{\kern\hsize\vbox to\z@{\kern1pt\box\margin \vss}}\fi
\dimen@=\dp#1\relax \unvbox#1\relax
\ifvoid\footins\else\vskip\skip\footins\footnoterule \unvbox\footins\fi
\ifr@ggedbottom \kern-\dimen@ \vfil \fi}
}
% Here are the rules for the cropmarks. Note that they are
% offset so that the space between them is truly \outerhsize or \outervsize
% (P. A. MacKay, 12 November, 1986)
%
\def\ewtop{\vrule height\cornerthick depth0pt width\cornerlong}
\def\nstop{\vbox
{\hrule height\cornerthick depth\cornerlong width\cornerthick}}
\def\ewbot{\vrule height0pt depth\cornerthick width\cornerlong}
\def\nsbot{\vbox
{\hrule height\cornerlong depth\cornerthick width\cornerthick}}
% Parse an argument, then pass it to #1. The argument is the rest of
% the input line (except we remove a trailing comment). #1 should be a
% macro which expects an ordinary undelimited TeX argument.
%
\def\parsearg{\parseargusing{}}
\def\parseargusing#1#2{%
\def\argtorun{#2}%
\begingroup
\obeylines
\spaceisspace
#1%
\parseargline\empty% Insert the \empty token, see \finishparsearg below.
}
{\obeylines %
\gdef\parseargline#1^^M{%
\endgroup % End of the group started in \parsearg.
\argremovecomment #1\comment\ArgTerm%
}%
}
% First remove any @comment, then any @c comment.
\def\argremovecomment#1\comment#2\ArgTerm{\argremovec #1\c\ArgTerm}
\def\argremovec#1\c#2\ArgTerm{\argcheckspaces#1\^^M\ArgTerm}
% Each occurrence of `\^^M' or `\^^M ' (i.e., with a trailing space) is replaced by a single space.
%
% \argremovec might leave us with trailing space, e.g.,
% @end itemize @c foo
% This space token undergoes the same procedure and is eventually removed
% by \finishparsearg.
%
\def\argcheckspaces#1\^^M{\argcheckspacesX#1\^^M \^^M}
\def\argcheckspacesX#1 \^^M{\argcheckspacesY#1\^^M}
\def\argcheckspacesY#1\^^M#2\^^M#3\ArgTerm{%
\def\temp{#3}%
\ifx\temp\empty
% Do not use \next, perhaps the caller of \parsearg uses it; reuse \temp:
\let\temp\finishparsearg
\else
\let\temp\argcheckspaces
\fi
% Put the space token in:
\temp#1 #3\ArgTerm
}
% If a _delimited_ argument is enclosed in braces, they get stripped; so
% to get _exactly_ the rest of the line, we had to prevent such a situation.
% We prepended an \empty token at the very beginning and we expand it now,
% just before passing the control to \argtorun.
% (Similarly, we have to think about #3 of \argcheckspacesY above: it is
% either the null string, or it ends with \^^M---thus there is no danger
% that a pair of braces would be stripped.)
%
% But first, we have to remove the trailing space token.
%
\def\finishparsearg#1 \ArgTerm{\expandafter\argtorun\expandafter{#1}}
% \parseargdef\foo{...}
% is roughly equivalent to
% \def\foo{\parsearg\Xfoo}
% \def\Xfoo#1{...}
%
% Actually, I use \csname\string\foo\endcsname, i.e., \\foo, as it is my
% favourite TeX trick. --kasal, 16nov03
\def\parseargdef#1{%
\expandafter \doparseargdef \csname\string#1\endcsname #1%
}
\def\doparseargdef#1#2{%
\def#2{\parsearg#1}%
\def#1##1%
}
% Several utility definitions with active space:
{
\obeyspaces
\gdef\obeyedspace{ }
% Make each space character in the input produce a normal interword
% space in the output. Don't allow a line break at this space, as this
% is used only in environments like @example, where each line of input
% should produce a line of output anyway.
%
\gdef\sepspaces{\obeyspaces\let =\tie}
% If an index command is used in an @example environment, any spaces
% therein should become regular spaces in the raw index file, not the
% expansion of \tie (\leavevmode \penalty \@M \ ).
\gdef\unsepspaces{\let =\space}
}
\def\flushcr{\ifx\par\lisppar \def\next##1{}\else \let\next=\relax \fi \next}
% Define the framework for environments in texinfo.tex. It's used like this:
%
% \envdef\foo{...}
% \def\Efoo{...}
%
% It's the responsibility of \envdef to insert \begingroup before the
% actual body; @end closes the group after calling \Efoo. \envdef also
% defines \thisenv, so the current environment is known; @end checks
% whether the environment name matches. The \checkenv macro can also be
% used to check whether the current environment is the one expected.
%
% Non-false conditionals (@iftex, @ifset) don't fit into this, so they
% are not treated as environments; they don't open a group. (The
% implementation of @end takes care not to call \endgroup in this
% special case.)
% At run-time, environments start with this:
\def\startenvironment#1{\begingroup\def\thisenv{#1}}
% initialize
\let\thisenv\empty
% ... but they get defined via ``\envdef\foo{...}'':
\long\def\envdef#1#2{\def#1{\startenvironment#1#2}}
\def\envparseargdef#1#2{\parseargdef#1{\startenvironment#1#2}}
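% Illustrative sketch only (the @shaded environment is hypothetical, not
% defined by Texinfo): a new environment supplies its opening code to
% \envdef and its closing code as \E<name>, e.g.
%   \envdef\shaded{\it}%  @shaded switches to italics inside its group
%   \def\Eshaded{}%       @end shaded merely closes the group
% so "@shaded ... @end shaded" would typeset its body in italics.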
% Check whether we're in the right environment:
\def\checkenv#1{%
\def\temp{#1}%
\ifx\thisenv\temp
\else
\badenverr
\fi
}
% Environment mismatch, #1 expected:
\def\badenverr{%
\errhelp = \EMsimple
\errmessage{This command can appear only \inenvironment\temp,
not \inenvironment\thisenv}%
}
\def\inenvironment#1{%
\ifx#1\empty
out of any environment%
\else
in environment \expandafter\string#1%
\fi
}
% @end foo executes the definition of \Efoo.
% But first, it executes a specialized version of \checkenv
%
\parseargdef\end{%
\if 1\csname iscond.#1\endcsname
\else
% The general wording of \badenverr may not be ideal, but... --kasal, 06nov03
\expandafter\checkenv\csname#1\endcsname
\csname E#1\endcsname
\endgroup
\fi
}
\newhelp\EMsimple{Press RETURN to continue.}
%% Simple single-character @ commands
% @@ prints an @
% Kludge this until the fonts are right (grr).
\def\@{{\tt\char64}}
% This is turned off because it was never documented
% and you can use @w{...} around a quote to suppress ligatures.
%% Define @` and @' to be the same as ` and '
%% but suppressing ligatures.
%\def\`{{`}}
%\def\'{{'}}
% Used to generate quoted braces.
\def\mylbrace {{\tt\char123}}
\def\myrbrace {{\tt\char125}}
\let\{=\mylbrace
\let\}=\myrbrace
\begingroup
% Definitions to produce \{ and \} commands for indices,
% and @{ and @} for the aux/toc files.
\catcode`\{ = \other \catcode`\} = \other
\catcode`\[ = 1 \catcode`\] = 2
\catcode`\! = 0 \catcode`\\ = \other
!gdef!lbracecmd[\{]%
!gdef!rbracecmd[\}]%
!gdef!lbraceatcmd[@{]%
!gdef!rbraceatcmd[@}]%
!endgroup
% @comma{} to avoid , parsing problems.
\let\comma = ,
% Accents: @, @dotaccent @ringaccent @ubaraccent @udotaccent
% Others are defined by plain TeX: @` @' @" @^ @~ @= @u @v @H.
\let\, = \c
\let\dotaccent = \.
\def\ringaccent#1{{\accent23 #1}}
\let\tieaccent = \t
\let\ubaraccent = \b
\let\udotaccent = \d
% Other special characters: @questiondown @exclamdown @ordf @ordm
% Plain TeX defines: @AA @AE @O @OE @L (plus lowercase versions) @ss.
\def\questiondown{?`}
\def\exclamdown{!`}
\def\ordf{\leavevmode\raise1ex\hbox{\selectfonts\lllsize \underbar{a}}}
\def\ordm{\leavevmode\raise1ex\hbox{\selectfonts\lllsize \underbar{o}}}
% Dotless i and dotless j, used for accents.
\def\imacro{i}
\def\jmacro{j}
\def\dotless#1{%
\def\temp{#1}%
\ifx\temp\imacro \ifmmode\imath \else\ptexi \fi
\else\ifx\temp\jmacro \ifmmode\jmath \else\j \fi
\else \errmessage{@dotless can be used only with i or j}%
\fi\fi
}
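% Usage sketch (illustration): @dotless is normally used under an accent,
% e.g. "@~{@dotless{i}}" produces a tilde over a dotless i.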
% The \TeX{} logo, as in plain, but resetting the spacing so that a
% period following counts as ending a sentence. (Idea found in latex.)
%
\edef\TeX{\TeX \spacefactor=1000 }
% @LaTeX{} logo. Not quite the same results as the definition in
% latex.ltx, since we use a different font for the raised A; it's most
% convenient for us to use an explicitly smaller font, rather than using
% the \scriptstyle font (since we don't reset \scriptstyle and
% \scriptscriptstyle).
%
\def\LaTeX{%
L\kern-.36em
{\setbox0=\hbox{T}%
\vbox to \ht0{\hbox{\selectfonts\lllsize A}\vss}}%
\kern-.15em
\TeX
}
% Be sure we're in horizontal mode when doing a tie, since we make space
% equivalent to this in @example-like environments. Otherwise, a space
% at the beginning of a line will start with \penalty -- and
% since \penalty is valid in vertical mode, we'd end up putting the
% penalty on the vertical list instead of in the new paragraph.
{\catcode`@ = 11
% Avoid using \@M directly, because that causes trouble
% if the definition is written into an index file.
\global\let\tiepenalty = \@M
\gdef\tie{\leavevmode\penalty\tiepenalty\ }
}
% @: forces normal size whitespace following.
\def\:{\spacefactor=1000 }
% @* forces a line break.
\def\*{\hfil\break\hbox{}\ignorespaces}
% @/ allows a line break.
\let\/=\allowbreak
% @. is an end-of-sentence period.
\def\.{.\spacefactor=\endofsentencespacefactor\space}
% @! is an end-of-sentence bang.
\def\!{!\spacefactor=\endofsentencespacefactor\space}
% @? is an end-of-sentence query.
\def\?{?\spacefactor=\endofsentencespacefactor\space}
% @frenchspacing on|off says whether to put extra space after punctuation.
%
\def\onword{on}
\def\offword{off}
%
\parseargdef\frenchspacing{%
\def\temp{#1}%
\ifx\temp\onword \plainfrenchspacing
\else\ifx\temp\offword \plainnonfrenchspacing
\else
\errhelp = \EMsimple
\errmessage{Unknown @frenchspacing option `\temp', must be on/off}%
\fi\fi
}
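% Usage sketch (illustration): in a Texinfo source,
%   @frenchspacing on
% suppresses the extra space TeX normally inserts after end-of-sentence
% punctuation, and
%   @frenchspacing off
% restores the default behavior.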
% @w prevents a word break. Without the \leavevmode, @w at the
% beginning of a paragraph, when TeX is still in vertical mode, would
% produce a whole line of output instead of starting the paragraph.
\def\w#1{\leavevmode\hbox{#1}}
% @group ... @end group forces ... to be all on one page, by enclosing
% it in a TeX vbox. We use \vtop instead of \vbox to construct the box
% to keep its height that of a normal line. According to the rules for
% \topskip (p.114 of the TeXbook), the glue inserted is
% max (\topskip - \ht (first item), 0). If that height is large,
% therefore, no glue is inserted, and the space between the headline and
% the text is small, which looks bad.
%
% Another complication is that the group might be very large. This can
% cause the glue on the previous page to be unduly stretched, because it
% does not have much material. In this case, it's better to add an
% explicit \vfill so that the extra space is at the bottom. The
% threshold for doing this is if the group is more than \vfilllimit
% percent of a page (\vfilllimit can be changed inside of @tex).
%
\newbox\groupbox
\def\vfilllimit{0.7}
%
\envdef\group{%
\ifnum\catcode`\^^M=\active \else
\errhelp = \groupinvalidhelp
\errmessage{@group invalid in context where filling is enabled}%
\fi
\startsavinginserts
%
\setbox\groupbox = \vtop\bgroup
% Do @comment since we are called inside an environment such as
% @example, where each end-of-line in the input causes an
% end-of-line in the output. We don't want the end-of-line after
% the `@group' to put extra space in the output. Since @group
% should appear on a line by itself (according to the Texinfo
% manual), we don't worry about eating any user text.
\comment
}
%
% The \vtop produces a box with normal height and large depth; thus, TeX puts
% \baselineskip glue before it, and (when the next line of text is done)
% \lineskip glue after it. Thus, space below is not quite equal to space
% above. But it's pretty close.
\def\Egroup{%
% To get correct interline space between the last line of the group
% and the first line afterwards, we have to propagate \prevdepth.
\endgraf % Not \par, as it may have been set to \lisppar.
\global\dimen1 = \prevdepth
\egroup % End the \vtop.
% \dimen0 is the vertical size of the group's box.
\dimen0 = \ht\groupbox \advance\dimen0 by \dp\groupbox
% \dimen2 is how much space is left on the page (more or less).
\dimen2 = \pageheight \advance\dimen2 by -\pagetotal
% if the group doesn't fit on the current page, and it's a big big
% group, force a page break.
\ifdim \dimen0 > \dimen2
\ifdim \pagetotal < \vfilllimit\pageheight
\page
\fi
\fi
\box\groupbox
\prevdepth = \dimen1
\checkinserts
}
%
% TeX puts in an \escapechar (i.e., `@') at the beginning of the help
% message, so this ends up printing `@group can only ...'.
%
\newhelp\groupinvalidhelp{%
group can only be used in environments such as @example,^^J%
where each line of input produces a line of output.}
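% Usage sketch (illustration): @group is used inside a line-oriented
% environment to keep a display together on one page, e.g.
%   @example
%   @group
%   first line of the display
%   second line of the display
%   @end group
%   @end example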
% @need space-in-mils
% forces a page break if there is not space-in-mils remaining.
\newdimen\mil \mil=0.001in
% Old definition--didn't work.
%\parseargdef\need{\par %
%% This method tries to make TeX break the page naturally
%% if the depth of the box does not fit.
%{\baselineskip=0pt%
%\vtop to #1\mil{\vfil}\kern -#1\mil\nobreak
%\prevdepth=-1000pt
%}}
\parseargdef\need{%
% Ensure vertical mode, so we don't make a big box in the middle of a
% paragraph.
\par
%
% If the @need value is less than one line space, it's useless.
\dimen0 = #1\mil
\dimen2 = \ht\strutbox
\advance\dimen2 by \dp\strutbox
\ifdim\dimen0 > \dimen2
%
% Do a \strut just to make the height of this box be normal, so the
% normal leading is inserted relative to the preceding line.
% And a page break here is fine.
\vtop to #1\mil{\strut\vfil}%
%
% TeX does not even consider page breaks if a penalty added to the
% main vertical list is 10000 or more. But in order to see if the
% empty box we just added fits on the page, we must make it consider
% page breaks. On the other hand, we don't want to actually break the
% page after the empty box. So we use a penalty of 9999.
%
% There is an extremely small chance that TeX will actually break the
% page at this \penalty, if there are no other feasible breakpoints in
% sight. (If the user is using lots of big @group commands, which
% almost-but-not-quite fill up a page, TeX will have a hard time doing
% good page breaking, for example.) However, I could not construct an
% example where a page broke at this \penalty; if it happens in a real
% document, then we can reconsider our strategy.
\penalty9999
%
% Back up by the size of the box, whether we did a page break or not.
\kern -#1\mil
%
% Do not allow a page break right after this kern.
\nobreak
\fi
}
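% Usage sketch (illustration): since \mil is 0.001in, writing
%   @need 800
% in a Texinfo source requests a page break unless at least 0.8in of
% vertical space is left on the current page.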
% @br forces paragraph break (and is undocumented).
\let\br = \par
% @page forces the start of a new page.
%
\def\page{\par\vfill\supereject}
% @exdent text....
% outputs text on a separate line in roman font, starting at the standard page margin
% This records the amount of indent in the innermost environment.
% That's how much \exdent should take out.
\newskip\exdentamount
% This defn is used inside fill environments such as @defun.
\parseargdef\exdent{\hfil\break\hbox{\kern -\exdentamount{\rm#1}}\hfil\break}
% This defn is used inside nofill environments such as @example.
\parseargdef\nofillexdent{{\advance \leftskip by -\exdentamount
\leftline{\hskip\leftskip{\rm#1}}}}
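% Usage sketch (illustration): inside an @example,
%   @exdent (continued from the previous page)
% sets that single line flush at the page margin, undoing the
% environment's indentation.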
% @inmargin{WHICH}{TEXT} puts TEXT in the WHICH margin next to the current
% paragraph. For more general purposes, use the \margin insertion
% class. WHICH is `l' or `r'.
%
\newskip\inmarginspacing \inmarginspacing=1cm
\def\strutdepth{\dp\strutbox}
%
\def\doinmargin#1#2{\strut\vadjust{%
\nobreak
\kern-\strutdepth
\vtop to \strutdepth{%
\baselineskip=\strutdepth
\vss
% if you have multiple lines of stuff to put here, you'll need to
% make the vbox yourself of the appropriate size.
\ifx#1l%
\llap{\ignorespaces #2\hskip\inmarginspacing}%
\else
\rlap{\hskip\hsize \hskip\inmarginspacing \ignorespaces #2}%
\fi
\null
}%
}}
\def\inleftmargin{\doinmargin l}
\def\inrightmargin{\doinmargin r}
%
% @inmargin{TEXT [, RIGHT-TEXT]}
% (if RIGHT-TEXT is given, use TEXT for left page, RIGHT-TEXT for right;
% else use TEXT for both).
%
\def\inmargin#1{\parseinmargin #1,,\finish}
\def\parseinmargin#1,#2,#3\finish{% not perfect, but better than nothing.
\setbox0 = \hbox{\ignorespaces #2}%
\ifdim\wd0 > 0pt
\def\lefttext{#1}% have both texts
\def\righttext{#2}%
\else
\def\lefttext{#1}% have only one text
\def\righttext{#1}%
\fi
%
\ifodd\pageno
\def\temp{\inrightmargin\righttext}% odd page -> outside is right margin
\else
\def\temp{\inleftmargin\lefttext}%
\fi
\temp
}
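% Usage sketch (illustration; the note texts are made up):
%   @inmargin{see also recsel}
% puts the text in the outside margin of the current page, while
%   @inmargin{even-page note, odd-page note}
% uses the first text on left (even) pages and the second on right (odd)
% pages.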
% @include FILE -- \input text of FILE.
%
\def\include{\parseargusing\filenamecatcodes\includezzz}
\def\includezzz#1{%
\pushthisfilestack
\def\thisfile{#1}%
{%
\makevalueexpandable % we want to expand any @value in FILE.
\turnoffactive % and allow special characters in the expansion
\indexnofonts % Allow `@@' and other weird things in file names.
\edef\temp{\noexpand\input #1 }%
%
% This trickery is to read FILE outside of a group, in case it makes
% definitions, etc.
\expandafter
}\temp
\popthisfilestack
}
\def\filenamecatcodes{%
\catcode`\\=\other
\catcode`~=\other
\catcode`^=\other
\catcode`_=\other
\catcode`|=\other
\catcode`<=\other
\catcode`>=\other
\catcode`+=\other
\catcode`-=\other
\catcode`\`=\other
\catcode`\'=\other
}
\def\pushthisfilestack{%
\expandafter\pushthisfilestackX\popthisfilestack\StackTerm
}
\def\pushthisfilestackX{%
\expandafter\pushthisfilestackY\thisfile\StackTerm
}
\def\pushthisfilestackY #1\StackTerm #2\StackTerm {%
\gdef\popthisfilestack{\gdef\thisfile{#1}\gdef\popthisfilestack{#2}}%
}
\def\popthisfilestack{\errthisfilestackempty}
\def\errthisfilestackempty{\errmessage{Internal error:
the stack of filenames is empty.}}
\def\thisfile{}
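% Usage sketch (illustration; the file name is made up):
%   @include chapters/intro.texi
% reads the named file as if its contents appeared at this point of the
% master Texinfo file.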
% @center line
% outputs that line, centered.
%
\parseargdef\center{%
\ifhmode
\let\next\centerH
\else
\let\next\centerV
\fi
\next{\hfil \ignorespaces#1\unskip \hfil}%
}
\def\centerH#1{%
{%
\hfil\break
\advance\hsize by -\leftskip
\advance\hsize by -\rightskip
\line{#1}%
\break
}%
}
\def\centerV#1{\line{\kern\leftskip #1\kern\rightskip}}
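% Usage sketch (illustration):
%   @center A Centered Heading
% typesets the rest of the line centered between the margins.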
% @sp n outputs n lines of vertical space
\parseargdef\sp{\vskip #1\baselineskip}
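% Usage sketch (illustration): "@sp 2" leaves two line-heights of vertical
% space at that point.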
% @comment ...line which is ignored...
% @c is the same as @comment
% @ignore ... @end ignore is another way to write a comment
\def\comment{\begingroup \catcode`\^^M=\other%
\catcode`\@=\other \catcode`\{=\other \catcode`\}=\other%
\commentxxx}
{\catcode`\^^M=\other \gdef\commentxxx#1^^M{\endgroup}}
\let\c=\comment
% @paragraphindent NCHARS
% We'll use ems for NCHARS, close enough.
% NCHARS can also be the word `asis' or `none'.
% We cannot feasibly implement @paragraphindent asis, though.
%
\def\asisword{asis} % no translation, these are keywords
\def\noneword{none}
%
\parseargdef\paragraphindent{%
\def\temp{#1}%
\ifx\temp\asisword
\else
\ifx\temp\noneword
\defaultparindent = 0pt
\else
\defaultparindent = #1em
\fi
\fi
\parindent = \defaultparindent
}
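% Usage sketch (illustration): in a Texinfo source,
%   @paragraphindent 2
% sets the paragraph indentation to 2em,
%   @paragraphindent none
% removes it, and "@paragraphindent asis" is accepted but, as noted above,
% cannot change anything in the TeX output.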
% @exampleindent NCHARS
% We'll use ems for NCHARS like @paragraphindent.
% It seems @exampleindent asis isn't necessary, but
% I preserve it to make it similar to @paragraphindent.
\parseargdef\exampleindent{%
\def\temp{#1}%
\ifx\temp\asisword
\else
\ifx\temp\noneword
\lispnarrowing = 0pt
\else
\lispnarrowing = #1em
\fi
\fi
}
% @firstparagraphindent WORD
% If WORD is `none', then suppress indentation of the first paragraph
% after a section heading. If WORD is `insert', then do indent at such
% paragraphs.
%
% The paragraph indentation is suppressed or not by calling
% \suppressfirstparagraphindent, which the sectioning commands do.
% We switch the definition of this back and forth according to WORD.
% By default, we suppress indentation.
%
\def\suppressfirstparagraphindent{\dosuppressfirstparagraphindent}
\def\insertword{insert}
%
\parseargdef\firstparagraphindent{%
\def\temp{#1}%
\ifx\temp\noneword
\let\suppressfirstparagraphindent = \dosuppressfirstparagraphindent
\else\ifx\temp\insertword
\let\suppressfirstparagraphindent = \relax
\else
\errhelp = \EMsimple
\errmessage{Unknown @firstparagraphindent option `\temp'}%
\fi\fi
}
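% Usage sketch (illustration):
%   @firstparagraphindent insert
% indents the first paragraph after a section heading, while
%   @firstparagraphindent none
% (the default here) suppresses that indentation.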
% Here is how we actually suppress indentation. Redefine \everypar to
% \kern backwards by \parindent, and then reset itself to empty.
%
% We also make \indent itself not actually do anything until the next
% paragraph.
%
\gdef\dosuppressfirstparagraphindent{%
\gdef\indent{%
\restorefirstparagraphindent
\indent
}%
\gdef\noindent{%
\restorefirstparagraphindent
\noindent
}%
\global\everypar = {%
\kern -\parindent
\restorefirstparagraphindent
}%
}
\gdef\restorefirstparagraphindent{%
\global \let \indent = \ptexindent
\global \let \noindent = \ptexnoindent
\global \everypar = {}%
}
% @asis just yields its argument. Used with @table, for example.
%
\def\asis#1{#1}
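% Illustrative usage: `@table @asis' starts a two-column table whose item
% text is passed through with no font change, since @asis simply yields
% its argument.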
% @math outputs its argument in math mode.
%
% One complication: _ usually means subscripts, but it could also mean
% an actual _ character, as in @math{@var{some_variable} + 1}. So make
% _ active, and distinguish by seeing if the current family is \slfam,
% which is what @var uses.
{
\catcode`\_ = \active
\gdef\mathunderscore{%
\catcode`\_=\active
\def_{\ifnum\fam=\slfam \_\else\sb\fi}%
}
}
% Another complication: we want \\ (and @\) to output a \ character.
% FYI, plain.tex uses \\ as a temporary control sequence (why?), but
% this is not advertised and we don't care. Texinfo does not
% otherwise define @\.
%
% The \mathchar is class=0=ordinary, family=7=ttfam, position=5C=\.
\def\mathbackslash{\ifnum\fam=\ttfam \mathchar"075C \else\backslash \fi}
%
\def\math{%
\tex
\mathunderscore
\let\\ = \mathbackslash
\mathactive
% make the texinfo accent commands work in math mode
\let\"=\ddot
\let\'=\acute
\let\==\bar
\let\^=\hat
\let\`=\grave
\let\u=\breve
\let\v=\check
\let\~=\tilde
\let\dotaccent=\dot
$\finishmath
}
\def\finishmath#1{#1$\endgroup} % Close the group opened by \tex.
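% Illustrative usage: `@math{@var{some_var} + n^2}'.  Per the definitions
% above, the _ inside @var prints as a literal underscore (the current
% family is \slfam there), while outside @var the _ and ^ characters give
% sub- and superscripts, and @\ produces a backslash.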
% Some active characters (such as <) are spaced differently in math.
% We have to reset their definitions in case the @math was an argument
% to a command which sets the catcodes (such as @item or @section).
%
{
\catcode`^ = \active
\catcode`< = \active
\catcode`> = \active
\catcode`+ = \active
\catcode`' = \active
\gdef\mathactive{%
\let^ = \ptexhat
\let< = \ptexless
\let> = \ptexgtr
\let+ = \ptexplus
\let' = \ptexquoteright
}
}
% Some math mode symbols.
\def\bullet{$\ptexbullet$}
\def\geq{\ifmmode \ge\else $\ge$\fi}
\def\leq{\ifmmode \le\else $\le$\fi}
\def\minus{\ifmmode -\else $-$\fi}
% @dots{} outputs an ellipsis using the current font.
% We do .5em per period so that it has the same spacing in the cm
% typewriter fonts as three actual period characters; on the other hand,
% in other typewriter fonts three periods are wider than 1.5em. So do
% whichever is larger.
%
\def\dots{%
\leavevmode
\setbox0=\hbox{...}% get width of three periods
\ifdim\wd0 > 1.5em
\dimen0 = \wd0
\else
\dimen0 = 1.5em
\fi
\hbox to \dimen0{%
\hskip 0pt plus.25fil
.\hskip 0pt plus1fil
.\hskip 0pt plus1fil
.\hskip 0pt plus.5fil
}%
}
% @enddots{} is an end-of-sentence ellipsis.
%
\def\enddots{%
\dots
\spacefactor=\endofsentencespacefactor
}
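% Illustrative usage: `see Chapter 3@dots{} and onward@enddots{}'; the
% @enddots{} form additionally sets the end-of-sentence space factor
% after the ellipsis.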
% @comma{} is so commas can be inserted into text without messing up
% Texinfo's parsing.
%
\let\comma = ,
% @refill is a no-op.
\let\refill=\relax
% If working on a large document in chapters, it is convenient to
% be able to disable indexing, cross-referencing, and contents, for test runs.
% This is done with @novalidate (before @setfilename).
%
\newif\iflinks \linkstrue % by default we want the aux files.
\let\novalidate = \linksfalse
% @setfilename is done at the beginning of every texinfo file.
% So open here the files we need to have open while reading the input.
% This makes it possible to make a .fmt file for texinfo.
\def\setfilename{%
\fixbackslash % Turn off hack to swallow `\input texinfo'.
\iflinks
\tryauxfile
% Open the new aux file. TeX will close it automatically at exit.
\immediate\openout\auxfile=\jobname.aux
\fi % \openindices needs to do some work in any case.
\openindices
\let\setfilename=\comment % Ignore extra @setfilename cmds.
%
% If texinfo.cnf is present on the system, read it.
% Useful for site-wide @afourpaper, etc.
\openin 1 texinfo.cnf
\ifeof 1 \else \input texinfo.cnf \fi
\closein 1
%
\comment % Ignore the actual filename.
}
% Called from \setfilename.
%
\def\openindices{%
\newindex{cp}%
\newcodeindex{fn}%
\newcodeindex{vr}%
\newcodeindex{tp}%
\newcodeindex{ky}%
\newcodeindex{pg}%
}
% @bye.
\outer\def\bye{\pagealignmacro\tracingstats=1\ptexend}
\message{pdf,}
% adobe `portable' document format
\newcount\tempnum
\newcount\lnkcount
\newtoks\filename
\newcount\filenamelength
\newcount\pgn
\newtoks\toksA
\newtoks\toksB
\newtoks\toksC
\newtoks\toksD
\newbox\boxA
\newcount\countA
\newif\ifpdf
\newif\ifpdfmakepagedest
% when pdftex is run in dvi mode, \pdfoutput is defined (so \pdfoutput=1
% can be set). So we test for \relax and 0 as well as \undefined,
% borrowed from ifpdf.sty.
\ifx\pdfoutput\undefined
\else
\ifx\pdfoutput\relax
\else
\ifcase\pdfoutput
\else
\pdftrue
\fi
\fi
\fi
% PDF uses PostScript string constants for the names of xref targets,
% for display in the outlines, and in other places. Thus, we have to
% double any backslashes. Otherwise, a name like "\node" will be
% interpreted as a newline (\n), followed by o, d, e. Not good.
% http://www.ntg.nl/pipermail/ntg-pdftex/2004-July/000654.html
% (and related messages, the final outcome is that it is up to the TeX
% user to double the backslashes and otherwise make the string valid, so
% that's what we do).
% double active backslashes.
%
{\catcode`\@=0 \catcode`\\=\active
@gdef@activebackslashdouble{%
@catcode`@\=@active
@let\=@doublebackslash}
}
% To handle parens, we must adopt a different approach, since parens are
% not active characters. hyperref.dtx (which has the same problem as
% us) handles it with this amazing macro to replace tokens, with minor
% changes for Texinfo. It is included here under the GPL by permission
% from the author, Heiko Oberdiek.
%
% #1 is the tokens to replace.
% #2 is the replacement.
% #3 is the control sequence with the string.
%
\def\HyPsdSubst#1#2#3{%
\def\HyPsdReplace##1#1##2\END{%
##1%
\ifx\\##2\\%
\else
#2%
\HyReturnAfterFi{%
\HyPsdReplace##2\END
}%
\fi
}%
\xdef#3{\expandafter\HyPsdReplace#3#1\END}%
}
\long\def\HyReturnAfterFi#1\fi{\fi#1}
% #1 is a control sequence in which to do the replacements.
\def\backslashparens#1{%
\xdef#1{#1}% redefine it as its expansion; the definition is simply
% \lastnode when called from \setref -> \pdfmkdest.
\HyPsdSubst{(}{\realbackslash(}{#1}%
\HyPsdSubst{)}{\realbackslash)}{#1}%
}
\newhelp\nopdfimagehelp{Texinfo supports .png, .jpg, .jpeg, and .pdf images
with PDF output, and none of those formats could be found. (.eps cannot
be supported due to the design of the PDF format; use regular TeX (DVI
output) for that.)}
\ifpdf
%
% Color manipulation macros based on pdfcolor.tex.
\def\cmykDarkRed{0.28 1 1 0.35}
\def\cmykBlack{0 0 0 1}
%
\def\pdfsetcolor#1{\pdfliteral{#1 k}}
% Set color, and create a mark which defines \thiscolor accordingly,
% so that \makeheadline knows which color to restore.
\def\setcolor#1{%
\xdef\lastcolordefs{\gdef\noexpand\thiscolor{#1}}%
\domark
\pdfsetcolor{#1}%
}
%
\def\maincolor{\cmykBlack}
\pdfsetcolor{\maincolor}
\edef\thiscolor{\maincolor}
\def\lastcolordefs{}
%
\def\makefootline{%
\baselineskip24pt
\line{\pdfsetcolor{\maincolor}\the\footline}%
}
%
\def\makeheadline{%
\vbox to 0pt{%
\vskip-22.5pt
\line{%
\vbox to8.5pt{}%
% Extract \thiscolor definition from the marks.
\getcolormarks
% Typeset the headline with \maincolor, then restore the color.
\pdfsetcolor{\maincolor}\the\headline\pdfsetcolor{\thiscolor}%
}%
\vss
}%
\nointerlineskip
}
%
%
\pdfcatalog{/PageMode /UseOutlines}
%
% #1 is image name, #2 width (might be empty/whitespace), #3 height (ditto).
\def\dopdfimage#1#2#3{%
\def\imagewidth{#2}\setbox0 = \hbox{\ignorespaces #2}%
\def\imageheight{#3}\setbox2 = \hbox{\ignorespaces #3}%
%
% pdftex (and the PDF format) support .png, .jpg, .pdf (among
% others). Let's try in that order.
\let\pdfimgext=\empty
\begingroup
\openin 1 #1.png \ifeof 1
\openin 1 #1.jpg \ifeof 1
\openin 1 #1.jpeg \ifeof 1
\openin 1 #1.JPG \ifeof 1
\openin 1 #1.pdf \ifeof 1
\openin 1 #1.PDF \ifeof 1
\errhelp = \nopdfimagehelp
\errmessage{Could not find image file #1 for pdf}%
\else \gdef\pdfimgext{PDF}%
\fi
\else \gdef\pdfimgext{pdf}%
\fi
\else \gdef\pdfimgext{JPG}%
\fi
\else \gdef\pdfimgext{jpeg}%
\fi
\else \gdef\pdfimgext{jpg}%
\fi
\else \gdef\pdfimgext{png}%
\fi
\closein 1
\endgroup
%
% without \immediate, ancient pdftex seg faults when the same image is
% included twice. (Version 3.14159-pre-1.0-unofficial-20010704.)
\ifnum\pdftexversion < 14
\immediate\pdfimage
\else
\immediate\pdfximage
\fi
\ifdim \wd0 >0pt width \imagewidth \fi
\ifdim \wd2 >0pt height \imageheight \fi
\ifnum\pdftexversion<13
#1.\pdfimgext
\else
{#1.\pdfimgext}%
\fi
\ifnum\pdftexversion < 14 \else
\pdfrefximage \pdflastximage
\fi}
%
\def\pdfmkdest#1{{%
% We have to set dummies so commands such as @code, and characters
% such as \, aren't expanded when present in a section title.
\indexnofonts
\turnoffactive
\activebackslashdouble
\makevalueexpandable
\def\pdfdestname{#1}%
\backslashparens\pdfdestname
\safewhatsit{\pdfdest name{\pdfdestname} xyz}%
}}
%
% used to mark target names; must be expandable.
\def\pdfmkpgn#1{#1}
%
% by default, use a color that is dark enough to print on paper as
% nearly black, but still distinguishable for online viewing.
\def\urlcolor{\cmykDarkRed}
\def\linkcolor{\cmykDarkRed}
\def\endlink{\setcolor{\maincolor}\pdfendlink}
%
% Adding outlines to PDF; macros for calculating structure of outlines
% come from Petr Olsak
\def\expnumber#1{\expandafter\ifx\csname#1\endcsname\relax 0%
\else \csname#1\endcsname \fi}
\def\advancenumber#1{\tempnum=\expnumber{#1}\relax
\advance\tempnum by 1
\expandafter\xdef\csname#1\endcsname{\the\tempnum}}
%
% #1 is the section text, which is what will be displayed in the
% outline by the pdf viewer. #2 is the pdf expression for the number
% of subentries (or empty, for subsubsections). #3 is the node text,
% which might be empty if this toc entry had no corresponding node.
% #4 is the page number
%
\def\dopdfoutline#1#2#3#4{%
% Generate a link to the node text if that exists; else, use the
% page number. We could generate a destination for the section
% text in the case where a section has no node, but it doesn't
% seem worth the trouble, since most documents are normally structured.
\def\pdfoutlinedest{#3}%
\ifx\pdfoutlinedest\empty
\def\pdfoutlinedest{#4}%
\else
% Doubled backslashes in the name.
{\activebackslashdouble \xdef\pdfoutlinedest{#3}%
\backslashparens\pdfoutlinedest}%
\fi
%
% Also double the backslashes in the display string.
{\activebackslashdouble \xdef\pdfoutlinetext{#1}%
\backslashparens\pdfoutlinetext}%
%
\pdfoutline goto name{\pdfmkpgn{\pdfoutlinedest}}#2{\pdfoutlinetext}%
}
%
\def\pdfmakeoutlines{%
\begingroup
% Thanh's hack / proper braces in bookmarks
\edef\mylbrace{\iftrue \string{\else}\fi}\let\{=\mylbrace
\edef\myrbrace{\iffalse{\else\string}\fi}\let\}=\myrbrace
%
% Read toc silently, to get counts of subentries for \pdfoutline.
\def\numchapentry##1##2##3##4{%
\def\thischapnum{##2}%
\def\thissecnum{0}%
\def\thissubsecnum{0}%
}%
\def\numsecentry##1##2##3##4{%
\advancenumber{chap\thischapnum}%
\def\thissecnum{##2}%
\def\thissubsecnum{0}%
}%
\def\numsubsecentry##1##2##3##4{%
\advancenumber{sec\thissecnum}%
\def\thissubsecnum{##2}%
}%
\def\numsubsubsecentry##1##2##3##4{%
\advancenumber{subsec\thissubsecnum}%
}%
\def\thischapnum{0}%
\def\thissecnum{0}%
\def\thissubsecnum{0}%
%
% use \def rather than \let here because we redefine \chapentry et
% al. a second time, below.
\def\appentry{\numchapentry}%
\def\appsecentry{\numsecentry}%
\def\appsubsecentry{\numsubsecentry}%
\def\appsubsubsecentry{\numsubsubsecentry}%
\def\unnchapentry{\numchapentry}%
\def\unnsecentry{\numsecentry}%
\def\unnsubsecentry{\numsubsecentry}%
\def\unnsubsubsecentry{\numsubsubsecentry}%
\readdatafile{toc}%
%
% Read toc second time, this time actually producing the outlines.
% The `-' means take the \expnumber as the absolute number of
% subentries, which we calculated on our first read of the .toc above.
%
% We use the node names as the destinations.
\def\numchapentry##1##2##3##4{%
\dopdfoutline{##1}{count-\expnumber{chap##2}}{##3}{##4}}%
\def\numsecentry##1##2##3##4{%
\dopdfoutline{##1}{count-\expnumber{sec##2}}{##3}{##4}}%
\def\numsubsecentry##1##2##3##4{%
\dopdfoutline{##1}{count-\expnumber{subsec##2}}{##3}{##4}}%
\def\numsubsubsecentry##1##2##3##4{% count is always zero
\dopdfoutline{##1}{}{##3}{##4}}%
%
% PDF outlines are displayed using system fonts, instead of
% document fonts. Therefore we cannot use special characters,
% since the encoding is unknown. For example, the eogonek from
% Latin 2 (0xea) gets translated to a | character. Info from
% Staszek Wawrykiewicz, 19 Jan 2004 04:09:24 +0100.
%
% xx to do this right, we have to translate 8-bit characters to
% their "best" equivalent, based on the @documentencoding. Right
% now, I guess we'll just let the pdf reader have its way.
\indexnofonts
\setupdatafile
\catcode`\\=\active \otherbackslash
\input \tocreadfilename
\endgroup
}
%
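% \getfilename copies the characters of its argument into \filename one
% token at a time via \skipspaces (the appended | is the end marker),
% counting them in \filenamelength.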
\def\skipspaces#1{\def\PP{#1}\def\D{|}%
\ifx\PP\D\let\nextsp\relax
\else\let\nextsp\skipspaces
\ifx\p\space\else\addtokens{\filename}{\PP}%
\advance\filenamelength by 1
\fi
\fi
\nextsp}
\def\getfilename#1{\filenamelength=0\expandafter\skipspaces#1|\relax}
\ifnum\pdftexversion < 14
\let \startlink \pdfannotlink
\else
\let \startlink \pdfstartlink
\fi
% make a live url in pdf output.
\def\pdfurl#1{%
\begingroup
% It seems we really need yet another set of dummies here; we have not
% tried to figure out what each command should do in the context
% of @url.  For now, just make @/ a no-op; that's the only one
% people have actually reported a problem with.
%
\normalturnoffactive
\def\@{@}%
\let\/=\empty
\makevalueexpandable
\leavevmode\setcolor{\urlcolor}%
\startlink attr{/Border [0 0 0]}%
user{/Subtype /Link /A << /S /URI /URI (#1) >>}%
\endgroup}
\def\pdfgettoks#1.{\setbox\boxA=\hbox{\toksA={#1.}\toksB={}\maketoks}}
\def\addtokens#1#2{\edef\addtoks{\noexpand#1={\the#1#2}}\addtoks}
\def\adn#1{\addtokens{\toksC}{#1}\global\countA=1\let\next=\maketoks}
\def\poptoks#1#2|ENDTOKS|{\let\first=#1\toksD={#1}\toksA={#2}}
\def\maketoks{%
\expandafter\poptoks\the\toksA|ENDTOKS|\relax
\ifx\first0\adn0
\else\ifx\first1\adn1 \else\ifx\first2\adn2 \else\ifx\first3\adn3
\else\ifx\first4\adn4 \else\ifx\first5\adn5 \else\ifx\first6\adn6
\else\ifx\first7\adn7 \else\ifx\first8\adn8 \else\ifx\first9\adn9
\else
\ifnum0=\countA\else\makelink\fi
\ifx\first.\let\next=\done\else
\let\next=\maketoks
\addtokens{\toksB}{\the\toksD}
\ifx\first,\addtokens{\toksB}{\space}\fi
\fi
\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi
\next}
\def\makelink{\addtokens{\toksB}%
{\noexpand\pdflink{\the\toksC}}\toksC={}\global\countA=0}
\def\pdflink#1{%
\startlink attr{/Border [0 0 0]} goto name{\pdfmkpgn{#1}}
\setcolor{\linkcolor}#1\endlink}
\def\done{\edef\st{\global\noexpand\toksA={\the\toksB}}\st}
\else
\let\pdfmkdest = \gobble
\let\pdfurl = \gobble
\let\endlink = \relax
\let\setcolor = \gobble
\let\pdfsetcolor = \gobble
\let\pdfmakeoutlines = \relax
\fi % \ifx\pdfoutput
\message{fonts,}
% Change the current font style to #1, remembering it in \curfontstyle.
% For now, we do not accumulate font styles: @b{@i{foo}} prints foo in
% italics, not bold italics.
%
\def\setfontstyle#1{%
\def\curfontstyle{#1}% not as a control sequence, because we are \edef'd.
\csname ten#1\endcsname % change the current font
}
% Select #1 fonts with the current style.
%
\def\selectfonts#1{\csname #1fonts\endcsname \csname\curfontstyle\endcsname}
\def\rm{\fam=0 \setfontstyle{rm}}
\def\it{\fam=\itfam \setfontstyle{it}}
\def\sl{\fam=\slfam \setfontstyle{sl}}
\def\bf{\fam=\bffam \setfontstyle{bf}}\def\bfstylename{bf}
\def\tt{\fam=\ttfam \setfontstyle{tt}}
% Unfortunately, we have to override this for titles and the like, since
% in those cases "rm" is bold. Sigh.
\def\rmisbold{\rm\def\curfontstyle{bf}}
% Texinfo sort of supports the sans serif font style, which plain TeX does not.
% So we set up a \sf.
\newfam\sffam
\def\sf{\fam=\sffam \setfontstyle{sf}}
\let\li = \sf % Sometimes we call it \li, not \sf.
% We don't need math for this font style.
\def\ttsl{\setfontstyle{ttsl}}
% Default leading.
\newdimen\textleading \textleading = 13.2pt
% Set the baselineskip to #1, and the lineskip and strut size
% correspondingly. There is no deep meaning behind these magic numbers
% used as factors; they just match (closely enough) what Knuth defined.
%
\def\lineskipfactor{.08333}
\def\strutheightpercent{.70833}
\def\strutdepthpercent {.29167}
%
% can get a sort of poor man's double spacing by redefining this.
\def\baselinefactor{1}
%
\def\setleading#1{%
\dimen0 = #1\relax
\normalbaselineskip = \baselinefactor\dimen0
\normallineskip = \lineskipfactor\normalbaselineskip
\normalbaselines
\setbox\strutbox =\hbox{%
\vrule width0pt height\strutheightpercent\baselineskip
depth \strutdepthpercent \baselineskip
}%
}
% PDF CMaps. See also LaTeX's t1.cmap.
%
% do nothing with this by default.
\expandafter\let\csname cmapOT1\endcsname\gobble
\expandafter\let\csname cmapOT1IT\endcsname\gobble
\expandafter\let\csname cmapOT1TT\endcsname\gobble
% if we are producing pdf, and we have \pdffontattr, then define cmaps.
% (\pdffontattr was introduced many years ago, but people still run
% older pdftex's; it's easy to conditionalize, so we do.)
\ifpdf \ifx\pdffontattr\undefined \else
\begingroup
\catcode`\^^M=\active \def^^M{^^J}% Output line endings as the ^^J char.
\catcode`\%=12 \immediate\pdfobj stream {%!PS-Adobe-3.0 Resource-CMap
%%DocumentNeededResources: ProcSet (CIDInit)
%%IncludeResource: ProcSet (CIDInit)
%%BeginResource: CMap (TeX-OT1-0)
%%Title: (TeX-OT1-0 TeX OT1 0)
%%Version: 1.000
%%EndComments
/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (TeX)
/Ordering (OT1)
/Supplement 0
>> def
/CMapName /TeX-OT1-0 def
/CMapType 2 def
1 begincodespacerange
<00> <7F>
endcodespacerange
8 beginbfrange
<00> <01> <0393>
<09> <0A> <03A8>
<23> <26> <0023>
<28> <3B> <0028>
<3F> <5B> <003F>
<5D> <5E> <005D>
<61> <7A> <0061>
<7B> <7C> <2013>
endbfrange
40 beginbfchar
<02> <0398>
<03> <039B>
<04> <039E>
<05> <03A0>
<06> <03A3>
<07> <03D2>
<08> <03A6>
<0B> <00660066>
<0C> <00660069>
<0D> <0066006C>
<0E> <006600660069>
<0F> <00660066006C>
<10> <0131>
<11> <0237>
<12> <0060>
<13> <00B4>
<14> <02C7>
<15> <02D8>
<16> <00AF>
<17> <02DA>
<18> <00B8>
<19> <00DF>
<1A> <00E6>
<1B> <0153>
<1C> <00F8>
<1D> <00C6>
<1E> <0152>
<1F> <00D8>
<21> <0021>
<22> <201D>
<27> <2019>
<3C> <00A1>
<3D> <003D>
<3E> <00BF>
<5C> <201C>
<5F> <02D9>
<60> <2018>
<7D> <02DD>
<7E> <007E>
<7F> <00A8>
endbfchar
endcmap
CMapName currentdict /CMap defineresource pop
end
end
%%EndResource
%%EOF
}\endgroup
\expandafter\edef\csname cmapOT1\endcsname#1{%
\pdffontattr#1{/ToUnicode \the\pdflastobj\space 0 R}%
}%
%
% \cmapOT1IT
\begingroup
\catcode`\^^M=\active \def^^M{^^J}% Output line endings as the ^^J char.
\catcode`\%=12 \immediate\pdfobj stream {%!PS-Adobe-3.0 Resource-CMap
%%DocumentNeededResources: ProcSet (CIDInit)
%%IncludeResource: ProcSet (CIDInit)
%%BeginResource: CMap (TeX-OT1IT-0)
%%Title: (TeX-OT1IT-0 TeX OT1IT 0)
%%Version: 1.000
%%EndComments
/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (TeX)
/Ordering (OT1IT)
/Supplement 0
>> def
/CMapName /TeX-OT1IT-0 def
/CMapType 2 def
1 begincodespacerange
<00> <7F>
endcodespacerange
8 beginbfrange
<00> <01> <0393>
<09> <0A> <03A8>
<25> <26> <0025>
<28> <3B> <0028>
<3F> <5B> <003F>
<5D> <5E> <005D>
<61> <7A> <0061>
<7B> <7C> <2013>
endbfrange
42 beginbfchar
<02> <0398>
<03> <039B>
<04> <039E>
<05> <03A0>
<06> <03A3>
<07> <03D2>
<08> <03A6>
<0B> <00660066>
<0C> <00660069>
<0D> <0066006C>
<0E> <006600660069>
<0F> <00660066006C>
<10> <0131>
<11> <0237>
<12> <0060>
<13> <00B4>
<14> <02C7>
<15> <02D8>
<16> <00AF>
<17> <02DA>
<18> <00B8>
<19> <00DF>
<1A> <00E6>
<1B> <0153>
<1C> <00F8>
<1D> <00C6>
<1E> <0152>
<1F> <00D8>
<21> <0021>
<22> <201D>
<23> <0023>
<24> <00A3>
<27> <2019>
<3C> <00A1>
<3D> <003D>
<3E> <00BF>
<5C> <201C>
<5F> <02D9>
<60> <2018>
<7D> <02DD>
<7E> <007E>
<7F> <00A8>
endbfchar
endcmap
CMapName currentdict /CMap defineresource pop
end
end
%%EndResource
%%EOF
}\endgroup
\expandafter\edef\csname cmapOT1IT\endcsname#1{%
\pdffontattr#1{/ToUnicode \the\pdflastobj\space 0 R}%
}%
%
% \cmapOT1TT
\begingroup
\catcode`\^^M=\active \def^^M{^^J}% Output line endings as the ^^J char.
\catcode`\%=12 \immediate\pdfobj stream {%!PS-Adobe-3.0 Resource-CMap
%%DocumentNeededResources: ProcSet (CIDInit)
%%IncludeResource: ProcSet (CIDInit)
%%BeginResource: CMap (TeX-OT1TT-0)
%%Title: (TeX-OT1TT-0 TeX OT1TT 0)
%%Version: 1.000
%%EndComments
/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (TeX)
/Ordering (OT1TT)
/Supplement 0
>> def
/CMapName /TeX-OT1TT-0 def
/CMapType 2 def
1 begincodespacerange
<00> <7F>
endcodespacerange
5 beginbfrange
<00> <01> <0393>
<09> <0A> <03A8>
<21> <26> <0021>
<28> <5F> <0028>
<61> <7E> <0061>
endbfrange
32 beginbfchar
<02> <0398>
<03> <039B>
<04> <039E>
<05> <03A0>
<06> <03A3>
<07> <03D2>
<08> <03A6>
<0B> <2191>
<0C> <2193>
<0D> <0027>
<0E> <00A1>
<0F> <00BF>
<10> <0131>
<11> <0237>
<12> <0060>
<13> <00B4>
<14> <02C7>
<15> <02D8>
<16> <00AF>
<17> <02DA>
<18> <00B8>
<19> <00DF>
<1A> <00E6>
<1B> <0153>
<1C> <00F8>
<1D> <00C6>
<1E> <0152>
<1F> <00D8>
<20> <2423>
<27> <2019>
<60> <2018>
<7F> <00A8>
endbfchar
endcmap
CMapName currentdict /CMap defineresource pop
end
end
%%EndResource
%%EOF
}\endgroup
\expandafter\edef\csname cmapOT1TT\endcsname#1{%
\pdffontattr#1{/ToUnicode \the\pdflastobj\space 0 R}%
}%
\fi\fi
% Set the font macro #1 to the font named #2, adding on the
% specified font prefix (normally `cm').
% #3 is the font's design size, #4 is a scale factor, #5 is the CMap
% encoding (currently only OT1, OT1IT and OT1TT are allowed, pass
% empty to omit).
\def\setfont#1#2#3#4#5{%
\font#1=\fontprefix#2#3 scaled #4
\csname cmap#5\endcsname#1%
}
% This is what gets called when #5 of \setfont is empty.
\let\cmap\gobble
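% Illustrative call (of the kind used in the size definitions below):
%   \setfont\textrm\rmshape{10}{\mainmagstep}{OT1}
% which, with the default cm prefix, loads \font\textrm=cmr10 scaled
% \mainmagstep and, when producing PDF, attaches the OT1 ToUnicode CMap.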
% emacs-page end of cmaps
% Use cm as the default font prefix.
% To specify the font prefix, you must define \fontprefix
% before you read in texinfo.tex.
\ifx\fontprefix\undefined
\def\fontprefix{cm}
\fi
% Support font families that don't use the same naming scheme as CM.
\def\rmshape{r}
\def\rmbshape{bx} %where the normal face is bold
\def\bfshape{b}
\def\bxshape{bx}
\def\ttshape{tt}
\def\ttbshape{tt}
\def\ttslshape{sltt}
\def\itshape{ti}
\def\itbshape{bxti}
\def\slshape{sl}
\def\slbshape{bxsl}
\def\sfshape{ss}
\def\sfbshape{ss}
\def\scshape{csc}
\def\scbshape{csc}
% Definitions for a main text size of 11pt. This is the default in
% Texinfo.
%
\def\definetextfontsizexi{%
% Text fonts (11.2pt, magstep1).
\def\textnominalsize{11pt}
\edef\mainmagstep{\magstephalf}
\setfont\textrm\rmshape{10}{\mainmagstep}{OT1}
\setfont\texttt\ttshape{10}{\mainmagstep}{OT1TT}
\setfont\textbf\bfshape{10}{\mainmagstep}{OT1}
\setfont\textit\itshape{10}{\mainmagstep}{OT1IT}
\setfont\textsl\slshape{10}{\mainmagstep}{OT1}
\setfont\textsf\sfshape{10}{\mainmagstep}{OT1}
\setfont\textsc\scshape{10}{\mainmagstep}{OT1}
\setfont\textttsl\ttslshape{10}{\mainmagstep}{OT1TT}
\font\texti=cmmi10 scaled \mainmagstep
\font\textsy=cmsy10 scaled \mainmagstep
\def\textecsize{1095}
% A few fonts for @defun names and args.
\setfont\defbf\bfshape{10}{\magstep1}{OT1}
\setfont\deftt\ttshape{10}{\magstep1}{OT1TT}
\setfont\defttsl\ttslshape{10}{\magstep1}{OT1TT}
\def\df{\let\tentt=\deftt \let\tenbf = \defbf \let\tenttsl=\defttsl \bf}
% Fonts for indices, footnotes, small examples (9pt).
\def\smallnominalsize{9pt}
\setfont\smallrm\rmshape{9}{1000}{OT1}
\setfont\smalltt\ttshape{9}{1000}{OT1TT}
\setfont\smallbf\bfshape{10}{900}{OT1}
\setfont\smallit\itshape{9}{1000}{OT1IT}
\setfont\smallsl\slshape{9}{1000}{OT1}
\setfont\smallsf\sfshape{9}{1000}{OT1}
\setfont\smallsc\scshape{10}{900}{OT1}
\setfont\smallttsl\ttslshape{10}{900}{OT1TT}
\font\smalli=cmmi9
\font\smallsy=cmsy9
\def\smallecsize{0900}
% Fonts for small examples (8pt).
\def\smallernominalsize{8pt}
\setfont\smallerrm\rmshape{8}{1000}{OT1}
\setfont\smallertt\ttshape{8}{1000}{OT1TT}
\setfont\smallerbf\bfshape{10}{800}{OT1}
\setfont\smallerit\itshape{8}{1000}{OT1IT}
\setfont\smallersl\slshape{8}{1000}{OT1}
\setfont\smallersf\sfshape{8}{1000}{OT1}
\setfont\smallersc\scshape{10}{800}{OT1}
\setfont\smallerttsl\ttslshape{10}{800}{OT1TT}
\font\smalleri=cmmi8
\font\smallersy=cmsy8
\def\smallerecsize{0800}
% Fonts for title page (20.4pt):
\def\titlenominalsize{20pt}
\setfont\titlerm\rmbshape{12}{\magstep3}{OT1}
\setfont\titleit\itbshape{10}{\magstep4}{OT1IT}
\setfont\titlesl\slbshape{10}{\magstep4}{OT1}
\setfont\titlett\ttbshape{12}{\magstep3}{OT1TT}
\setfont\titlettsl\ttslshape{10}{\magstep4}{OT1TT}
\setfont\titlesf\sfbshape{17}{\magstep1}{OT1}
\let\titlebf=\titlerm
\setfont\titlesc\scbshape{10}{\magstep4}{OT1}
\font\titlei=cmmi12 scaled \magstep3
\font\titlesy=cmsy10 scaled \magstep4
\def\titleecsize{2074}
% Chapter (and unnumbered) fonts (17.28pt).
\def\chapnominalsize{17pt}
\setfont\chaprm\rmbshape{12}{\magstep2}{OT1}
\setfont\chapit\itbshape{10}{\magstep3}{OT1IT}
\setfont\chapsl\slbshape{10}{\magstep3}{OT1}
\setfont\chaptt\ttbshape{12}{\magstep2}{OT1TT}
\setfont\chapttsl\ttslshape{10}{\magstep3}{OT1TT}
\setfont\chapsf\sfbshape{17}{1000}{OT1}
\let\chapbf=\chaprm
\setfont\chapsc\scbshape{10}{\magstep3}{OT1}
\font\chapi=cmmi12 scaled \magstep2
\font\chapsy=cmsy10 scaled \magstep3
\def\chapecsize{1728}
% Section fonts (14.4pt).
\def\secnominalsize{14pt}
\setfont\secrm\rmbshape{12}{\magstep1}{OT1}
\setfont\secit\itbshape{10}{\magstep2}{OT1IT}
\setfont\secsl\slbshape{10}{\magstep2}{OT1}
\setfont\sectt\ttbshape{12}{\magstep1}{OT1TT}
\setfont\secttsl\ttslshape{10}{\magstep2}{OT1TT}
\setfont\secsf\sfbshape{12}{\magstep1}{OT1}
\let\secbf\secrm
\setfont\secsc\scbshape{10}{\magstep2}{OT1}
\font\seci=cmmi12 scaled \magstep1
\font\secsy=cmsy10 scaled \magstep2
\def\sececsize{1440}
% Subsection fonts (13.15pt).
\def\ssecnominalsize{13pt}
\setfont\ssecrm\rmbshape{12}{\magstephalf}{OT1}
\setfont\ssecit\itbshape{10}{1315}{OT1IT}
\setfont\ssecsl\slbshape{10}{1315}{OT1}
\setfont\ssectt\ttbshape{12}{\magstephalf}{OT1TT}
\setfont\ssecttsl\ttslshape{10}{1315}{OT1TT}
\setfont\ssecsf\sfbshape{12}{\magstephalf}{OT1}
\let\ssecbf\ssecrm
\setfont\ssecsc\scbshape{10}{1315}{OT1}
\font\sseci=cmmi12 scaled \magstephalf
\font\ssecsy=cmsy10 scaled 1315
\def\ssececsize{1200}
% Reduced fonts for @acro in text (10pt).
\def\reducednominalsize{10pt}
\setfont\reducedrm\rmshape{10}{1000}{OT1}
\setfont\reducedtt\ttshape{10}{1000}{OT1TT}
\setfont\reducedbf\bfshape{10}{1000}{OT1}
\setfont\reducedit\itshape{10}{1000}{OT1IT}
\setfont\reducedsl\slshape{10}{1000}{OT1}
\setfont\reducedsf\sfshape{10}{1000}{OT1}
\setfont\reducedsc\scshape{10}{1000}{OT1}
\setfont\reducedttsl\ttslshape{10}{1000}{OT1TT}
\font\reducedi=cmmi10
\font\reducedsy=cmsy10
\def\reducedecsize{1000}
% reset the current fonts
\textfonts
\rm
} % end of 11pt text font size definitions
% Definitions to make the main text be 10pt Computer Modern, with
% section, chapter, etc., sizes following suit. This is for the GNU
% Press printing of the Emacs 22 manual. Maybe other manuals in the
% future. Used with @smallbook, which sets the leading to 12pt.
%
\def\definetextfontsizex{%
% Text fonts (10pt).
\def\textnominalsize{10pt}
\edef\mainmagstep{1000}
\setfont\textrm\rmshape{10}{\mainmagstep}{OT1}
\setfont\texttt\ttshape{10}{\mainmagstep}{OT1TT}
\setfont\textbf\bfshape{10}{\mainmagstep}{OT1}
\setfont\textit\itshape{10}{\mainmagstep}{OT1IT}
\setfont\textsl\slshape{10}{\mainmagstep}{OT1}
\setfont\textsf\sfshape{10}{\mainmagstep}{OT1}
\setfont\textsc\scshape{10}{\mainmagstep}{OT1}
\setfont\textttsl\ttslshape{10}{\mainmagstep}{OT1TT}
\font\texti=cmmi10 scaled \mainmagstep
\font\textsy=cmsy10 scaled \mainmagstep
\def\textecsize{1000}
% A few fonts for @defun names and args.
\setfont\defbf\bfshape{10}{\magstephalf}{OT1}
\setfont\deftt\ttshape{10}{\magstephalf}{OT1TT}
\setfont\defttsl\ttslshape{10}{\magstephalf}{OT1TT}
\def\df{\let\tentt=\deftt \let\tenbf = \defbf \let\tenttsl=\defttsl \bf}
% Fonts for indices, footnotes, small examples (9pt).
\def\smallnominalsize{9pt}
\setfont\smallrm\rmshape{9}{1000}{OT1}
\setfont\smalltt\ttshape{9}{1000}{OT1TT}
\setfont\smallbf\bfshape{10}{900}{OT1}
\setfont\smallit\itshape{9}{1000}{OT1IT}
\setfont\smallsl\slshape{9}{1000}{OT1}
\setfont\smallsf\sfshape{9}{1000}{OT1}
\setfont\smallsc\scshape{10}{900}{OT1}
\setfont\smallttsl\ttslshape{10}{900}{OT1TT}
\font\smalli=cmmi9
\font\smallsy=cmsy9
\def\smallecsize{0900}
% Fonts for small examples (8pt).
\def\smallernominalsize{8pt}
\setfont\smallerrm\rmshape{8}{1000}{OT1}
\setfont\smallertt\ttshape{8}{1000}{OT1TT}
\setfont\smallerbf\bfshape{10}{800}{OT1}
\setfont\smallerit\itshape{8}{1000}{OT1IT}
\setfont\smallersl\slshape{8}{1000}{OT1}
\setfont\smallersf\sfshape{8}{1000}{OT1}
\setfont\smallersc\scshape{10}{800}{OT1}
\setfont\smallerttsl\ttslshape{10}{800}{OT1TT}
\font\smalleri=cmmi8
\font\smallersy=cmsy8
\def\smallerecsize{0800}
% Fonts for title page (20.4pt):
\def\titlenominalsize{20pt}
\setfont\titlerm\rmbshape{12}{\magstep3}{OT1}
\setfont\titleit\itbshape{10}{\magstep4}{OT1IT}
\setfont\titlesl\slbshape{10}{\magstep4}{OT1}
\setfont\titlett\ttbshape{12}{\magstep3}{OT1TT}
\setfont\titlettsl\ttslshape{10}{\magstep4}{OT1TT}
\setfont\titlesf\sfbshape{17}{\magstep1}{OT1}
\let\titlebf=\titlerm
\setfont\titlesc\scbshape{10}{\magstep4}{OT1}
\font\titlei=cmmi12 scaled \magstep3
\font\titlesy=cmsy10 scaled \magstep4
\def\titleecsize{2074}
% Chapter fonts (14.4pt).
\def\chapnominalsize{14pt}
\setfont\chaprm\rmbshape{12}{\magstep1}{OT1}
\setfont\chapit\itbshape{10}{\magstep2}{OT1IT}
\setfont\chapsl\slbshape{10}{\magstep2}{OT1}
\setfont\chaptt\ttbshape{12}{\magstep1}{OT1TT}
\setfont\chapttsl\ttslshape{10}{\magstep2}{OT1TT}
\setfont\chapsf\sfbshape{12}{\magstep1}{OT1}
\let\chapbf\chaprm
\setfont\chapsc\scbshape{10}{\magstep2}{OT1}
\font\chapi=cmmi12 scaled \magstep1
\font\chapsy=cmsy10 scaled \magstep2
\def\chapecsize{1440}
% Section fonts (12pt).
\def\secnominalsize{12pt}
\setfont\secrm\rmbshape{12}{1000}{OT1}
\setfont\secit\itbshape{10}{\magstep1}{OT1IT}
\setfont\secsl\slbshape{10}{\magstep1}{OT1}
\setfont\sectt\ttbshape{12}{1000}{OT1TT}
\setfont\secttsl\ttslshape{10}{\magstep1}{OT1TT}
\setfont\secsf\sfbshape{12}{1000}{OT1}
\let\secbf\secrm
\setfont\secsc\scbshape{10}{\magstep1}{OT1}
\font\seci=cmmi12
\font\secsy=cmsy10 scaled \magstep1
\def\sececsize{1200}
% Subsection fonts (10pt).
\def\ssecnominalsize{10pt}
\setfont\ssecrm\rmbshape{10}{1000}{OT1}
\setfont\ssecit\itbshape{10}{1000}{OT1IT}
\setfont\ssecsl\slbshape{10}{1000}{OT1}
\setfont\ssectt\ttbshape{10}{1000}{OT1TT}
\setfont\ssecttsl\ttslshape{10}{1000}{OT1TT}
\setfont\ssecsf\sfbshape{10}{1000}{OT1}
\let\ssecbf\ssecrm
\setfont\ssecsc\scbshape{10}{1000}{OT1}
\font\sseci=cmmi10
\font\ssecsy=cmsy10
\def\ssececsize{1000}
% Reduced fonts for @acro in text (9pt).
\def\reducednominalsize{9pt}
\setfont\reducedrm\rmshape{9}{1000}{OT1}
\setfont\reducedtt\ttshape{9}{1000}{OT1TT}
\setfont\reducedbf\bfshape{10}{900}{OT1}
\setfont\reducedit\itshape{9}{1000}{OT1IT}
\setfont\reducedsl\slshape{9}{1000}{OT1}
\setfont\reducedsf\sfshape{9}{1000}{OT1}
\setfont\reducedsc\scshape{10}{900}{OT1}
\setfont\reducedttsl\ttslshape{10}{900}{OT1TT}
\font\reducedi=cmmi9
\font\reducedsy=cmsy9
\def\reducedecsize{0900}
% reduce space between paragraphs
\divide\parskip by 2
% reset the current fonts
\textfonts
\rm
} % end of 10pt text font size definitions
% We provide the user-level command
% @fonttextsize 10
% (or 11) to redefine the text font size. pt is assumed.
%
\def\xword{10}
\def\xiword{11}
%
\parseargdef\fonttextsize{%
\def\textsizearg{#1}%
\wlog{doing @fonttextsize \textsizearg}%
%
% Set \globaldefs so that documents can use this inside @tex, since
% makeinfo 4.8 does not support it, but we need it nonetheless.
%
\begingroup \globaldefs=1
\ifx\textsizearg\xword \definetextfontsizex
\else \ifx\textsizearg\xiword \definetextfontsizexi
\else
\errhelp=\EMsimple
\errmessage{@fonttextsize only supports `10' or `11', not `\textsizearg'}
\fi\fi
\endgroup
}
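% Illustrative usage: `@fonttextsize 10' switches the main text to the
% 10pt families defined by \definetextfontsizex; `@fonttextsize 11'
% selects the default 11pt families again.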
% In order for the font changes to affect most math symbols and letters,
% we have to define the \textfont of the standard families. Since
% texinfo doesn't allow for producing subscripts and superscripts except
% in the main text, we don't bother to reset \scriptfont and
% \scriptscriptfont (which would also require loading a lot more fonts).
%
\def\resetmathfonts{%
\textfont0=\tenrm \textfont1=\teni \textfont2=\tensy
\textfont\itfam=\tenit \textfont\slfam=\tensl \textfont\bffam=\tenbf
\textfont\ttfam=\tentt \textfont\sffam=\tensf
}
% The font-changing commands redefine the meanings of \tenSTYLE, instead
% of just \STYLE. We do this because \STYLE needs to also set the
% current \fam for math mode. Our \STYLE (e.g., \rm) commands hardwire
% \tenSTYLE to set the current font.
%
% Each font-changing command also sets the names \lsize (one size lower)
% and \lllsize (three sizes lower). These relative commands are used in
% the LaTeX logo and acronyms.
%
% This all needs generalizing, badly.
%
\def\textfonts{%
\let\tenrm=\textrm \let\tenit=\textit \let\tensl=\textsl
\let\tenbf=\textbf \let\tentt=\texttt \let\smallcaps=\textsc
\let\tensf=\textsf \let\teni=\texti \let\tensy=\textsy
\let\tenttsl=\textttsl
\def\curfontsize{text}%
\def\lsize{reduced}\def\lllsize{smaller}%
\resetmathfonts \setleading{\textleading}}
\def\titlefonts{%
\let\tenrm=\titlerm \let\tenit=\titleit \let\tensl=\titlesl
\let\tenbf=\titlebf \let\tentt=\titlett \let\smallcaps=\titlesc
\let\tensf=\titlesf \let\teni=\titlei \let\tensy=\titlesy
\let\tenttsl=\titlettsl
\def\curfontsize{title}%
\def\lsize{chap}\def\lllsize{subsec}%
\resetmathfonts \setleading{25pt}}
\def\titlefont#1{{\titlefonts\rmisbold #1}}
\def\chapfonts{%
\let\tenrm=\chaprm \let\tenit=\chapit \let\tensl=\chapsl
\let\tenbf=\chapbf \let\tentt=\chaptt \let\smallcaps=\chapsc
\let\tensf=\chapsf \let\teni=\chapi \let\tensy=\chapsy
\let\tenttsl=\chapttsl
\def\curfontsize{chap}%
\def\lsize{sec}\def\lllsize{text}%
\resetmathfonts \setleading{19pt}}
\def\secfonts{%
\let\tenrm=\secrm \let\tenit=\secit \let\tensl=\secsl
\let\tenbf=\secbf \let\tentt=\sectt \let\smallcaps=\secsc
\let\tensf=\secsf \let\teni=\seci \let\tensy=\secsy
\let\tenttsl=\secttsl
\def\curfontsize{sec}%
\def\lsize{subsec}\def\lllsize{reduced}%
\resetmathfonts \setleading{16pt}}
\def\subsecfonts{%
\let\tenrm=\ssecrm \let\tenit=\ssecit \let\tensl=\ssecsl
\let\tenbf=\ssecbf \let\tentt=\ssectt \let\smallcaps=\ssecsc
\let\tensf=\ssecsf \let\teni=\sseci \let\tensy=\ssecsy
\let\tenttsl=\ssecttsl
\def\curfontsize{ssec}%
\def\lsize{text}\def\lllsize{small}%
\resetmathfonts \setleading{15pt}}
\let\subsubsecfonts = \subsecfonts
\def\reducedfonts{%
\let\tenrm=\reducedrm \let\tenit=\reducedit \let\tensl=\reducedsl
\let\tenbf=\reducedbf \let\tentt=\reducedtt \let\reducedcaps=\reducedsc
\let\tensf=\reducedsf \let\teni=\reducedi \let\tensy=\reducedsy
\let\tenttsl=\reducedttsl
\def\curfontsize{reduced}%
\def\lsize{small}\def\lllsize{smaller}%
\resetmathfonts \setleading{10.5pt}}
\def\smallfonts{%
\let\tenrm=\smallrm \let\tenit=\smallit \let\tensl=\smallsl
\let\tenbf=\smallbf \let\tentt=\smalltt \let\smallcaps=\smallsc
\let\tensf=\smallsf \let\teni=\smalli \let\tensy=\smallsy
\let\tenttsl=\smallttsl
\def\curfontsize{small}%
\def\lsize{smaller}\def\lllsize{smaller}%
\resetmathfonts \setleading{10.5pt}}
\def\smallerfonts{%
\let\tenrm=\smallerrm \let\tenit=\smallerit \let\tensl=\smallersl
\let\tenbf=\smallerbf \let\tentt=\smallertt \let\smallcaps=\smallersc
\let\tensf=\smallersf \let\teni=\smalleri \let\tensy=\smallersy
\let\tenttsl=\smallerttsl
\def\curfontsize{smaller}%
\def\lsize{smaller}\def\lllsize{smaller}%
\resetmathfonts \setleading{9.5pt}}
% Set the fonts to use with the @small... environments.
\let\smallexamplefonts = \smallfonts
% About \smallexamplefonts. If we use \smallfonts (9pt), @smallexample
% can fit this many characters:
% 8.5x11=86 smallbook=72 a4=90 a5=69
% If we use \scriptfonts (8pt), then we can fit this many characters:
% 8.5x11=90+ smallbook=80 a4=90+ a5=77
% For me, subjectively, the few extra characters that fit aren't worth
% the additional smallness of 8pt. So I'm making the default 9pt.
%
% By the way, for comparison, here's what fits with @example (10pt):
% 8.5x11=71 smallbook=60 a4=75 a5=58
%
% I wish the USA used A4 paper.
% --karl, 24jan03.
% Set up the default fonts, so we can use them for creating boxes.
%
\definetextfontsizexi
% Define these so they can be easily changed for other fonts.
\def\angleleft{$\langle$}
\def\angleright{$\rangle$}
% Count depth in font-changes, for error checks
\newcount\fontdepth \fontdepth=0
% Fonts for short table of contents.
\setfont\shortcontrm\rmshape{12}{1000}{OT1}
\setfont\shortcontbf\bfshape{10}{\magstep1}{OT1} % no cmb12
\setfont\shortcontsl\slshape{12}{1000}{OT1}
\setfont\shortconttt\ttshape{12}{1000}{OT1TT}
%% Add scribe-like font environments, plus @l for inline lisp (usually sans
%% serif) and @ii for TeX italic
% \smartitalic{ARG} outputs arg in italics, followed by an italic correction
% unless the following character (a comma, hyphen, or period) does not need one.
\def\smartitalicx{\ifx\next,\else\ifx\next-\else\ifx\next.\else
\ptexslash\fi\fi\fi}
\def\smartslanted#1{{\ifusingtt\ttsl\sl #1}\futurelet\next\smartitalicx}
\def\smartitalic#1{{\ifusingtt\ttsl\it #1}\futurelet\next\smartitalicx}
% like \smartslanted except unconditionally uses \ttsl.
% @var is set to this for defun arguments.
\def\ttslanted#1{{\ttsl #1}\futurelet\next\smartitalicx}
% like \smartslanted except unconditionally use \sl. We never want
% ttsl for book titles, do we?
\def\cite#1{{\sl #1}\futurelet\next\smartitalicx}
\let\i=\smartitalic
\let\slanted=\smartslanted
\def\var#1{{\setupmarkupstyle{var}\smartslanted{#1}}}
\let\dfn=\smartslanted
\let\emph=\smartitalic
% @b, explicit bold.
\def\b#1{{\bf #1}}
\let\strong=\b
% @sansserif, explicit sans.
\def\sansserif#1{{\sf #1}}
% We can't just use \exhyphenpenalty, because that only has effect at
% the end of a paragraph. Restore normal hyphenation at the end of the
% group within which \nohyphenation is presumably called.
%
\def\nohyphenation{\hyphenchar\font = -1 \aftergroup\restorehyphenation}
\def\restorehyphenation{\hyphenchar\font = `- }
% Set sfcode to normal for the chars that usually have another value.
% Can't use plain's \frenchspacing because it uses the `\x notation, and
% sometimes \x has an active definition that messes things up.
%
\catcode`@=11
\def\plainfrenchspacing{%
\sfcode\dotChar =\@m \sfcode\questChar=\@m \sfcode\exclamChar=\@m
\sfcode\colonChar=\@m \sfcode\semiChar =\@m \sfcode\commaChar =\@m
\def\endofsentencespacefactor{1000}% for @. and friends
}
\def\plainnonfrenchspacing{%
\sfcode`\.3000\sfcode`\?3000\sfcode`\!3000
\sfcode`\:2000\sfcode`\;1500\sfcode`\,1250
\def\endofsentencespacefactor{3000}% for @. and friends
}
\catcode`@=\other
\def\endofsentencespacefactor{3000}% default
\def\t#1{%
{\tt \rawbackslash \plainfrenchspacing #1}%
\null
}
\def\samp#1{{\setupmarkupstyle{samp}\lq\tclose{#1}\rq\null}}
\setfont\keyrm\rmshape{8}{1000}{OT1}
\font\keysy=cmsy9
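% Two definitions of \key follow; the second (plain uppercase, no
% lozenge) is the one that takes effect.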
\def\key#1{{\keyrm\textfont2=\keysy \leavevmode\hbox{%
\raise0.4pt\hbox{\angleleft}\kern-.08em\vtop{%
\vbox{\hrule\kern-0.4pt
\hbox{\raise0.4pt\hbox{\vphantom{\angleleft}}#1}}%
\kern-0.4pt\hrule}%
\kern-.06em\raise0.4pt\hbox{\angleright}}}}
\def\key #1{{\setupmarkupstyle{key}\nohyphenation \uppercase{#1}}\null}
% The old definition, with no lozenge:
%\def\key #1{{\ttsl \nohyphenation \uppercase{#1}}\null}
\def\ctrl #1{{\tt \rawbackslash \hat}#1}
% @file, @option are the same as @samp.
\let\file=\samp
\let\option=\samp
% @code is a modification of @t,
% which makes spaces the same size as normal in the surrounding text.
\def\tclose#1{%
{%
% Change normal interword space to be same as for the current font.
\spaceskip = \fontdimen2\font
%
% Switch to typewriter.
\tt
%
% But `\ ' produces the large typewriter interword space.
\def\ {{\spaceskip = 0pt{} }}%
%
% Turn off hyphenation.
\nohyphenation
%
\rawbackslash
\plainfrenchspacing
#1%
}%
\null
}
% We *must* turn on hyphenation at `-' and `_' in @code.
% Otherwise, it is too hard to avoid overfull hboxes
% in the Emacs manual, the Library manual, etc.
% Unfortunately, TeX uses one parameter (\hyphenchar) to control
% both hyphenation at - and hyphenation within words.
% We must therefore turn them both off (\tclose does that)
% and arrange explicitly to hyphenate at a dash.
% -- rms.
{
\catcode`\-=\active \catcode`\_=\active
\catcode`\'=\active \catcode`\`=\active
\global\let'=\rq \global\let`=\lq % default definitions
%
\global\def\code{\begingroup
\setupmarkupstyle{code}%
% The following should really be moved into \setupmarkupstyle handlers.
\catcode\dashChar=\active \catcode\underChar=\active
\ifallowcodebreaks
\let-\codedash
\let_\codeunder
\else
\let-\realdash
\let_\realunder
\fi
\codex
}
}
\def\realdash{-}
\def\codedash{-\discretionary{}{}{}}
\def\codeunder{%
% This is all so @math{@code{var_name}+1} can work.  In math mode, _
% is "active" (mathcode"8000), so \normalunderscore (or \char95, etc.)
% would expand the active definition of _, which is us
% (inside @code, that is), producing an endless loop.
\ifusingtt{\ifmmode
\mathchar"075F % class 0=ordinary, family 7=ttfam, pos 0x5F=_.
\else\normalunderscore \fi
\discretionary{}{}{}}%
{\_}%
}
\def\codex #1{\tclose{#1}\endgroup}
% An additional complication: the above will allow breaks after, e.g.,
% each of the four underscores in __typeof__. This is undesirable in
% some manuals, especially if they don't have long identifiers in
% general. @allowcodebreaks provides a way to control this.
%
\newif\ifallowcodebreaks \allowcodebreakstrue
\def\keywordtrue{true}
\def\keywordfalse{false}
\parseargdef\allowcodebreaks{%
\def\txiarg{#1}%
\ifx\txiarg\keywordtrue
\allowcodebreakstrue
\else\ifx\txiarg\keywordfalse
\allowcodebreaksfalse
\else
\errhelp = \EMsimple
\errmessage{Unknown @allowcodebreaks option `\txiarg'}%
\fi\fi
}
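% Illustrative usage: `@allowcodebreaks false' disables line breaks at -
% and _ inside @code, so identifiers such as __typeof__ are never split;
% `@allowcodebreaks true' restores the default behavior.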
% @kbd is like @code, except that if the argument is just one @key command,
% then @kbd has no effect.
% @kbdinputstyle -- arg is `distinct' (@kbd uses slanted tty font always),
% `example' (@kbd uses ttsl only inside of @example and friends),
% or `code' (@kbd uses normal tty font always).
\parseargdef\kbdinputstyle{%
\def\txiarg{#1}%
\ifx\txiarg\worddistinct
\gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\ttsl}%
\else\ifx\txiarg\wordexample
\gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\tt}%
\else\ifx\txiarg\wordcode
\gdef\kbdexamplefont{\tt}\gdef\kbdfont{\tt}%
\else
\errhelp = \EMsimple
\errmessage{Unknown @kbdinputstyle option `\txiarg'}%
\fi\fi\fi
}
\def\worddistinct{distinct}
\def\wordexample{example}
\def\wordcode{code}
% Default is `distinct.'
\kbdinputstyle distinct
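% Illustrative usage: `@kbdinputstyle example' makes @kbd use the slanted
% typewriter font only inside @example and friends, and the plain
% typewriter font elsewhere.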
\def\xkey{\key}
\def\kbdfoo#1#2#3\par{\def\one{#1}\def\three{#3}\def\threex{??}%
\ifx\one\xkey\ifx\threex\three \key{#2}%
\else{\tclose{\kbdfont\setupmarkupstyle{kbd}\look}}\fi
\else{\tclose{\kbdfont\setupmarkupstyle{kbd}\look}}\fi}
% For @indicateurl, @env, @command quotes seem unnecessary, so use \code.
\let\indicateurl=\code
\let\env=\code
\let\command=\code
% @clicksequence{File @click{} Open ...}
\def\clicksequence#1{\begingroup #1\endgroup}
% @clickstyle @arrow (by default)
\parseargdef\clickstyle{\def\click{#1}}
\def\click{\arrow}
% @uref (abbreviation for `urlref') takes an optional (comma-separated)
% second argument specifying the text to display and an optional third
% arg as text to display instead of (rather than in addition to) the url
% itself. First (mandatory) arg is the url. Perhaps eventually put in
% a hypertex \special here.
%
\def\uref#1{\douref #1,,,\finish}
\def\douref#1,#2,#3,#4\finish{\begingroup
\unsepspaces
\pdfurl{#1}%
\setbox0 = \hbox{\ignorespaces #3}%
\ifdim\wd0 > 0pt
\unhbox0 % third arg given, show only that
\else
\setbox0 = \hbox{\ignorespaces #2}%
\ifdim\wd0 > 0pt
\ifpdf
\unhbox0 % PDF: 2nd arg given, show only it
\else
\unhbox0\ (\code{#1})% DVI: 2nd arg given, show both it and url
\fi
\else
\code{#1}% only url given, so show it
\fi
\fi
\endlink
\endgroup}
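% Illustrative usage:
%   @uref{https://www.gnu.org/software/recutils/}
%   @uref{https://www.gnu.org/software/recutils/, the recutils home page}
% With a second argument, PDF output shows only that text as a live link,
% while DVI output shows the text followed by the url in parentheses.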
% @url synonym for @uref, since that's how everyone uses it.
%
\let\url=\uref
% rms does not like angle brackets --karl, 17may97.
% So now @email is just like @uref, unless we are pdf.
%
%\def\email#1{\angleleft{\tt #1}\angleright}
\ifpdf
\def\email#1{\doemail#1,,\finish}
\def\doemail#1,#2,#3\finish{\begingroup
\unsepspaces
\pdfurl{mailto:#1}%
\setbox0 = \hbox{\ignorespaces #2}%
\ifdim\wd0>0pt\unhbox0\else\code{#1}\fi
\endlink
\endgroup}
\else
\let\email=\uref
\fi
% Check if we are currently using a typewriter font. Since all the
% Computer Modern typewriter fonts have zero interword stretch (and
% shrink), and it is reasonable to expect all typewriter fonts to have
% this property, we can check that font parameter.
%
\def\ifmonospace{\ifdim\fontdimen3\font=0pt }
% Typeset a dimension, e.g., `in' or `pt'. The only reason for the
% argument is to make the input look right: @dmn{pt} instead of @dmn{}pt.
%
\def\dmn#1{\thinspace #1}
\def\kbd#1{{\setupmarkupstyle{kbd}\def\look{#1}\expandafter\kbdfoo\look??\par}}
% @l was never documented to mean ``switch to the Lisp font'',
% and it is not used as such in any manual I can find. We need it for
% Polish suppressed-l. --karl, 22sep96.
%\def\l#1{{\li #1}\null}
% Explicit font changes: @r, @sc, undocumented @ii.
\def\r#1{{\rm #1}} % roman font
\def\sc#1{{\smallcaps#1}} % smallcaps font
\def\ii#1{{\it #1}} % italic font
% @acronym for "FBI", "NATO", and the like.
% We print this one point size smaller, since it's intended for
% all-uppercase.
%
\def\acronym#1{\doacronym #1,,\finish}
\def\doacronym#1,#2,#3\finish{%
{\selectfonts\lsize #1}%
\def\temp{#2}%
\ifx\temp\empty \else
\space ({\unsepspaces \ignorespaces \temp \unskip})%
\fi
}
% @abbr for "Comput. J." and the like.
% No font change, but don't do end-of-sentence spacing.
%
\def\abbr#1{\doabbr #1,,\finish}
\def\doabbr#1,#2,#3\finish{%
{\plainfrenchspacing #1}%
\def\temp{#2}%
\ifx\temp\empty \else
\space ({\unsepspaces \ignorespaces \temp \unskip})%
\fi
}
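% Illustrative usage: `@acronym{NATO}' prints its argument one size
% smaller (via \lsize), while `@abbr{Comput. J.}' keeps the current font
% and only suppresses end-of-sentence spacing after the periods.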
% @pounds{} is a sterling sign, which Knuth put in the CM italic font.
%
\def\pounds{{\it\$}}
% @euro{} comes from a separate font, depending on the current style.
% We use the free feym* fonts from the eurosym package by Henrik
% Theiling, which support regular, slanted, bold and bold slanted (and
% "outlined" (blackboard board, sort of) versions, which we don't need).
% It is available from http://www.ctan.org/tex-archive/fonts/eurosym.
%
% Although only regular is the truly official Euro symbol, we ignore
% that. The Euro is designed to be slightly taller than the regular
% font height.
%
% feymr - regular
% feymo - slanted
% feybr - bold
% feybo - bold slanted
%
% There is no good (free) typewriter version, to my knowledge.
% A feymr10 euro is ~7.3pt wide, while a normal cmtt10 char is ~5.25pt wide.
% Hmm.
%
% Also doesn't work in math. Do we need to do math with euro symbols?
% Hope not.
%
%
\def\euro{{\eurofont e}}
\def\eurofont{%
% We set the font at each command, rather than predefining it in
% \textfonts and the other font-switching commands, so that
% installations which never need the symbol don't have to have the
% font installed.
%
% There is only one designed size (nominal 10pt), so we always scale
% that to the current nominal size.
%
% By the way, simply using "at 1em" works for cmr10 and the like, but
% does not work for cmbx10 and other extended/shrunken fonts.
%
\def\eurosize{\csname\curfontsize nominalsize\endcsname}%
%
\ifx\curfontstyle\bfstylename
% bold:
\font\thiseurofont = \ifusingit{feybo10}{feybr10} at \eurosize
\else
% regular:
\font\thiseurofont = \ifusingit{feymo10}{feymr10} at \eurosize
\fi
\thiseurofont
}
% Hacks for glyphs from the EC fonts similar to \euro. We don't
% use \let for the aliases, because sometimes we redefine the original
% macro, and the alias should reflect the redefinition.
\def\guillemetleft{{\ecfont \char"13}}
\def\guillemotleft{\guillemetleft}
\def\guillemetright{{\ecfont \char"14}}
\def\guillemotright{\guillemetright}
\def\guilsinglleft{{\ecfont \char"0E}}
\def\guilsinglright{{\ecfont \char"0F}}
\def\quotedblbase{{\ecfont \char"12}}
\def\quotesinglbase{{\ecfont \char"0D}}
%
% This positioning is not perfect (see the ogonek LaTeX package), but
% we have the precomposed glyphs for the most common cases. We put the
% tests to use those glyphs in the single \ogonek macro so we have fewer
% dummy definitions to worry about for index entries, etc.
%
% ogonek is also used with other letters in Lithuanian (IOU), but using
% the precomposed glyphs for those is not so easy since they aren't in
% the same EC font.
\def\ogonek#1{{%
\def\temp{#1}%
\ifx\temp\macrocharA\Aogonek
\else\ifx\temp\macrochara\aogonek
\else\ifx\temp\macrocharE\Eogonek
\else\ifx\temp\macrochare\eogonek
\else
\ecfont \setbox0=\hbox{#1}%
\ifdim\ht0=1ex\accent"0C #1%
\else\ooalign{\unhbox0\crcr\hidewidth\char"0C \hidewidth}%
\fi
\fi\fi\fi\fi
}%
}
\def\Aogonek{{\ecfont \char"81}}\def\macrocharA{A}
\def\aogonek{{\ecfont \char"A1}}\def\macrochara{a}
\def\Eogonek{{\ecfont \char"86}}\def\macrocharE{E}
\def\eogonek{{\ecfont \char"A6}}\def\macrochare{e}
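% Illustrative usage: `@ogonek{a}' selects the precomposed a-ogonek glyph
% \aogonek defined above; letters without a precomposed glyph fall through
% to the \accent"0C / \ooalign construction.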
%
\def\ecfont{%
% We can't distinguish serif/sans and italic/slanted, but this
% is used for crude hacks anyway (like adding French and German
% quotes to documents typeset with CM, where we lose kerning), so
% hopefully nobody will notice/care.
\edef\ecsize{\csname\curfontsize ecsize\endcsname}%
\edef\nominalsize{\csname\curfontsize nominalsize\endcsname}%
\ifx\curfontstyle\bfstylename
% bold:
\font\thisecfont = ecb\ifusingit{i}{x}\ecsize \space at \nominalsize
\else
% regular:
\font\thisecfont = ec\ifusingit{ti}{rm}\ecsize \space at \nominalsize
\fi
\thisecfont
}
% @registeredsymbol - R in a circle. The font for the R should really
% be smaller yet, but lllsize is the best we can do for now.
% Adapted from the plain.tex definition of \copyright.
%
\def\registeredsymbol{%
$^{{\ooalign{\hfil\raise.07ex\hbox{\selectfonts\lllsize R}%
\hfil\crcr\Orb}}%
}$%
}
% @textdegree - the normal degrees sign.
%
\def\textdegree{$^\circ$}
% Laurent Siebenmann reports \Orb undefined with:
% Textures 1.7.7 (preloaded format=plain 93.10.14) (68K) 16 APR 2004 02:38
% so we'll define it if necessary.
%
\ifx\Orb\undefined
\def\Orb{\mathhexbox20D}
\fi
% Quotes.
\chardef\quotedblleft="5C
\chardef\quotedblright=`\"
\chardef\quoteleft=`\`
\chardef\quoteright=`\'
\message{page headings,}
\newskip\titlepagetopglue \titlepagetopglue = 1.5in
\newskip\titlepagebottomglue \titlepagebottomglue = 2pc
% First the title page. Must do @settitle before @titlepage.
\newif\ifseenauthor
\newif\iffinishedtitlepage
% Do an implicit @contents or @shortcontents after @end titlepage if the
% user says @setcontentsaftertitlepage or @setshortcontentsaftertitlepage.
%
\newif\ifsetcontentsaftertitlepage
\let\setcontentsaftertitlepage = \setcontentsaftertitlepagetrue
\newif\ifsetshortcontentsaftertitlepage
\let\setshortcontentsaftertitlepage = \setshortcontentsaftertitlepagetrue
\parseargdef\shorttitlepage{\begingroup\hbox{}\vskip 1.5in \chaprm \centerline{#1}%
\endgroup\page\hbox{}\page}
\envdef\titlepage{%
% Open one extra group, as we want to close it in the middle of \Etitlepage.
\begingroup
\parindent=0pt \textfonts
% Leave some space at the very top of the page.
\vglue\titlepagetopglue
% No rule at page bottom unless we print one at the top with @title.
\finishedtitlepagetrue
%
% Most title ``pages'' are actually two pages long, with space
% at the top of the second. We don't want the ragged left on the second.
\let\oldpage = \page
\def\page{%
\iffinishedtitlepage\else
\finishtitlepage
\fi
\let\page = \oldpage
\page
\null
}%
}
\def\Etitlepage{%
\iffinishedtitlepage\else
\finishtitlepage
\fi
% It is important to do the page break before ending the group,
% because the headline and footline are only empty inside the group.
% If we use the new definition of \page, we always get a blank page
% after the title page, which we certainly don't want.
\oldpage
\endgroup
%
% Need this before the \...aftertitlepage checks so that if they are
% in effect the toc pages will come out with page numbers.
\HEADINGSon
%
% If they want short, they certainly want long too.
\ifsetshortcontentsaftertitlepage
\shortcontents
\contents
\global\let\shortcontents = \relax
\global\let\contents = \relax
\fi
%
\ifsetcontentsaftertitlepage
\contents
\global\let\contents = \relax
\global\let\shortcontents = \relax
\fi
}
\def\finishtitlepage{%
\vskip4pt \hrule height 2pt width \hsize
\vskip\titlepagebottomglue
\finishedtitlepagetrue
}
%%% Macros to be used within @titlepage:
\let\subtitlerm=\tenrm
\def\subtitlefont{\subtitlerm \normalbaselineskip = 13pt \normalbaselines}
\parseargdef\title{%
\checkenv\titlepage
\leftline{\titlefonts\rmisbold #1}
% print a rule at the page bottom also.
\finishedtitlepagefalse
\vskip4pt \hrule height 4pt width \hsize \vskip4pt
}
\parseargdef\subtitle{%
\checkenv\titlepage
{\subtitlefont \rightline{#1}}%
}
% @author should come last, but may come many times.
% It can also be used inside @quotation.
%
\parseargdef\author{%
\def\temp{\quotation}%
\ifx\thisenv\temp
\def\quotationauthor{#1}% printed in \Equotation.
\else
\checkenv\titlepage
\ifseenauthor\else \vskip 0pt plus 1filll \seenauthortrue \fi
{\secfonts\rmisbold \leftline{#1}}%
\fi
}
%%% Set up page headings and footings.
\let\thispage=\folio
\newtoks\evenheadline % headline on even pages
\newtoks\oddheadline % headline on odd pages
\newtoks\evenfootline % footline on even pages
\newtoks\oddfootline % footline on odd pages
% Now make TeX use those variables
\headline={{\textfonts\rm \ifodd\pageno \the\oddheadline
\else \the\evenheadline \fi}}
\footline={{\textfonts\rm \ifodd\pageno \the\oddfootline
\else \the\evenfootline \fi}\HEADINGShook}
\let\HEADINGShook=\relax
% Commands to set those variables.
% For example, this is what @headings on does
% @evenheading @thistitle|@thispage|@thischapter
% @oddheading @thischapter|@thispage|@thistitle
% @evenfooting @thisfile||
% @oddfooting ||@thisfile
\def\evenheading{\parsearg\evenheadingxxx}
\def\evenheadingxxx #1{\evenheadingyyy #1\|\|\|\|\finish}
\def\evenheadingyyy #1\|#2\|#3\|#4\finish{%
\global\evenheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
\def\oddheading{\parsearg\oddheadingxxx}
\def\oddheadingxxx #1{\oddheadingyyy #1\|\|\|\|\finish}
\def\oddheadingyyy #1\|#2\|#3\|#4\finish{%
\global\oddheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
\parseargdef\everyheading{\oddheadingxxx{#1}\evenheadingxxx{#1}}%
\def\evenfooting{\parsearg\evenfootingxxx}
\def\evenfootingxxx #1{\evenfootingyyy #1\|\|\|\|\finish}
\def\evenfootingyyy #1\|#2\|#3\|#4\finish{%
\global\evenfootline={\rlap{\centerline{#2}}\line{#1\hfil#3}}}
\def\oddfooting{\parsearg\oddfootingxxx}
\def\oddfootingxxx #1{\oddfootingyyy #1\|\|\|\|\finish}
\def\oddfootingyyy #1\|#2\|#3\|#4\finish{%
\global\oddfootline = {\rlap{\centerline{#2}}\line{#1\hfil#3}}%
%
% Leave some space for the footline. Hopefully ok to assume
% @evenfooting will not be used by itself.
\global\advance\pageheight by -12pt
\global\advance\vsize by -12pt
}
\parseargdef\everyfooting{\oddfootingxxx{#1}\evenfootingxxx{#1}}
% @evenheadingmarks top \thischapter <- chapter at the top of a page
% @evenheadingmarks bottom \thischapter <- chapter at the bottom of a page
%
% The same set of arguments for:
%
% @oddheadingmarks
% @evenfootingmarks
% @oddfootingmarks
% @everyheadingmarks
% @everyfootingmarks
\def\evenheadingmarks{\headingmarks{even}{heading}}
\def\oddheadingmarks{\headingmarks{odd}{heading}}
\def\evenfootingmarks{\headingmarks{even}{footing}}
\def\oddfootingmarks{\headingmarks{odd}{footing}}
\def\everyheadingmarks#1 {\headingmarks{even}{heading}{#1}
\headingmarks{odd}{heading}{#1} }
\def\everyfootingmarks#1 {\headingmarks{even}{footing}{#1}
\headingmarks{odd}{footing}{#1} }
% #1 = even/odd, #2 = heading/footing, #3 = top/bottom.
\def\headingmarks#1#2#3 {%
\expandafter\let\expandafter\temp \csname get#3headingmarks\endcsname
\global\expandafter\let\csname get#1#2marks\endcsname \temp
}
\everyheadingmarks bottom
\everyfootingmarks bottom
% @headings double turns headings on for double-sided printing.
% @headings single turns headings on for single-sided printing.
% @headings off turns them off.
% @headings on same as @headings double, retained for compatibility.
% @headings after turns on double-sided headings after this page.
% @headings doubleafter turns on double-sided headings after this page.
% @headings singleafter turns on single-sided headings after this page.
% By default, they are off at the start of a document,
% and turned `on' after @end titlepage.
\def\headings #1 {\csname HEADINGS#1\endcsname}
\def\HEADINGSoff{%
\global\evenheadline={\hfil} \global\evenfootline={\hfil}
\global\oddheadline={\hfil} \global\oddfootline={\hfil}}
\HEADINGSoff
% When we turn headings on, set the page number to 1.
% For double-sided printing, put current file name in lower left corner,
% chapter name on inside top of right hand pages, document
% title on inside top of left hand pages, and page numbers on outside top
% edge of all pages.
\def\HEADINGSdouble{%
\global\pageno=1
\global\evenfootline={\hfil}
\global\oddfootline={\hfil}
\global\evenheadline={\line{\folio\hfil\thistitle}}
\global\oddheadline={\line{\thischapter\hfil\folio}}
\global\let\contentsalignmacro = \chapoddpage
}
\let\contentsalignmacro = \chappager
% For single-sided printing, chapter title goes across top left of page,
% page number on top right.
\def\HEADINGSsingle{%
\global\pageno=1
\global\evenfootline={\hfil}
\global\oddfootline={\hfil}
\global\evenheadline={\line{\thischapter\hfil\folio}}
\global\oddheadline={\line{\thischapter\hfil\folio}}
\global\let\contentsalignmacro = \chappager
}
\def\HEADINGSon{\HEADINGSdouble}
\def\HEADINGSafter{\let\HEADINGShook=\HEADINGSdoublex}
\let\HEADINGSdoubleafter=\HEADINGSafter
\def\HEADINGSdoublex{%
\global\evenfootline={\hfil}
\global\oddfootline={\hfil}
\global\evenheadline={\line{\folio\hfil\thistitle}}
\global\oddheadline={\line{\thischapter\hfil\folio}}
\global\let\contentsalignmacro = \chapoddpage
}
\def\HEADINGSsingleafter{\let\HEADINGShook=\HEADINGSsinglex}
\def\HEADINGSsinglex{%
\global\evenfootline={\hfil}
\global\oddfootline={\hfil}
\global\evenheadline={\line{\thischapter\hfil\folio}}
\global\oddheadline={\line{\thischapter\hfil\folio}}
\global\let\contentsalignmacro = \chappager
}
% Subroutines used in generating headings
% This produces Day Month Year style of output.
% Only define if not already defined, in case a txi-??.tex file has set
% up a different format (e.g., txi-cs.tex does this).
\ifx\today\undefined
\def\today{%
\number\day\space
\ifcase\month
\or\putwordMJan\or\putwordMFeb\or\putwordMMar\or\putwordMApr
\or\putwordMMay\or\putwordMJun\or\putwordMJul\or\putwordMAug
\or\putwordMSep\or\putwordMOct\or\putwordMNov\or\putwordMDec
\fi
\space\number\year}
\fi
% @settitle line... specifies the title of the document, for headings.
% It generates no output of its own.
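% For example, `@settitle GNU Recutils Manual' (an illustrative title)
% makes that string available to the page headings.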
\def\thistitle{\putwordNoTitle}
\def\settitle{\parsearg{\gdef\thistitle}}
\message{tables,}
% Tables -- @table, @ftable, @vtable, @item(x).
% default indentation of table text
\newdimen\tableindent \tableindent=.8in
% default indentation of @itemize and @enumerate text
\newdimen\itemindent \itemindent=.3in
% margin between end of table item and start of table text.
\newdimen\itemmargin \itemmargin=.1in
% used internally for \itemindent minus \itemmargin
\newdimen\itemmax
% Note @table, @ftable, and @vtable define @item, @itemx, etc., with
% these defs.
% They also define \itemindex
% to index the item name in whatever manner is desired (perhaps none).
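% For example, a two-column description list might be written as
% (illustrative item names):
% @table @code
% @item Name
% The name of the record.
% @item Email
% The contact's email address.
% @end table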
\newif\ifitemxneedsnegativevskip
\def\itemxpar{\par\ifitemxneedsnegativevskip\nobreak\vskip-\parskip\nobreak\fi}
\def\internalBitem{\smallbreak \parsearg\itemzzz}
\def\internalBitemx{\itemxpar \parsearg\itemzzz}
\def\itemzzz #1{\begingroup %
\advance\hsize by -\rightskip
\advance\hsize by -\tableindent
\setbox0=\hbox{\itemindicate{#1}}%
\itemindex{#1}%
\nobreak % This prevents a break before @itemx.
%
% If the item text does not fit in the space we have, put it on a line
% by itself, and do not allow a page break either before or after that
% line. We do not start a paragraph here because then if the next
% command is, e.g., @kindex, the whatsit would get put into the
% horizontal list on a line by itself, resulting in extra blank space.
\ifdim \wd0>\itemmax
%
% Make this a paragraph so we get the \parskip glue and wrapping,
% but leave it ragged-right.
\begingroup
\advance\leftskip by-\tableindent
\advance\hsize by\tableindent
\advance\rightskip by0pt plus1fil
\leavevmode\unhbox0\par
\endgroup
%
% We're going to be starting a paragraph, but we don't want the
% \parskip glue -- logically it's part of the @item we just started.
\nobreak \vskip-\parskip
%
% Stop a page break at the \parskip glue coming up. However, if
% what follows is an environment such as @example, there will be no
% \parskip glue; then the negative vskip we just inserted would
% cause the example and the item to crash together. So we use this
% bizarre value of 10001 as a signal to \aboveenvbreak to insert
% \parskip glue after all. Section titles are handled this way also.
%
\penalty 10001
\endgroup
\itemxneedsnegativevskipfalse
\else
% The item text fits into the space. Start a paragraph, so that the
% following text (if any) will end up on the same line.
\noindent
% Do this with kerns and \unhbox so that if there is a footnote in
% the item text, it can migrate to the main vertical list and
% eventually be printed.
\nobreak\kern-\tableindent
\dimen0 = \itemmax \advance\dimen0 by \itemmargin \advance\dimen0 by -\wd0
\unhbox0
\nobreak\kern\dimen0
\endgroup
\itemxneedsnegativevskiptrue
\fi
}
\def\item{\errmessage{@item while not in a list environment}}
\def\itemx{\errmessage{@itemx while not in a list environment}}
% @table, @ftable, @vtable.
\envdef\table{%
\let\itemindex\gobble
\tablecheck{table}%
}
\envdef\ftable{%
\def\itemindex ##1{\doind {fn}{\code{##1}}}%
\tablecheck{ftable}%
}
\envdef\vtable{%
\def\itemindex ##1{\doind {vr}{\code{##1}}}%
\tablecheck{vtable}%
}
\def\tablecheck#1{%
\ifnum \the\catcode`\^^M=\active
\endgroup
\errmessage{This command won't work in this context; perhaps the problem is
that we are \inenvironment\thisenv}%
\def\next{\doignore{#1}}%
\else
\let\next\tablex
\fi
\next
}
\def\tablex#1{%
\def\itemindicate{#1}%
\parsearg\tabley
}
\def\tabley#1{%
{%
\makevalueexpandable
\edef\temp{\noexpand\tablez #1\space\space\space}%
\expandafter
}\temp \endtablez
}
\def\tablez #1 #2 #3 #4\endtablez{%
\aboveenvbreak
\ifnum 0#1>0 \advance \leftskip by #1\mil \fi
\ifnum 0#2>0 \tableindent=#2\mil \fi
\ifnum 0#3>0 \advance \rightskip by #3\mil \fi
\itemmax=\tableindent
\advance \itemmax by -\itemmargin
\advance \leftskip by \tableindent
\exdentamount=\tableindent
\parindent = 0pt
\parskip = \smallskipamount
\ifdim \parskip=0pt \parskip=2pt \fi
\let\item = \internalBitem
\let\itemx = \internalBitemx
}
\def\Etable{\endgraf\afterenvbreak}
\let\Eftable\Etable
\let\Evtable\Etable
\let\Eitemize\Etable
\let\Eenumerate\Etable
% This is the counter used by @enumerate, which is really @itemize
\newcount \itemno
\envdef\itemize{\parsearg\doitemize}
\def\doitemize#1{%
\aboveenvbreak
\itemmax=\itemindent
\advance\itemmax by -\itemmargin
\advance\leftskip by \itemindent
\exdentamount=\itemindent
\parindent=0pt
\parskip=\smallskipamount
\ifdim\parskip=0pt \parskip=2pt \fi
\def\itemcontents{#1}%
% @itemize with no arg is equivalent to @itemize @bullet.
\ifx\itemcontents\empty\def\itemcontents{\bullet}\fi
\let\item=\itemizeitem
}
% Definition of @item while inside @itemize and @enumerate.
%
\def\itemizeitem{%
\advance\itemno by 1 % for enumerations
{\let\par=\endgraf \smallbreak}% reasonable place to break
{%
% If the document has an @itemize directly after a section title, a
% \nobreak will be last on the list, and \sectionheading will have
% done a \vskip-\parskip. In that case, we don't want to zero
% parskip, or the item text will crash with the heading. On the
% other hand, when there is normal text preceding the item (as there
% usually is), we do want to zero parskip, or there would be too much
% space. In that case, we won't have a \nobreak before. At least
% that's the theory.
\ifnum\lastpenalty<10000 \parskip=0in \fi
\noindent
\hbox to 0pt{\hss \itemcontents \kern\itemmargin}%
\vadjust{\penalty 1200}}% not good to break after first line of item.
\flushcr
}
% \splitoff TOKENS\endmark defines \first to be the first token in
% TOKENS, and \rest to be the remainder.
%
\def\splitoff#1#2\endmark{\def\first{#1}\def\rest{#2}}%
% Allow an optional argument of an uppercase letter, lowercase letter,
% or number, to specify the first label in the enumerated list. No
% argument is the same as `1'.
%
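% For example, `@enumerate 3' starts the numbering at 3, and
% `@enumerate a' (or `@enumerate A') labels the items a, b, c, ...
% (or A, B, C, ...).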
\envparseargdef\enumerate{\enumeratey #1 \endenumeratey}
\def\enumeratey #1 #2\endenumeratey{%
% If we were given no argument, pretend we were given `1'.
\def\thearg{#1}%
\ifx\thearg\empty \def\thearg{1}\fi
%
% Detect if the argument is a single token. If so, it might be a
% letter. Otherwise, the only valid thing it can be is a number.
% (We will always have one token, because of the test we just made.
% This is a good thing, since \splitoff doesn't work given nothing at
% all -- the first parameter is undelimited.)
\expandafter\splitoff\thearg\endmark
\ifx\rest\empty
% Only one token in the argument. It could still be anything.
% A ``lowercase letter'' is one whose \lccode is nonzero.
% An ``uppercase letter'' is one whose \lccode is both nonzero, and
% not equal to itself.
% Otherwise, we assume it's a number.
%
% We need the \relax at the end of the \ifnum lines to stop TeX from
% continuing to look for a <number>.
%
\ifnum\lccode\expandafter`\thearg=0\relax
\numericenumerate % a number (we hope)
\else
% It's a letter.
\ifnum\lccode\expandafter`\thearg=\expandafter`\thearg\relax
\lowercaseenumerate % lowercase letter
\else
\uppercaseenumerate % uppercase letter
\fi
\fi
\else
% Multiple tokens in the argument. We hope it's a number.
\numericenumerate
\fi
}
% An @enumerate whose labels are integers. The starting integer is
% given in \thearg.
%
\def\numericenumerate{%
\itemno = \thearg
\startenumeration{\the\itemno}%
}
% The starting (lowercase) letter is in \thearg.
\def\lowercaseenumerate{%
\itemno = \expandafter`\thearg
\startenumeration{%
% Be sure we're not beyond the end of the alphabet.
\ifnum\itemno=0
\errmessage{No more lowercase letters in @enumerate; get a bigger
alphabet}%
\fi
\char\lccode\itemno
}%
}
% The starting (uppercase) letter is in \thearg.
\def\uppercaseenumerate{%
\itemno = \expandafter`\thearg
\startenumeration{%
% Be sure we're not beyond the end of the alphabet.
\ifnum\itemno=0
\errmessage{No more uppercase letters in @enumerate; get a bigger
alphabet}
\fi
\char\uccode\itemno
}%
}
% Call \doitemize, adding a period to the first argument and supplying the
% common last two arguments. Also subtract one from the initial value in
% \itemno, since @item increments \itemno.
%
\def\startenumeration#1{%
\advance\itemno by -1
\doitemize{#1.}\flushcr
}
% @alphaenumerate and @capsenumerate are abbreviations for giving an arg
% to @enumerate.
%
\def\alphaenumerate{\enumerate{a}}
\def\capsenumerate{\enumerate{A}}
\def\Ealphaenumerate{\Eenumerate}
\def\Ecapsenumerate{\Eenumerate}
% @multitable macros
% Amy Hendrickson, 8/18/94, 3/6/96
%
% @multitable ... @end multitable will make as many columns as desired.
% Contents of each column will wrap at width given in preamble. Width
% can be specified either with sample text given in a template line,
% or in percent of \hsize, the current width of text on page.
% Table can continue over pages but will only break between lines.
% To make preamble:
%
% Either define widths of columns in terms of percent of \hsize:
% @multitable @columnfractions .25 .3 .45
% @item ...
%
% Numbers following @columnfractions are the percent of the total
% current hsize to be used for each column. You may use as many
% columns as desired.
% Or use a template:
% @multitable {Column 1 template} {Column 2 template} {Column 3 template}
% @item ...
% using the widest term desired in each column.
% Each new table line starts with @item, each subsequent new column
% starts with @tab. Empty columns may be produced by supplying @tab's
% with nothing between them for as many times as empty columns are needed,
% i.e., @tab@tab@tab will produce two empty columns.
% @item, @tab do not need to be on their own lines, but it will not hurt
% if they are.
% Sample multitable:
% @multitable {Column 1 template} {Column 2 template} {Column 3 template}
% @item first col stuff @tab second col stuff @tab third col
% @item
% first col stuff
% @tab
% second col stuff
% @tab
% third col
% @item first col stuff @tab second col stuff
% @tab Many paragraphs of text may be used in any column.
%
% They will wrap at the width determined by the template.
% @item@tab@tab This will be in third column.
% @end multitable
% Default dimensions may be reset by user.
% @multitableparskip is vertical space between paragraphs in table.
% @multitableparindent is paragraph indent in table.
% @multitablecolmargin is horizontal space to be left between columns.
% @multitablelinespace is space to leave between table items, baseline
% to baseline.
% 0pt means it depends on current normal line spacing.
%
\newskip\multitableparskip
\newskip\multitableparindent
\newdimen\multitablecolspace
\newskip\multitablelinespace
\multitableparskip=0pt
\multitableparindent=6pt
\multitablecolspace=12pt
\multitablelinespace=0pt
% Macros used to set up halign preamble:
%
\let\endsetuptable\relax
\def\xendsetuptable{\endsetuptable}
\let\columnfractions\relax
\def\xcolumnfractions{\columnfractions}
\newif\ifsetpercent
% #1 is the @columnfraction, usually a decimal number like .5, but might
% be just 1. We just use it, whatever it is.
%
\def\pickupwholefraction#1 {%
\global\advance\colcount by 1
\expandafter\xdef\csname col\the\colcount\endcsname{#1\hsize}%
\setuptable
}
\newcount\colcount
\def\setuptable#1{%
\def\firstarg{#1}%
\ifx\firstarg\xendsetuptable
\let\go = \relax
\else
\ifx\firstarg\xcolumnfractions
\global\setpercenttrue
\else
\ifsetpercent
\let\go\pickupwholefraction
\else
\global\advance\colcount by 1
\setbox0=\hbox{#1\unskip\space}% Add a normal word space as a
% separator; typically that is always in the input, anyway.
\expandafter\xdef\csname col\the\colcount\endcsname{\the\wd0}%
\fi
\fi
\ifx\go\pickupwholefraction
% Put the argument back for the \pickupwholefraction call, so
% we'll always have a period there to be parsed.
\def\go{\pickupwholefraction#1}%
\else
\let\go = \setuptable
\fi%
\fi
\go
}
% multitable-only commands.
%
% @headitem starts a heading row, which we typeset in bold.
% Assignments have to be global since we are inside the implicit group
% of an alignment entry. Note that \everycr resets \everytab.
\def\headitem{\checkenv\multitable \crcr \global\everytab={\bf}\the\everytab}%
%
% A \tab used to include \hskip1sp. But then the space in a template
% line is not enough. That is bad. So let's go back to just `&' until
% we encounter the problem it was intended to solve again.
% --karl, nathan@acm.org, 20apr99.
\def\tab{\checkenv\multitable &\the\everytab}%
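% For example (illustrative contents), a table with a bold heading row:
% @multitable @columnfractions .3 .7
% @headitem Field @tab Meaning
% @item Name @tab The name of the record.
% @end multitable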
% @multitable ... @end multitable definitions:
%
\newtoks\everytab % insert after every tab.
%
\envdef\multitable{%
\vskip\parskip
\startsavinginserts
%
% @item within a multitable starts a normal row.
% We use \def instead of \let so that if one of the multitable entries
% contains an @itemize, we don't choke on the \item (seen as \crcr aka
% \endtemplate) expanding \doitemize.
\def\item{\crcr}%
%
\tolerance=9500
\hbadness=9500
\setmultitablespacing
\parskip=\multitableparskip
\parindent=\multitableparindent
\overfullrule=0pt
\global\colcount=0
%
\everycr = {%
\noalign{%
\global\everytab={}%
\global\colcount=0 % Reset the column counter.
% Check for saved footnotes, etc.
\checkinserts
% Keeps underfull box messages off when table breaks over pages.
%\filbreak
% Maybe so, but it also creates really weird page breaks when the
% table breaks over pages. Wouldn't \vfil be better? Wait until the
% problem manifests itself, so it can be fixed for real --karl.
}%
}%
%
\parsearg\domultitable
}
\def\domultitable#1{%
% To parse everything between @multitable and @item:
\setuptable#1 \endsetuptable
%
% This preamble sets up a generic column definition, which will
% be used as many times as user calls for columns.
% \vtop will set a single line and will also let text wrap and
% continue for many paragraphs if desired.
\halign\bgroup &%
\global\advance\colcount by 1
\multistrut
\vtop{%
% Use the current \colcount to find the correct column width:
\hsize=\expandafter\csname col\the\colcount\endcsname
%
% In order to keep entries from bumping into each other
% we will add a \leftskip of \multitablecolspace to all columns after
% the first one.
%
% If a template has been used, we will add \multitablecolspace
% to the width of each template entry.
%
% If the user has set preamble in terms of percent of \hsize we will
% use that dimension as the width of the column, and the \leftskip
% will keep entries from bumping into each other. Table will start at
% left margin and final column will justify at right margin.
%
% Make sure we don't inherit \rightskip from the outer environment.
\rightskip=0pt
\ifnum\colcount=1
% The first column will be indented with the surrounding text.
\advance\hsize by\leftskip
\else
\ifsetpercent \else
% If user has not set preamble in terms of percent of \hsize
% we will advance \hsize by \multitablecolspace.
\advance\hsize by \multitablecolspace
\fi
% In either case we will make \leftskip=\multitablecolspace:
\leftskip=\multitablecolspace
\fi
% Ignoring space at the beginning and end avoids an occasional spurious
% blank line, when TeX decides to break the line at the space before the
% box from the multistrut, so the strut ends up on a line by itself.
% For example:
% @multitable @columnfractions .11 .89
% @item @code{#}
% @tab Legal holiday which is valid in major parts of the whole country.
% Is automatically provided with highlighting sequences respectively
% marking characters.
\noindent\ignorespaces##\unskip\multistrut
}\cr
}
\def\Emultitable{%
\crcr
\egroup % end the \halign
\global\setpercentfalse
}
\def\setmultitablespacing{%
\def\multistrut{\strut}% just use the standard line spacing
%
% Compute \multitablelinespace (if not defined by user) for use in
% \multitableparskip calculation. We used define \multistrut based on
% this, but (ironically) that caused the spacing to be off.
% See bug-texinfo report from Werner Lemberg, 31 Oct 2004 12:52:20 +0100.
\ifdim\multitablelinespace=0pt
\setbox0=\vbox{X}\global\multitablelinespace=\the\baselineskip
\global\advance\multitablelinespace by-\ht0
\fi
%% Test to see if parskip is larger than space between lines of
%% table. If not, do nothing.
%% If so, set to same dimension as multitablelinespace.
\ifdim\multitableparskip>\multitablelinespace
\global\multitableparskip=\multitablelinespace
\global\advance\multitableparskip-7pt %% to keep parskip somewhat smaller
%% than skip between lines in the table.
\fi%
\ifdim\multitableparskip=0pt
\global\multitableparskip=\multitablelinespace
\global\advance\multitableparskip-7pt %% to keep parskip somewhat smaller
%% than skip between lines in the table.
\fi}
\message{conditionals,}
% @iftex, @ifnotdocbook, @ifnothtml, @ifnotinfo, @ifnotplaintext,
% @ifnotxml always succeed. They currently do nothing; we don't
% attempt to check whether the conditionals are properly nested. But we
% have to remember that they are conditionals, so that @end doesn't
% attempt to close an environment group.
%
\def\makecond#1{%
\expandafter\let\csname #1\endcsname = \relax
\expandafter\let\csname iscond.#1\endcsname = 1
}
\makecond{iftex}
\makecond{ifnotdocbook}
\makecond{ifnothtml}
\makecond{ifnotinfo}
\makecond{ifnotplaintext}
\makecond{ifnotxml}
% Ignore @ignore, @ifhtml, @ifinfo, and the like.
%
\def\direntry{\doignore{direntry}}
\def\documentdescription{\doignore{documentdescription}}
\def\docbook{\doignore{docbook}}
\def\html{\doignore{html}}
\def\ifdocbook{\doignore{ifdocbook}}
\def\ifhtml{\doignore{ifhtml}}
\def\ifinfo{\doignore{ifinfo}}
\def\ifnottex{\doignore{ifnottex}}
\def\ifplaintext{\doignore{ifplaintext}}
\def\ifxml{\doignore{ifxml}}
\def\ignore{\doignore{ignore}}
\def\menu{\doignore{menu}}
\def\xml{\doignore{xml}}
% Ignore text until a line `@end #1', keeping track of nested conditionals.
%
% A count to remember the depth of nesting.
\newcount\doignorecount
\def\doignore#1{\begingroup
% Scan in ``verbatim'' mode:
\obeylines
\catcode`\@ = \other
\catcode`\{ = \other
\catcode`\} = \other
%
% Make sure that spaces turn into tokens that match what \doignoretext wants.
\spaceisspace
%
% Count number of #1's that we've seen.
\doignorecount = 0
%
% Swallow text until we reach the matching `@end #1'.
\dodoignore{#1}%
}
{ \catcode`_=11 % We want to use \_STOP_ which cannot appear in texinfo source.
\obeylines %
%
\gdef\dodoignore#1{%
% #1 contains the command name as a string, e.g., `ifinfo'.
%
% Define a command to find the next `@end #1'.
\long\def\doignoretext##1^^M@end #1{%
\doignoretextyyy##1^^M@#1\_STOP_}%
%
% And this command to find another #1 command, at the beginning of a
% line. (Otherwise, we would consider a line `@c @ifset', for
% example, to count as an @ifset for nesting.)
\long\def\doignoretextyyy##1^^M@#1##2\_STOP_{\doignoreyyy{##2}\_STOP_}%
%
% And now expand that command.
\doignoretext ^^M%
}%
}
\def\doignoreyyy#1{%
\def\temp{#1}%
\ifx\temp\empty % Nothing found.
\let\next\doignoretextzzz
\else % Found a nested condition, ...
\advance\doignorecount by 1
\let\next\doignoretextyyy % ..., look for another.
% If we're here, #1 ends with ^^M\ifinfo (for example).
\fi
\next #1% the token \_STOP_ is present just after this macro.
}
% We have to swallow the remaining "\_STOP_".
%
\def\doignoretextzzz#1{%
\ifnum\doignorecount = 0 % We have just found the outermost @end.
\let\next\enddoignore
\else % Still inside a nested condition.
\advance\doignorecount by -1
\let\next\doignoretext % Look for the next @end.
\fi
\next
}
% Finish off ignored text.
{ \obeylines%
% Ignore anything after the last `@end #1'; this matters in verbatim
% environments, where otherwise the newline after an ignored conditional
% would result in a blank line in the output.
\gdef\enddoignore#1^^M{\endgroup\ignorespaces}%
}
% @set VAR sets the variable VAR to an empty value.
% @set VAR REST-OF-LINE sets VAR to the value REST-OF-LINE.
%
% Since we want to separate VAR from REST-OF-LINE (which might be
% empty), we can't just use \parsearg; we have to insert a space of our
% own to delimit the rest of the line, and then take it out again if we
% didn't need it.
% We rely on the fact that \parsearg sets \catcode`\ =10.
%
\parseargdef\set{\setyyy#1 \endsetyyy}
\def\setyyy#1 #2\endsetyyy{%
{%
\makevalueexpandable
\def\temp{#2}%
\edef\next{\gdef\makecsname{SET#1}}%
\ifx\temp\empty
\next{}%
\else
\setzzz#2\endsetzzz
\fi
}%
}
% Remove the trailing space that \set inserted.
\def\setzzz#1 \endsetzzz{\next{#1}}
% @clear VAR clears (i.e., unsets) the variable VAR.
%
\parseargdef\clear{%
{%
\makevalueexpandable
\global\expandafter\let\csname SET#1\endcsname=\relax
}%
}
% @value{foo} gets the text saved in variable foo.
\def\value{\begingroup\makevalueexpandable\valuexxx}
\def\valuexxx#1{\expandablevalue{#1}\endgroup}
{
\catcode`\- = \active \catcode`\_ = \active
%
\gdef\makevalueexpandable{%
\let\value = \expandablevalue
% We don't want these characters active, ...
\catcode`\-=\other \catcode`\_=\other
% ..., but we might end up with active ones in the argument if
% we're called from @code, as @code{@value{foo-bar_}}, though.
% So \let them to their normal equivalents.
\let-\realdash \let_\normalunderscore
}
}
% We have this subroutine so that we can handle at least some @value's
% properly in indexes (we call \makevalueexpandable in \indexdummies).
% The command has to be fully expandable (if the variable is set), since
% the result winds up in the index file. This means that if the
% variable's value contains other Texinfo commands, it's almost certain
% it will fail (although perhaps we could fix that with sufficient work
% to do a one-level expansion on the result, instead of complete).
%
\def\expandablevalue#1{%
\expandafter\ifx\csname SET#1\endcsname\relax
{[No value for ``#1'']}%
\message{Variable `#1', used in @value, is not set.}%
\else
\csname SET#1\endcsname
\fi
}
% @ifset VAR ... @end ifset reads the `...' iff VAR has been defined
% with @set.
%
% To get special treatment of `@end ifset,' call \makecond and then redefine.
%
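% For example (illustrative flag name):
% @set PROOF
% @ifset PROOF
% This paragraph appears only when PROOF is set.
% @end ifset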
\makecond{ifset}
\def\ifset{\parsearg{\doifset{\let\next=\ifsetfail}}}
\def\doifset#1#2{%
{%
\makevalueexpandable
\let\next=\empty
\expandafter\ifx\csname SET#2\endcsname\relax
#1% If not set, redefine \next.
\fi
\expandafter
}\next
}
\def\ifsetfail{\doignore{ifset}}
% @ifclear VAR ... @end ifclear reads the `...' iff VAR has never been
% defined with @set, or has been undefined with @clear.
%
% The `\else' inside the `\doifset' parameter is a trick to reuse the
% above code: if the variable is not set, do nothing, if it is set,
% then redefine \next to \ifclearfail.
%
\makecond{ifclear}
\def\ifclear{\parsearg{\doifset{\else \let\next=\ifclearfail}}}
\def\ifclearfail{\doignore{ifclear}}
% @dircategory CATEGORY -- specify a category of the dir file
% which this file should belong to. Ignore this in TeX.
\let\dircategory=\comment
% @defininfoenclose.
\let\definfoenclose=\comment
\message{indexing,}
% Index generation facilities
% Define \newwrite to be identical to plain tex's \newwrite
% except not \outer, so it can be used within macros and \if's.
\edef\newwrite{\makecsname{ptexnewwrite}}
% \newindex {foo} defines an index named foo.
% It automatically defines \fooindex such that
% \fooindex ...rest of line... puts an entry in the index foo.
% It also defines \fooindfile to be the number of the output channel for
% the file that accumulates this index. The file's extension is foo.
% The name of an index should be no more than 2 characters long
% for the sake of vms.
%
\def\newindex#1{%
\iflinks
\expandafter\newwrite \csname#1indfile\endcsname
\openout \csname#1indfile\endcsname \jobname.#1 % Open the file
\fi
\expandafter\xdef\csname#1index\endcsname{% % Define @#1index
\noexpand\doindex{#1}}
}
% @defindex foo == \newindex{foo}
%
\def\defindex{\parsearg\newindex}
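% For example, `@defindex au' creates an author index: entries are then
% made with `@auindex', and the raw index is written to \jobname.au.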
% Define @defcodeindex, like @defindex except put all entries in @code.
%
\def\defcodeindex{\parsearg\newcodeindex}
%
\def\newcodeindex#1{%
\iflinks
\expandafter\newwrite \csname#1indfile\endcsname
\openout \csname#1indfile\endcsname \jobname.#1
\fi
\expandafter\xdef\csname#1index\endcsname{%
\noexpand\docodeindex{#1}}%
}
% @synindex foo bar makes index foo feed into index bar.
% Do this instead of @defindex foo if you don't want it as a separate index.
%
% @syncodeindex foo bar similar, but put all entries made for index foo
% inside @code.
%
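% For example, `@syncodeindex fn cp' merges the function index into the
% concept index, setting the merged function entries in @code.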
\def\synindex#1 #2 {\dosynindex\doindex{#1}{#2}}
\def\syncodeindex#1 #2 {\dosynindex\docodeindex{#1}{#2}}
% #1 is \doindex or \docodeindex, #2 the index getting redefined (foo),
% #3 the target index (bar).
\def\dosynindex#1#2#3{%
% Only do \closeout if we haven't already done it, else we'll end up
% closing the target index.
\expandafter \ifx\csname donesynindex#2\endcsname \relax
% The \closeout helps reduce unnecessary open files; the limit on the
% Acorn RISC OS is a mere 16 files.
\expandafter\closeout\csname#2indfile\endcsname
\expandafter\let\csname donesynindex#2\endcsname = 1
\fi
% redefine \fooindfile:
\expandafter\let\expandafter\temp\expandafter=\csname#3indfile\endcsname
\expandafter\let\csname#2indfile\endcsname=\temp
% redefine \fooindex:
\expandafter\xdef\csname#2index\endcsname{\noexpand#1{#3}}%
}
% Define \doindex, the driver for all \fooindex macros.
% Argument #1 is generated by the calling \fooindex macro,
% and it is "foo", the name of the index.
% \doindex just uses \parsearg; it calls \doind for the actual work.
% This is because \doind is more useful to call from other macros.
% There is also \dosubind {index}{topic}{subtopic}
% which makes an entry in a two-level index such as the operation index.
\def\doindex#1{\edef\indexname{#1}\parsearg\singleindexer}
\def\singleindexer #1{\doind{\indexname}{#1}}
% like the previous two, but they put @code around the argument.
\def\docodeindex#1{\edef\indexname{#1}\parsearg\singlecodeindexer}
\def\singlecodeindexer #1{\doind{\indexname}{\code{#1}}}
% Take care of Texinfo commands that can appear in an index entry.
% Since there are some commands we want to expand, and others we don't,
% we have to laboriously prevent expansion for those that we don't.
%
\def\indexdummies{%
\escapechar = `\\ % use backslash in output files.
\def\@{@}% change to @@ when we switch to @ as escape char in index files.
\def\ {\realbackslash\space }%
%
% Need these in case \tex is in effect and \{ is a \delimiter again.
% But can't use \lbracecmd and \rbracecmd because texindex assumes
% braces and backslashes are used only as delimiters.
\let\{ = \mylbrace
\let\} = \myrbrace
%
% I don't entirely understand this, but when an index entry is
% generated from a macro call, the \endinput which \scanmacro inserts
% causes processing to be prematurely terminated. This is,
% apparently, because \indexsorttmp is fully expanded, and \endinput
% is an expandable command. The redefinition below makes \endinput
% disappear altogether for that purpose -- although logging shows that
% processing continues to some further point. On the other hand, it
% seems \endinput does not hurt in the printed index arg, since that
% is still getting written without apparent harm.
%
% Sample source (mac-idx3.tex, reported by Graham Percival to
% help-texinfo, 22may06):
% @macro funindex {WORD}
% @findex xyz
% @end macro
% ...
% @funindex commtest
%
% The above is not enough to reproduce the bug, but it gives the flavor.
%
% Sample whatsit resulting:
% .@write3{\entry{xyz}{@folio }{@code {xyz@endinput }}}
%
% So:
\let\endinput = \empty
%
% Do the redefinitions.
\commondummies
}
% For the aux and toc files, @ is the escape character. So we want to
% redefine everything using @ as the escape character (instead of
% \realbackslash, still used for index files). When everything uses @,
% this will be simpler.
%
\def\atdummies{%
\def\@{@@}%
\def\ {@ }%
\let\{ = \lbraceatcmd
\let\} = \rbraceatcmd
%
% Do the redefinitions.
\commondummies
\otherbackslash
}
% Called from \indexdummies and \atdummies.
%
\def\commondummies{%
%
% \definedummyword defines \#1 as \string\#1\space, thus effectively
% preventing its expansion. This is used only for control words,
% not control letters, because the \space would be incorrect for
% control characters, but is needed to separate the control word
% from whatever follows.
%
% For control letters, we have \definedummyletter, which omits the
% space.
%
% These can be used both for control words that take an argument and
% those that do not. If it is followed by {arg} in the input, then
% that will dutifully get written to the index (or wherever).
%
\def\definedummyword ##1{\def##1{\string##1\space}}%
\def\definedummyletter##1{\def##1{\string##1}}%
\let\definedummyaccent\definedummyletter
%
\commondummiesnofonts
%
\definedummyletter\_%
%
% Non-English letters.
\definedummyword\AA
\definedummyword\AE
\definedummyword\L
\definedummyword\OE
\definedummyword\O
\definedummyword\aa
\definedummyword\ae
\definedummyword\l
\definedummyword\oe
\definedummyword\o
\definedummyword\ss
\definedummyword\exclamdown
\definedummyword\questiondown
\definedummyword\ordf
\definedummyword\ordm
%
% Although these internal commands shouldn't show up, sometimes they do.
\definedummyword\bf
\definedummyword\gtr
\definedummyword\hat
\definedummyword\less
\definedummyword\sf
\definedummyword\sl
\definedummyword\tclose
\definedummyword\tt
%
\definedummyword\LaTeX
\definedummyword\TeX
%
% Assorted special characters.
\definedummyword\bullet
\definedummyword\comma
\definedummyword\copyright
\definedummyword\registeredsymbol
\definedummyword\dots
\definedummyword\enddots
\definedummyword\equiv
\definedummyword\error
\definedummyword\euro
\definedummyword\guillemetleft
\definedummyword\guillemetright
\definedummyword\guilsinglleft
\definedummyword\guilsinglright
\definedummyword\expansion
\definedummyword\minus
\definedummyword\ogonek
\definedummyword\pounds
\definedummyword\point
\definedummyword\print
\definedummyword\quotedblbase
\definedummyword\quotedblleft
\definedummyword\quotedblright
\definedummyword\quoteleft
\definedummyword\quoteright
\definedummyword\quotesinglbase
\definedummyword\result
\definedummyword\textdegree
%
% We want to disable all macros so that they are not expanded by \write.
\macrolist
%
\normalturnoffactive
%
% Handle some cases of @value -- where it does not contain any
% (non-fully-expandable) commands.
\makevalueexpandable
}
% \commondummiesnofonts: common to \commondummies and \indexnofonts.
%
\def\commondummiesnofonts{%
% Control letters and accents.
\definedummyletter\!%
\definedummyaccent\"%
\definedummyaccent\'%
\definedummyletter\*%
\definedummyaccent\,%
\definedummyletter\.%
\definedummyletter\/%
\definedummyletter\:%
\definedummyaccent\=%
\definedummyletter\?%
\definedummyaccent\^%
\definedummyaccent\`%
\definedummyaccent\~%
\definedummyword\u
\definedummyword\v
\definedummyword\H
\definedummyword\dotaccent
\definedummyword\ogonek
\definedummyword\ringaccent
\definedummyword\tieaccent
\definedummyword\ubaraccent
\definedummyword\udotaccent
\definedummyword\dotless
%
% Texinfo font commands.
\definedummyword\b
\definedummyword\i
\definedummyword\r
\definedummyword\sc
\definedummyword\t
%
% Commands that take arguments.
\definedummyword\acronym
\definedummyword\cite
\definedummyword\code
\definedummyword\command
\definedummyword\dfn
\definedummyword\emph
\definedummyword\env
\definedummyword\file
\definedummyword\kbd
\definedummyword\key
\definedummyword\math
\definedummyword\option
\definedummyword\pxref
\definedummyword\ref
\definedummyword\samp
\definedummyword\strong
\definedummyword\tie
\definedummyword\uref
\definedummyword\url
\definedummyword\var
\definedummyword\verb
\definedummyword\w
\definedummyword\xref
}
% \indexnofonts is used when outputting the strings to sort the index
% by, and when constructing control sequence names. It eliminates all
% control sequences and just writes whatever the best ASCII sort string
% would be for a given command (usually its argument).
%
\def\indexnofonts{%
% Accent commands should become @asis.
\def\definedummyaccent##1{\let##1\asis}%
% We can just ignore other control letters.
\def\definedummyletter##1{\let##1\empty}%
% Hopefully, all control words can become @asis.
\let\definedummyword\definedummyaccent
%
\commondummiesnofonts
%
% Don't no-op \tt, since it isn't a user-level command
% and is used in the definitions of the active chars like <, >, |, etc.
% Likewise with the other plain tex font commands.
%\let\tt=\asis
%
\def\ { }%
\def\@{@}%
% how to handle braces?
\def\_{\normalunderscore}%
%
% Non-English letters.
\def\AA{AA}%
\def\AE{AE}%
\def\L{L}%
\def\OE{OE}%
\def\O{O}%
\def\aa{aa}%
\def\ae{ae}%
\def\l{l}%
\def\oe{oe}%
\def\o{o}%
\def\ss{ss}%
\def\exclamdown{!}%
\def\questiondown{?}%
\def\ordf{a}%
\def\ordm{o}%
%
\def\LaTeX{LaTeX}%
\def\TeX{TeX}%
%
% Assorted special characters.
% (The following {} will end up in the sort string, but that's ok.)
\def\bullet{bullet}%
\def\comma{,}%
\def\copyright{copyright}%
\def\registeredsymbol{R}%
\def\dots{...}%
\def\enddots{...}%
\def\equiv{==}%
\def\error{error}%
\def\euro{euro}%
\def\guillemetleft{<<}%
\def\guillemetright{>>}%
\def\guilsinglleft{<}%
\def\guilsinglright{>}%
\def\expansion{==>}%
\def\minus{-}%
\def\pounds{pounds}%
\def\point{.}%
\def\print{-|}%
\def\quotedblbase{"}%
\def\quotedblleft{"}%
\def\quotedblright{"}%
\def\quoteleft{`}%
\def\quoteright{'}%
\def\quotesinglbase{,}%
\def\result{=>}%
\def\textdegree{degrees}%
%
% We need to get rid of all macros, leaving only the arguments (if present).
% Of course this is not nearly correct, but it is the best we can do for now.
% makeinfo does not expand macros in the argument to @deffn, which ends up
% writing an index entry, and texindex isn't prepared for an index sort entry
% that starts with \.
%
% Since macro invocations are followed by braces, we can just redefine them
% to take a single TeX argument. The case of a macro invocation that
% goes to end-of-line is not handled.
%
\macrolist
}
\let\indexbackslash=0 %overridden during \printindex.
\let\SETmarginindex=\relax % put index entries in margin (undocumented)?
% Most index entries go through here, but \dosubind is the general case.
% #1 is the index name, #2 is the entry text.
\def\doind#1#2{\dosubind{#1}{#2}{}}
% Workhorse for all \fooindexes.
% #1 is name of index, #2 is stuff to put there, #3 is subentry --
% empty if called from \doind, as we usually are (the main exception
% is with most defuns, which call us directly).
%
\def\dosubind#1#2#3{%
\iflinks
{%
% Store the main index entry text (including the third arg).
\toks0 = {#2}%
% If third arg is present, precede it with a space.
\def\thirdarg{#3}%
\ifx\thirdarg\empty \else
\toks0 = \expandafter{\the\toks0 \space #3}%
\fi
%
\edef\writeto{\csname#1indfile\endcsname}%
%
\safewhatsit\dosubindwrite
}%
\fi
}
% Write the entry in \toks0 to the index file:
%
\def\dosubindwrite{%
% Put the index entry in the margin if desired.
\ifx\SETmarginindex\relax\else
\insert\margin{\hbox{\vrule height8pt depth3pt width0pt \the\toks0}}%
\fi
%
% Remember, we are within a group.
\indexdummies % Must do this here, since \bf, etc expand at this stage
\def\backslashcurfont{\indexbackslash}% \indexbackslash isn't defined now
% so it will be output as is; and it will print as backslash.
%
% Process the index entry with all font commands turned off, to
% get the string to sort by.
{\indexnofonts
\edef\temp{\the\toks0}% need full expansion
\xdef\indexsorttmp{\temp}%
}%
%
% Set up the complete index entry, with both the sort key and
% the original text, including any font commands. We write
% three arguments to \entry to the .?? file (four in the
% subentry case), texindex reduces to two when writing the .??s
% sorted result.
\edef\temp{%
\write\writeto{%
\string\entry{\indexsorttmp}{\noexpand\folio}{\the\toks0}}%
}%
\temp
}
% Take care of unwanted page breaks/skips around a whatsit:
%
% If a skip is the last thing on the list now, preserve it
% by backing up by \lastskip, doing the \write, then inserting
% the skip again. Otherwise, the whatsit generated by the
% \write or \pdfdest will make \lastskip zero. The result is that
% sequences like this:
% @end defun
% @tindex whatever
% @defun ...
% will have extra space inserted, because the \medbreak in the
% start of the @defun won't see the skip inserted by the @end of
% the previous defun.
%
% But don't do any of this if we're not in vertical mode. We
% don't want to do a \vskip and prematurely end a paragraph.
%
% Avoid page breaks due to these extra skips, too.
%
% But wait, there is a catch there:
% We'll have to check whether \lastskip is zero skip. \ifdim is not
% sufficient for this purpose, as it ignores stretch and shrink parts
% of the skip. The only way seems to be to check the textual
% representation of the skip.
%
% The following is almost like \def\zeroskipmacro{0.0pt} except that
% the ``p'' and ``t'' characters have catcode \other, not 11 (letter).
%
\edef\zeroskipmacro{\expandafter\the\csname z@skip\endcsname}
%
\newskip\whatsitskip
\newcount\whatsitpenalty
%
% ..., ready, GO:
%
\def\safewhatsit#1{%
\ifhmode
#1%
\else
% \lastskip and \lastpenalty cannot both be nonzero simultaneously.
\whatsitskip = \lastskip
\edef\lastskipmacro{\the\lastskip}%
\whatsitpenalty = \lastpenalty
%
% If \lastskip is nonzero, that means the last item was a
% skip. And since a skip is discardable, that means this
% -\whatsitskip glue we're inserting is preceded by a
% non-discardable item, therefore it is not a potential
% breakpoint, therefore no \nobreak needed.
\ifx\lastskipmacro\zeroskipmacro
\else
\vskip-\whatsitskip
\fi
%
#1%
%
\ifx\lastskipmacro\zeroskipmacro
% If \lastskip was zero, perhaps the last item was a penalty, and
% perhaps it was >=10000, e.g., a \nobreak. In that case, we want
% to re-insert the same penalty (values >10000 are used for various
% signals); since we just inserted a non-discardable item, any
% following glue (such as a \parskip) would be a breakpoint. For example:
%
% @deffn deffn-whatever
% @vindex index-whatever
% Description.
% would allow a break between the index-whatever whatsit
% and the "Description." paragraph.
\ifnum\whatsitpenalty>9999 \penalty\whatsitpenalty \fi
\else
% On the other hand, if we had a nonzero \lastskip,
% this make-up glue would be preceded by a non-discardable item
% (the whatsit from the \write), so we must insert a \nobreak.
\nobreak\vskip\whatsitskip
\fi
\fi
}
% The index entry written in the file actually looks like
% \entry {sortstring}{page}{topic}
% or
% \entry {sortstring}{page}{topic}{subtopic}
% The texindex program reads in these files and writes files
% containing these kinds of lines:
% \initial {c}
% before the first topic whose initial is c
% \entry {topic}{pagelist}
% for a topic that is used without subtopics
% \primary {topic}
% for the beginning of a topic that is used with subtopics
% \secondary {subtopic}{pagelist}
% for each subtopic.
% Define the user-accessible indexing commands
% @findex, @vindex, @kindex, @cindex.
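% For example, `@cindex searching records' makes an entry in the concept
% index, and `@findex rec_db_new' (an illustrative function name) makes
% one in the function index.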
\def\findex {\fnindex}
\def\kindex {\kyindex}
\def\cindex {\cpindex}
\def\vindex {\vrindex}
\def\tindex {\tpindex}
\def\pindex {\pgindex}
\def\cindexsub {\begingroup\obeylines\cindexsub}
{\obeylines %
\gdef\cindexsub "#1" #2^^M{\endgroup %
\dosubind{cp}{#2}{#1}}}
% Define the macros used in formatting output of the sorted index material.
% @printindex causes a particular index (the ??s file) to get printed.
% It does not print any chapter heading (usually an @unnumbered).
%
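% For example, a manual's final node typically contains something like:
% @node Concept Index
% @unnumbered Concept Index
% @printindex cp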
\parseargdef\printindex{\begingroup
\dobreak \chapheadingskip{10000}%
%
\smallfonts \rm
\tolerance = 9500
\plainfrenchspacing
\everypar = {}% don't want the \kern -\parindent from indentation suppression.
%
% See if the index file exists and is nonempty.
% Change catcode of @ here so that if the index file contains
% \initial {@}
% as its first line, TeX doesn't complain about mismatched braces
% (because it thinks @} is a control sequence).
\catcode`\@ = 11
\openin 1 \jobname.#1s
\ifeof 1
% \enddoublecolumns gets confused if there is no text in the index,
% and it loses the chapter title and the aux file entries for the
% index. The easiest way to prevent this problem is to make sure
% there is some text.
\putwordIndexNonexistent
\else
%
% If the index file exists but is empty, then \openin leaves \ifeof
% false. We have to make TeX try to read something from the file, so
% it can discover if there is anything in it.
\read 1 to \temp
\ifeof 1
\putwordIndexIsEmpty
\else
% Index files are almost Texinfo source, but we use \ as the escape
% character. It would be better to use @, but that's too big a change
% to make right now.
\def\indexbackslash{\backslashcurfont}%
\catcode`\\ = 0
\escapechar = `\\
\begindoublecolumns
\input \jobname.#1s
\enddoublecolumns
\fi
\fi
\closein 1
\endgroup}
% These macros are used by the sorted index file itself.
% Change them to control the appearance of the index.
\def\initial#1{{%
% Some minor font changes for the special characters.
\let\tentt=\sectt \let\tt=\sectt \let\sf=\sectt
%
% Remove any glue we may have, we'll be inserting our own.
\removelastskip
%
% We like breaks before the index initials, so insert a bonus.
\nobreak
\vskip 0pt plus 3\baselineskip
\penalty 0
\vskip 0pt plus -3\baselineskip
%
% Typeset the initial. Making this add up to a whole number of
% baselineskips increases the chance of the dots lining up from column
% to column. It still won't often be perfect, because of the stretch
% we need before each entry, but it's better.
%
% No shrink because it confuses \balancecolumns.
\vskip 1.67\baselineskip plus .5\baselineskip
\leftline{\secbf #1}%
% Do our best not to break after the initial.
\nobreak
\vskip .33\baselineskip plus .1\baselineskip
}}
% \entry typesets a paragraph consisting of the text (#1), dot leaders, and
% then page number (#2) flushed to the right margin. It is used for index
% and table of contents entries. The paragraph is indented by \leftskip.
%
% A straightforward implementation would start like this:
% \def\entry#1#2{...
% But this freezes the catcodes in the argument, and can cause problems to
% @code, which sets - active. This problem was fixed by a kludge---
% ``-'' was active throughout whole index, but this isn't really right.
%
% The right solution is to prevent \entry from swallowing the whole text.
% --kasal, 21nov03
\def\entry{%
\begingroup
%
% Start a new paragraph if necessary, so our assignments below can't
% affect previous text.
\par
%
% Do not fill out the last line with white space.
\parfillskip = 0in
%
% No extra space above this paragraph.
\parskip = 0in
%
% Do not prefer a separate line ending with a hyphen to fewer lines.
\finalhyphendemerits = 0
%
% \hangindent is only relevant when the entry text and page number
% don't both fit on one line. In that case, bob suggests starting the
% dots pretty far over on the line. Unfortunately, a large
% indentation looks wrong when the entry text itself is broken across
% lines. So we use a small indentation and put up with long leaders.
%
% \hangafter is reset to 1 (which is the value we want) at the start
% of each paragraph, so we need not do anything with that.
\hangindent = 2em
%
% When the entry text needs to be broken, just fill out the first line
% with blank space.
\rightskip = 0pt plus1fil
%
% A bit of stretch before each entry for the benefit of balancing
% columns.
\vskip 0pt plus1pt
%
% Swallow the left brace of the text (first parameter):
\afterassignment\doentry
\let\temp =
}
\def\doentry{%
\bgroup % Instead of the swallowed brace.
\noindent
\aftergroup\finishentry
% And now comes the text of the entry.
}
\def\finishentry#1{%
% #1 is the page number.
%
% The following is kludged to not output a line of dots in the index if
% there are no page numbers. The next person who breaks this will be
% cursed by a Unix daemon.
\setbox\boxA = \hbox{#1}%
\ifdim\wd\boxA = 0pt
\ %
\else
%
% If we must, put the page number on a line of its own, and fill out
% this line with blank space. (The \hfil is overwhelmed with the
% fill leaders glue in \indexdotfill if the page number does fit.)
\hfil\penalty50
\null\nobreak\indexdotfill % Have leaders before the page number.
%
% The `\ ' here is removed by the implicit \unskip that TeX does as
% part of (the primitive) \par. Without it, a spurious underfull
% \hbox ensues.
\ifpdf
\pdfgettoks#1.%
\ \the\toksA
\else
\ #1%
\fi
\fi
\par
\endgroup
}
% Like plain.tex's \dotfill, except uses up at least 1 em.
\def\indexdotfill{\cleaders
\hbox{$\mathsurround=0pt \mkern1.5mu.\mkern1.5mu$}\hskip 1em plus 1fill}
\def\primary #1{\line{#1\hfil}}
\newskip\secondaryindent \secondaryindent=0.5cm
\def\secondary#1#2{{%
\parfillskip=0in
\parskip=0in
\hangindent=1in
\hangafter=1
\noindent\hskip\secondaryindent\hbox{#1}\indexdotfill
\ifpdf
\pdfgettoks#2.\ \the\toksA % The page number ends the paragraph.
\else
#2
\fi
\par
}}
% Define two-column mode, which we use to typeset indexes.
% Adapted from the TeXbook, page 416, which is to say,
% the manmac.tex format used to print the TeXbook itself.
\catcode`\@=11
\newbox\partialpage
\newdimen\doublecolumnhsize
\def\begindoublecolumns{\begingroup % ended by \enddoublecolumns
% Grab any single-column material above us.
\output = {%
%
% Here is a possibility not foreseen in manmac: if we accumulate a
% whole lot of material, we might end up calling this \output
% routine twice in a row (see the doublecol-lose test, which is
% essentially a couple of indexes with @setchapternewpage off). In
% that case we just ship out what is in \partialpage with the normal
% output routine. Generally, \partialpage will be empty when this
% runs and this will be a no-op. See the indexspread.tex test case.
\ifvoid\partialpage \else
\onepageout{\pagecontents\partialpage}%
\fi
%
\global\setbox\partialpage = \vbox{%
% Unvbox the main output page.
\unvbox\PAGE
\kern-\topskip \kern\baselineskip
}%
}%
\eject % run that output routine to set \partialpage
%
% Use the double-column output routine for subsequent pages.
\output = {\doublecolumnout}%
%
% Change the page size parameters. We could do this once outside this
% routine, in each of @smallbook, @afourpaper, and the default 8.5x11
% format, but then we repeat the same computation. Repeating a couple
% of assignments once per index is clearly meaningless for the
% execution time, so we may as well do it in one place.
%
% First we halve the line length, less a little for the gutter between
% the columns. We compute the gutter based on the line length, so it
% changes automatically with the paper format. The magic constant
% below is chosen so that the gutter has the same value (well, +-<1pt)
% as it did when we hard-coded it.
%
% We put the result in a separate register, \doublecolumnhsize, so we
% can restore it in \pagesofar, after \hsize itself has (potentially)
% been clobbered.
%
\doublecolumnhsize = \hsize
\advance\doublecolumnhsize by -.04154\hsize
\divide\doublecolumnhsize by 2
\hsize = \doublecolumnhsize
%
% Double the \vsize as well. (We don't need a separate register here,
% since nobody clobbers \vsize.)
\vsize = 2\vsize
}
% The double-column output routine for all double-column pages except
% the last.
%
\def\doublecolumnout{%
\splittopskip=\topskip \splitmaxdepth=\maxdepth
% Get the available space for the double columns -- the normal
% (undoubled) page height minus any material left over from the
% previous page.
\dimen@ = \vsize
\divide\dimen@ by 2
\advance\dimen@ by -\ht\partialpage
%
% box0 will be the left-hand column, box2 the right.
\setbox0=\vsplit255 to\dimen@ \setbox2=\vsplit255 to\dimen@
\onepageout\pagesofar
\unvbox255
\penalty\outputpenalty
}
%
% Re-output the contents of the output page -- any previous material,
% followed by the two boxes we just split, in box0 and box2.
\def\pagesofar{%
\unvbox\partialpage
%
\hsize = \doublecolumnhsize
\wd0=\hsize \wd2=\hsize
\hbox to\pagewidth{\box0\hfil\box2}%
}
%
% All done with double columns.
\def\enddoublecolumns{%
% The following penalty ensures that the page builder is exercised
% _before_ we change the output routine. This is necessary in the
% following situation:
%
% The last section of the index consists only of a single entry.
% Before this section, \pagetotal is less than \pagegoal, so no
% break occurs before the last section starts. However, the last
% section, consisting of \initial and the single \entry, does not
% fit on the page and has to be broken off. Without the following
% penalty the page builder will not be exercised until \eject
% below, and by that time we'll already have changed the output
% routine to the \balancecolumns version, so the next-to-last
% double-column page will be processed with \balancecolumns, which
% is wrong: The two columns will go to the main vertical list, with
% the broken-off section in the recent contributions. As soon as
% the output routine finishes, TeX starts reconsidering the page
% break. The two columns and the broken-off section both fit on the
% page, because the two columns now take up only half of the page
% goal. When TeX sees \eject from below which follows the final
% section, it invokes the new output routine that we've set after
% \balancecolumns below; \onepageout will try to fit the two columns
% and the final section into the vbox of \pageheight (see
% \pagebody), causing an overfull box.
%
% Note that glue won't work here, because glue does not exercise the
% page builder, unlike penalties (see The TeXbook, pp. 280-281).
\penalty0
%
\output = {%
% Split the last of the double-column material. Leave it on the
% current page, no automatic page break.
\balancecolumns
%
% If we end up splitting too much material for the current page,
% though, there will be another page break right after this \output
% invocation ends. Having called \balancecolumns once, we do not
% want to call it again. Therefore, reset \output to its normal
% definition right away. (We hope \balancecolumns will never be
% called on to balance too much material, but if it is, this makes
% the output somewhat more palatable.)
\global\output = {\onepageout{\pagecontents\PAGE}}%
}%
\eject
\endgroup % started in \begindoublecolumns
%
% \pagegoal was set to the doubled \vsize above, since we restarted
% the current page. We're now back to normal single-column
% typesetting, so reset \pagegoal to the normal \vsize (after the
% \endgroup where \vsize got restored).
\pagegoal = \vsize
}
%
% Called at the end of the double column material.
\def\balancecolumns{%
\setbox0 = \vbox{\unvbox255}% like \box255 but more efficient, see p.120.
\dimen@ = \ht0
\advance\dimen@ by \topskip
\advance\dimen@ by-\baselineskip
\divide\dimen@ by 2 % target to split to
%debug\message{final 2-column material height=\the\ht0, target=\the\dimen@.}%
\splittopskip = \topskip
% Loop until we get a decent breakpoint.
{%
\vbadness = 10000
\loop
\global\setbox3 = \copy0
\global\setbox1 = \vsplit3 to \dimen@
\ifdim\ht3>\dimen@
\global\advance\dimen@ by 1pt
\repeat
}%
%debug\message{split to \the\dimen@, column heights: \the\ht1, \the\ht3.}%
\setbox0=\vbox to\dimen@{\unvbox1}%
\setbox2=\vbox to\dimen@{\unvbox3}%
%
\pagesofar
}
\catcode`\@ = \other
\message{sectioning,}
% Chapters, sections, etc.
% \unnumberedno is an oxymoron, of course. But we count the unnumbered
% sections so that we can refer to them unambiguously in the pdf
% outlines by their "section number". We avoid collisions with chapter
% numbers by starting them at 10000. (If a document ever has 10000
% chapters, we're in trouble anyway, I'm sure.)
\newcount\unnumberedno \unnumberedno = 10000
\newcount\chapno
\newcount\secno \secno=0
\newcount\subsecno \subsecno=0
\newcount\subsubsecno \subsubsecno=0
% This counter is funny since it counts through charcodes of letters A, B, ...
\newcount\appendixno \appendixno = `\@
%
% \def\appendixletter{\char\the\appendixno}
% We do the following ugly conditional instead of the above simple
% construct for the sake of pdftex, which needs the actual
% letter in the expansion, not just typeset.
%
\def\appendixletter{%
\ifnum\appendixno=`A A%
\else\ifnum\appendixno=`B B%
\else\ifnum\appendixno=`C C%
\else\ifnum\appendixno=`D D%
\else\ifnum\appendixno=`E E%
\else\ifnum\appendixno=`F F%
\else\ifnum\appendixno=`G G%
\else\ifnum\appendixno=`H H%
\else\ifnum\appendixno=`I I%
\else\ifnum\appendixno=`J J%
\else\ifnum\appendixno=`K K%
\else\ifnum\appendixno=`L L%
\else\ifnum\appendixno=`M M%
\else\ifnum\appendixno=`N N%
\else\ifnum\appendixno=`O O%
\else\ifnum\appendixno=`P P%
\else\ifnum\appendixno=`Q Q%
\else\ifnum\appendixno=`R R%
\else\ifnum\appendixno=`S S%
\else\ifnum\appendixno=`T T%
\else\ifnum\appendixno=`U U%
\else\ifnum\appendixno=`V V%
\else\ifnum\appendixno=`W W%
\else\ifnum\appendixno=`X X%
\else\ifnum\appendixno=`Y Y%
\else\ifnum\appendixno=`Z Z%
% The \the is necessary, despite appearances, because \appendixletter is
% expanded while writing the .toc file. \char\appendixno is not
% expandable, thus it is written literally, thus all appendixes come out
% with the same letter (or @) in the toc without it.
\else\char\the\appendixno
\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi
\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi}
% Each @chapter defines these (using marks) as the number+name, number
% and name of the chapter. Page headings and footings can use
% these. @section does likewise.
\def\thischapter{}
\def\thischapternum{}
\def\thischaptername{}
\def\thissection{}
\def\thissectionnum{}
\def\thissectionname{}
\newcount\absseclevel % used to calculate proper heading level
\newcount\secbase\secbase=0 % @raisesections/@lowersections modify this count
% @raisesections: treat @section as chapter, @subsection as section, etc.
\def\raisesections{\global\advance\secbase by -1}
\let\up=\raisesections % original BFox name
% @lowersections: treat @chapter as section, @section as subsection, etc.
\def\lowersections{\global\advance\secbase by 1}
\let\down=\lowersections % original BFox name
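% For example, saying `@lowersections' before an @include makes the
% included file's @chapter commands format as sections; a matching
% `@raisesections' afterwards restores the normal levels.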
% we only have subsub.
\chardef\maxseclevel = 3
%
% A numbered section within an unnumbered changes to unnumbered too.
% To achieve this, remember the "biggest" unnum. sec. we are currently in:
\chardef\unmlevel = \maxseclevel
%
% Trace whether the current chapter is an appendix or not:
% \chapheadtype is "N" or "A", unnumbered chapters are ignored.
\def\chapheadtype{N}
% Choose a heading macro
% #1 is heading type
% #2 is heading level
% #3 is text for heading
\def\genhead#1#2#3{%
% Compute the abs. sec. level:
\absseclevel=#2
\advance\absseclevel by \secbase
% Make sure \absseclevel doesn't fall outside the range:
\ifnum \absseclevel < 0
\absseclevel = 0
\else
\ifnum \absseclevel > 3
\absseclevel = 3
\fi
\fi
% The heading type:
\def\headtype{#1}%
\if \headtype U%
\ifnum \absseclevel < \unmlevel
\chardef\unmlevel = \absseclevel
\fi
\else
% Check for appendix sections:
\ifnum \absseclevel = 0
\edef\chapheadtype{\headtype}%
\else
\if \headtype A\if \chapheadtype N%
\errmessage{@appendix... within a non-appendix chapter}%
\fi\fi
\fi
% Check for numbered within unnumbered:
\ifnum \absseclevel > \unmlevel
\def\headtype{U}%
\else
\chardef\unmlevel = 3
\fi
\fi
% Now print the heading:
\if \headtype U%
\ifcase\absseclevel
\unnumberedzzz{#3}%
\or \unnumberedseczzz{#3}%
\or \unnumberedsubseczzz{#3}%
\or \unnumberedsubsubseczzz{#3}%
\fi
\else
\if \headtype A%
\ifcase\absseclevel
\appendixzzz{#3}%
\or \appendixsectionzzz{#3}%
\or \appendixsubseczzz{#3}%
\or \appendixsubsubseczzz{#3}%
\fi
\else
\ifcase\absseclevel
\chapterzzz{#3}%
\or \seczzz{#3}%
\or \numberedsubseczzz{#3}%
\or \numberedsubsubseczzz{#3}%
\fi
\fi
\fi
\suppressfirstparagraphindent
}
% an interface:
\def\numhead{\genhead N}
\def\apphead{\genhead A}
\def\unnmhead{\genhead U}
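% For instance, @section normally reaches \numhead1, giving \absseclevel=1
% and \seczzz; after one @lowersections, \secbase is 1, \absseclevel
% becomes 2, and the same @section is typeset via \numberedsubseczzz.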
% @chapter, @appendix, @unnumbered. Increment top-level counter, reset
% all lower-level sectioning counters to zero.
%
% Also set \chaplevelprefix, which we prepend to @float sequence numbers
% (e.g., figures), q.v. By default (before any chapter), that is empty.
\let\chaplevelprefix = \empty
%
\outer\parseargdef\chapter{\numhead0{#1}} % normally numhead0 calls chapterzzz
\def\chapterzzz#1{%
% section resetting is \global in case the chapter is in a group, such
% as an @include file.
\global\secno=0 \global\subsecno=0 \global\subsubsecno=0
\global\advance\chapno by 1
%
% Used for \float.
\gdef\chaplevelprefix{\the\chapno.}%
\resetallfloatnos
%
\message{\putwordChapter\space \the\chapno}%
%
% Write the actual heading.
\chapmacro{#1}{Ynumbered}{\the\chapno}%
%
% So @section and the like are numbered underneath this chapter.
\global\let\section = \numberedsec
\global\let\subsection = \numberedsubsec
\global\let\subsubsection = \numberedsubsubsec
}
\outer\parseargdef\appendix{\apphead0{#1}} % normally apphead0 calls appendixzzz
\def\appendixzzz#1{%
\global\secno=0 \global\subsecno=0 \global\subsubsecno=0
\global\advance\appendixno by 1
\gdef\chaplevelprefix{\appendixletter.}%
\resetallfloatnos
%
\def\appendixnum{\putwordAppendix\space \appendixletter}%
\message{\appendixnum}%
%
\chapmacro{#1}{Yappendix}{\appendixletter}%
%
\global\let\section = \appendixsec
\global\let\subsection = \appendixsubsec
\global\let\subsubsection = \appendixsubsubsec
}
\outer\parseargdef\unnumbered{\unnmhead0{#1}} % normally unnmhead0 calls unnumberedzzz
\def\unnumberedzzz#1{%
\global\secno=0 \global\subsecno=0 \global\subsubsecno=0
\global\advance\unnumberedno by 1
%
% Since an unnumbered has no number, no prefix for figures.
\global\let\chaplevelprefix = \empty
\resetallfloatnos
%
% This used to be simply \message{#1}, but TeX fully expands the
% argument to \message. Therefore, if #1 contained @-commands, TeX
% expanded them. For example, in `@unnumbered The @cite{Book}', TeX
% expanded @cite (which turns out to cause errors because \cite is meant
% to be executed, not expanded).
%
% Anyway, we don't want the fully-expanded definition of @cite to appear
% as a result of the \message, we just want `@cite' itself. We use
% \the\toks0 to achieve this: TeX expands \the\toks0 only once,
% simply yielding the contents of the token register. (We also do this for
% the toc entries.)
\toks0 = {#1}%
\message{(\the\toks0)}%
%
\chapmacro{#1}{Ynothing}{\the\unnumberedno}%
%
\global\let\section = \unnumberedsec
\global\let\subsection = \unnumberedsubsec
\global\let\subsubsection = \unnumberedsubsubsec
}
% @centerchap is like @unnumbered, but the heading is centered.
\outer\parseargdef\centerchap{%
% Well, we could do the following in a group, but that would break
% an assumption that \chapmacro is called at the outermost level.
% Thus we are safer this way: --kasal, 24feb04
\let\centerparametersmaybe = \centerparameters
\unnmhead0{#1}%
\let\centerparametersmaybe = \relax
}
% @top is like @unnumbered.
\let\top\unnumbered
% Sections.
\outer\parseargdef\numberedsec{\numhead1{#1}} % normally calls seczzz
\def\seczzz#1{%
\global\subsecno=0 \global\subsubsecno=0 \global\advance\secno by 1
\sectionheading{#1}{sec}{Ynumbered}{\the\chapno.\the\secno}%
}
\outer\parseargdef\appendixsection{\apphead1{#1}} % normally calls appendixsectionzzz
\def\appendixsectionzzz#1{%
\global\subsecno=0 \global\subsubsecno=0 \global\advance\secno by 1
\sectionheading{#1}{sec}{Yappendix}{\appendixletter.\the\secno}%
}
\let\appendixsec\appendixsection
\outer\parseargdef\unnumberedsec{\unnmhead1{#1}} % normally calls unnumberedseczzz
\def\unnumberedseczzz#1{%
\global\subsecno=0 \global\subsubsecno=0 \global\advance\secno by 1
\sectionheading{#1}{sec}{Ynothing}{\the\unnumberedno.\the\secno}%
}
% Subsections.
\outer\parseargdef\numberedsubsec{\numhead2{#1}} % normally calls numberedsubseczzz
\def\numberedsubseczzz#1{%
\global\subsubsecno=0 \global\advance\subsecno by 1
\sectionheading{#1}{subsec}{Ynumbered}{\the\chapno.\the\secno.\the\subsecno}%
}
\outer\parseargdef\appendixsubsec{\apphead2{#1}} % normally calls appendixsubseczzz
\def\appendixsubseczzz#1{%
\global\subsubsecno=0 \global\advance\subsecno by 1
\sectionheading{#1}{subsec}{Yappendix}%
{\appendixletter.\the\secno.\the\subsecno}%
}
\outer\parseargdef\unnumberedsubsec{\unnmhead2{#1}} %normally calls unnumberedsubseczzz
\def\unnumberedsubseczzz#1{%
\global\subsubsecno=0 \global\advance\subsecno by 1
\sectionheading{#1}{subsec}{Ynothing}%
{\the\unnumberedno.\the\secno.\the\subsecno}%
}
% Subsubsections.
\outer\parseargdef\numberedsubsubsec{\numhead3{#1}} % normally numberedsubsubseczzz
\def\numberedsubsubseczzz#1{%
\global\advance\subsubsecno by 1
\sectionheading{#1}{subsubsec}{Ynumbered}%
{\the\chapno.\the\secno.\the\subsecno.\the\subsubsecno}%
}
\outer\parseargdef\appendixsubsubsec{\apphead3{#1}} % normally appendixsubsubseczzz
\def\appendixsubsubseczzz#1{%
\global\advance\subsubsecno by 1
\sectionheading{#1}{subsubsec}{Yappendix}%
{\appendixletter.\the\secno.\the\subsecno.\the\subsubsecno}%
}
\outer\parseargdef\unnumberedsubsubsec{\unnmhead3{#1}} %normally unnumberedsubsubseczzz
\def\unnumberedsubsubseczzz#1{%
\global\advance\subsubsecno by 1
\sectionheading{#1}{subsubsec}{Ynothing}%
{\the\unnumberedno.\the\secno.\the\subsecno.\the\subsubsecno}%
}
% These macros control what the section commands do, according
% to what kind of chapter we are in (ordinary, appendix, or unnumbered).
% Define them by default for a numbered chapter.
\let\section = \numberedsec
\let\subsection = \numberedsubsec
\let\subsubsection = \numberedsubsubsec
% Define @majorheading, @heading and @subheading
% NOTE on use of \vbox for chapter headings, section headings, and such:
% 1) We use \vbox rather than the earlier \line to permit
% overlong headings to fold.
% 2) \hyphenpenalty is set to 10000 because hyphenation in a
% heading is obnoxious; this forbids it.
% 3) Likewise, headings look best if no \parindent is used, and
% if justification is not attempted. Hence \raggedright.
\def\majorheading{%
{\advance\chapheadingskip by 10pt \chapbreak }%
\parsearg\chapheadingzzz
}
\def\chapheading{\chapbreak \parsearg\chapheadingzzz}
\def\chapheadingzzz#1{%
{\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
\parindent=0pt\raggedright
\rmisbold #1\hfill}}%
\bigskip \par\penalty 200\relax
\suppressfirstparagraphindent
}
% @heading, @subheading, @subsubheading.
\parseargdef\heading{\sectionheading{#1}{sec}{Yomitfromtoc}{}
\suppressfirstparagraphindent}
\parseargdef\subheading{\sectionheading{#1}{subsec}{Yomitfromtoc}{}
\suppressfirstparagraphindent}
\parseargdef\subsubheading{\sectionheading{#1}{subsubsec}{Yomitfromtoc}{}
\suppressfirstparagraphindent}
% These macros generate a chapter, section, etc. heading only
% (including whitespace, linebreaking, etc. around it),
% given all the information in convenient, parsed form.
%%% Args are the skip and penalty (usually negative)
\def\dobreak#1#2{\par\ifdim\lastskip<#1\removelastskip\penalty#2\vskip#1\fi}
%%% Define plain chapter starts, and page on/off switching for it
% Parameter controlling skip before chapter headings (if needed)
\newskip\chapheadingskip
\def\chapbreak{\dobreak \chapheadingskip {-4000}}
\def\chappager{\par\vfill\supereject}
% Because \domark is called before \chapoddpage, the filler page will
% get the headings for the next chapter, which is wrong. But we don't
% care -- we just disable all headings on the filler page.
\def\chapoddpage{%
\chappager
\ifodd\pageno \else
\begingroup
\evenheadline={\hfil}\evenfootline={\hfil}%
\oddheadline={\hfil}\oddfootline={\hfil}%
\hbox to 0pt{}%
\chappager
\endgroup
\fi
}
\def\setchapternewpage #1 {\csname CHAPPAG#1\endcsname}
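% For instance, `@setchapternewpage odd' in the Texinfo source expands to
% \csname CHAPPAGodd\endcsname, i.e. \CHAPPAGodd below.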
\def\CHAPPAGoff{%
\global\let\contentsalignmacro = \chappager
\global\let\pchapsepmacro=\chapbreak
\global\let\pagealignmacro=\chappager}
\def\CHAPPAGon{%
\global\let\contentsalignmacro = \chappager
\global\let\pchapsepmacro=\chappager
\global\let\pagealignmacro=\chappager
\global\def\HEADINGSon{\HEADINGSsingle}}
\def\CHAPPAGodd{%
\global\let\contentsalignmacro = \chapoddpage
\global\let\pchapsepmacro=\chapoddpage
\global\let\pagealignmacro=\chapoddpage
\global\def\HEADINGSon{\HEADINGSdouble}}
\CHAPPAGon
% Chapter opening.
%
% #1 is the text, #2 is the section type (Ynumbered, Ynothing,
% Yappendix, Yomitfromtoc), #3 the chapter number.
%
% To test against our argument.
\def\Ynothingkeyword{Ynothing}
\def\Yomitfromtockeyword{Yomitfromtoc}
\def\Yappendixkeyword{Yappendix}
%
\def\chapmacro#1#2#3{%
% Insert the first mark before the heading break (see notes for \domark).
\let\prevchapterdefs=\lastchapterdefs
\let\prevsectiondefs=\lastsectiondefs
\gdef\lastsectiondefs{\gdef\thissectionname{}\gdef\thissectionnum{}%
\gdef\thissection{}}%
%
\def\temptype{#2}%
\ifx\temptype\Ynothingkeyword
\gdef\lastchapterdefs{\gdef\thischaptername{#1}\gdef\thischapternum{}%
\gdef\thischapter{\thischaptername}}%
\else\ifx\temptype\Yomitfromtockeyword
\gdef\lastchapterdefs{\gdef\thischaptername{#1}\gdef\thischapternum{}%
\gdef\thischapter{}}%
\else\ifx\temptype\Yappendixkeyword
\toks0={#1}%
\xdef\lastchapterdefs{%
\gdef\noexpand\thischaptername{\the\toks0}%
\gdef\noexpand\thischapternum{\appendixletter}%
\gdef\noexpand\thischapter{\putwordAppendix{} \noexpand\thischapternum:
\noexpand\thischaptername}%
}%
\else
\toks0={#1}%
\xdef\lastchapterdefs{%
\gdef\noexpand\thischaptername{\the\toks0}%
\gdef\noexpand\thischapternum{\the\chapno}%
\gdef\noexpand\thischapter{\putwordChapter{} \noexpand\thischapternum:
\noexpand\thischaptername}%
}%
\fi\fi\fi
%
% Output the mark. Pass it through \safewhatsit, to take care of
% the preceding space.
\safewhatsit\domark
%
% Insert the chapter heading break.
\pchapsepmacro
%
% Now the second mark, after the heading break. No break points
% between here and the heading.
\let\prevchapterdefs=\lastchapterdefs
\let\prevsectiondefs=\lastsectiondefs
\domark
%
{%
\chapfonts \rmisbold
%
% Have to define \lastsection before calling \donoderef, because the
% xref code eventually uses it. On the other hand, it has to be called
% after \pchapsepmacro, or the headline will change too soon.
\gdef\lastsection{#1}%
%
% Only insert the separating space if we have a chapter/appendix
% number, and don't print the unnumbered ``number''.
\ifx\temptype\Ynothingkeyword
\setbox0 = \hbox{}%
\def\toctype{unnchap}%
\else\ifx\temptype\Yomitfromtockeyword
\setbox0 = \hbox{}% contents like unnumbered, but no toc entry
\def\toctype{omit}%
\else\ifx\temptype\Yappendixkeyword
\setbox0 = \hbox{\putwordAppendix{} #3\enspace}%
\def\toctype{app}%
\else
\setbox0 = \hbox{#3\enspace}%
\def\toctype{numchap}%
\fi\fi\fi
%
% Write the toc entry for this chapter. Must come before the
% \donoderef, because we include the current node name in the toc
% entry, and \donoderef resets it to empty.
\writetocentry{\toctype}{#1}{#3}%
%
% For pdftex, we have to write out the node definition (aka, make
% the pdfdest) after any page break, but before the actual text has
% been typeset. If the destination for the pdf outline is after the
% text, then jumping from the outline may wind up with the text not
% being visible, for instance under high magnification.
\donoderef{#2}%
%
% Typeset the actual heading.
\nobreak % Avoid page breaks at the interline glue.
\vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \raggedright
\hangindent=\wd0 \centerparametersmaybe
\unhbox0 #1\par}%
}%
\nobreak\bigskip % no page break after a chapter title
\nobreak
}
% @centerchap -- centered and unnumbered.
\let\centerparametersmaybe = \relax
\def\centerparameters{%
\advance\rightskip by 3\rightskip
\leftskip = \rightskip
\parfillskip = 0pt
}
% I don't think this chapter style is supported any more, so I'm not
% updating it with the new noderef stuff. We'll see. --karl, 11aug03.
%
\def\setchapterstyle #1 {\csname CHAPF#1\endcsname}
%
\def\unnchfopen #1{%
\chapoddpage {\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
\parindent=0pt\raggedright
\rmisbold #1\hfill}}\bigskip \par\nobreak
}
\def\chfopen #1#2{\chapoddpage {\chapfonts
\vbox to 3in{\vfil \hbox to\hsize{\hfil #2} \hbox to\hsize{\hfil #1} \vfil}}%
\par\penalty 5000 %
}
\def\centerchfopen #1{%
\chapoddpage {\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
\parindent=0pt
\hfill {\rmisbold #1}\hfill}}\bigskip \par\nobreak
}
\def\CHAPFopen{%
\global\let\chapmacro=\chfopen
\global\let\centerchapmacro=\centerchfopen}
% Section titles. These macros combine the section number parts and
% call the generic \sectionheading to do the printing.
%
\newskip\secheadingskip
\def\secheadingbreak{\dobreak \secheadingskip{-1000}}
% Subsection titles.
\newskip\subsecheadingskip
\def\subsecheadingbreak{\dobreak \subsecheadingskip{-500}}
% Subsubsection titles.
\def\subsubsecheadingskip{\subsecheadingskip}
\def\subsubsecheadingbreak{\subsecheadingbreak}
% Print any size, any type, section title.
%
% #1 is the text, #2 is the section level (sec/subsec/subsubsec), #3 is
% the section type for xrefs (Ynumbered, Ynothing, Yappendix), #4 is the
% section number.
%
\def\seckeyword{sec}
%
\def\sectionheading#1#2#3#4{%
{%
% Switch to the right set of fonts.
\csname #2fonts\endcsname \rmisbold
%
\def\sectionlevel{#2}%
\def\temptype{#3}%
%
% Insert first mark before the heading break (see notes for \domark).
\let\prevsectiondefs=\lastsectiondefs
\ifx\temptype\Ynothingkeyword
\ifx\sectionlevel\seckeyword
\gdef\lastsectiondefs{\gdef\thissectionname{#1}\gdef\thissectionnum{}%
\gdef\thissection{\thissectionname}}%
\fi
\else\ifx\temptype\Yomitfromtockeyword
% Don't redefine \thissection.
\else\ifx\temptype\Yappendixkeyword
\ifx\sectionlevel\seckeyword
\toks0={#1}%
\xdef\lastsectiondefs{%
\gdef\noexpand\thissectionname{\the\toks0}%
\gdef\noexpand\thissectionnum{#4}%
\gdef\noexpand\thissection{\putwordSection{} \noexpand\thissectionnum:
\noexpand\thissectionname}%
}%
\fi
\else
\ifx\sectionlevel\seckeyword
\toks0={#1}%
\xdef\lastsectiondefs{%
\gdef\noexpand\thissectionname{\the\toks0}%
\gdef\noexpand\thissectionnum{#4}%
\gdef\noexpand\thissection{\putwordSection{} \noexpand\thissectionnum:
\noexpand\thissectionname}%
}%
\fi
\fi\fi\fi
%
% Output the mark. Pass it through \safewhatsit, to take care of
% the preceding space.
\safewhatsit\domark
%
% Insert space above the heading.
\csname #2headingbreak\endcsname
%
% Now the second mark, after the heading break. No break points
% between here and the heading.
\let\prevsectiondefs=\lastsectiondefs
\domark
%
% Only insert the space after the number if we have a section number.
\ifx\temptype\Ynothingkeyword
\setbox0 = \hbox{}%
\def\toctype{unn}%
\gdef\lastsection{#1}%
\else\ifx\temptype\Yomitfromtockeyword
% for @headings -- no section number, don't include in toc,
% and don't redefine \lastsection.
\setbox0 = \hbox{}%
\def\toctype{omit}%
\let\sectionlevel=\empty
\else\ifx\temptype\Yappendixkeyword
\setbox0 = \hbox{#4\enspace}%
\def\toctype{app}%
\gdef\lastsection{#1}%
\else
\setbox0 = \hbox{#4\enspace}%
\def\toctype{num}%
\gdef\lastsection{#1}%
\fi\fi\fi
%
% Write the toc entry (before \donoderef). See comments in \chapmacro.
\writetocentry{\toctype\sectionlevel}{#1}{#4}%
%
% Write the node reference (= pdf destination for pdftex).
% Again, see comments in \chapmacro.
\donoderef{#3}%
%
% Interline glue will be inserted when the vbox is completed.
% That glue will be a valid breakpoint for the page, since it'll be
% preceded by a whatsit (usually from the \donoderef, or from the
% \writetocentry if there was no node). We don't want to allow that
% break, since then the whatsits could end up on page n while the
% section is on page n+1, thus toc/etc. are wrong. Debian bug 276000.
\nobreak
%
% Output the actual section heading.
\vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \raggedright
\hangindent=\wd0 % zero if no section number
\unhbox0 #1}%
}%
% Add extra space after the heading -- half of whatever came above it.
% Don't allow stretch, though.
\kern .5 \csname #2headingskip\endcsname
%
% Do not let the kern be a potential breakpoint, as it would be if it
% was followed by glue.
\nobreak
%
% We'll almost certainly start a paragraph next, so don't let that
% glue accumulate. (Not a breakpoint because it's preceded by a
% discardable item.)
\vskip-\parskip
%
% This is purely so the last item on the list is a known \penalty >
% 10000. This is so \startdefun can avoid allowing breakpoints after
% section headings. Otherwise, it would insert a valid breakpoint between:
%
% @section sec-whatever
% @deffn def-whatever
\penalty 10001
}
\message{toc,}
% Table of contents.
\newwrite\tocfile
% Write an entry to the toc file, opening it if necessary.
% Called from @chapter, etc.
%
% Example usage: \writetocentry{sec}{Section Name}{\the\chapno.\the\secno}
% We append the current node name (if any) and page number as additional
% arguments for the \{chap,sec,...}entry macros which will eventually
% read this. The node name is used in the pdf outlines as the
% destination to jump to.
%
% We open the .toc file for writing here instead of at @setfilename (or
% any other fixed time) so that @contents can be anywhere in the document.
% But if #1 is `omit', then we don't do anything. This is used for the
% table of contents chapter openings themselves.
%
\newif\iftocfileopened
\def\omitkeyword{omit}%
%
\def\writetocentry#1#2#3{%
\edef\writetoctype{#1}%
\ifx\writetoctype\omitkeyword \else
\iftocfileopened\else
\immediate\openout\tocfile = \jobname.toc
\global\tocfileopenedtrue
\fi
%
\iflinks
{\atdummies
\edef\temp{%
\write\tocfile{@#1entry{#2}{#3}{\lastnode}{\noexpand\folio}}}%
\temp
}%
\fi
\fi
%
% Tell \shipout to create a pdf destination on each page, if we're
% writing pdf. These are used in the table of contents. We can't
% just write one on every page because the title pages are numbered
% 1 and 2 (the page numbers aren't printed), and so are the first
% two pages of the document. Thus, we'd have two destinations named
% `1', and two named `2'.
\ifpdf \global\pdfmakepagedesttrue \fi
}
% These characters do not print properly in the Computer Modern roman
% fonts, so we must take special care. This is more or less redundant
% with the Texinfo input format setup at the end of this file.
%
\def\activecatcodes{%
\catcode`\"=\active
\catcode`\$=\active
\catcode`\<=\active
\catcode`\>=\active
\catcode`\\=\active
\catcode`\^=\active
\catcode`\_=\active
\catcode`\|=\active
\catcode`\~=\active
}
% Read the toc file, which is essentially Texinfo input.
\def\readtocfile{%
\setupdatafile
\activecatcodes
\input \tocreadfilename
}
\newskip\contentsrightmargin \contentsrightmargin=1in
\newcount\savepageno
\newcount\lastnegativepageno \lastnegativepageno = -1
% Prepare to read what we've written to \tocfile.
%
\def\startcontents#1{%
% If @setchapternewpage on, and @headings double, the contents should
% start on an odd page, unlike chapters. Thus, we maintain
% \contentsalignmacro in parallel with \pagealignmacro.
% From: Torbjorn Granlund
\contentsalignmacro
\immediate\closeout\tocfile
%
% Don't need to put `Contents' or `Short Contents' in the headline.
% It is abundantly clear what they are.
\chapmacro{#1}{Yomitfromtoc}{}%
%
\savepageno = \pageno
\begingroup % Set up to handle contents files properly.
\raggedbottom % Worry more about breakpoints than the bottom.
\advance\hsize by -\contentsrightmargin % Don't use the full line length.
%
% Roman numerals for page numbers.
\ifnum \pageno>0 \global\pageno = \lastnegativepageno \fi
}
% redefined for the two-volume lispref. We always output on
% \jobname.toc even if this is redefined.
%
\def\tocreadfilename{\jobname.toc}
% Normal (long) toc.
%
\def\contents{%
\startcontents{\putwordTOC}%
\openin 1 \tocreadfilename\space
\ifeof 1 \else
\readtocfile
\fi
\vfill \eject
\contentsalignmacro % in case @setchapternewpage odd is in effect
\ifeof 1 \else
\pdfmakeoutlines
\fi
\closein 1
\endgroup
\lastnegativepageno = \pageno
\global\pageno = \savepageno
}
% And just the chapters.
\def\summarycontents{%
\startcontents{\putwordShortTOC}%
%
\let\numchapentry = \shortchapentry
\let\appentry = \shortchapentry
\let\unnchapentry = \shortunnchapentry
% We want a true roman here for the page numbers.
\secfonts
\let\rm=\shortcontrm \let\bf=\shortcontbf
\let\sl=\shortcontsl \let\tt=\shortconttt
\rm
\hyphenpenalty = 10000
\advance\baselineskip by 1pt % Open it up a little.
\def\numsecentry##1##2##3##4{}
\let\appsecentry = \numsecentry
\let\unnsecentry = \numsecentry
\let\numsubsecentry = \numsecentry
\let\appsubsecentry = \numsecentry
\let\unnsubsecentry = \numsecentry
\let\numsubsubsecentry = \numsecentry
\let\appsubsubsecentry = \numsecentry
\let\unnsubsubsecentry = \numsecentry
\openin 1 \tocreadfilename\space
\ifeof 1 \else
\readtocfile
\fi
\closein 1
\vfill \eject
\contentsalignmacro % in case @setchapternewpage odd is in effect
\endgroup
\lastnegativepageno = \pageno
\global\pageno = \savepageno
}
\let\shortcontents = \summarycontents
% Typeset the label for a chapter or appendix for the short contents.
% The arg is, e.g., `A' for an appendix, or `3' for a chapter.
%
\def\shortchaplabel#1{%
% This space should be enough, since a single number is .5em, and the
% widest letter (M) is 1em, at least in the Computer Modern fonts.
% But use \hss just in case.
% (This space doesn't include the extra space that gets added after
% the label; that gets put in by \shortchapentry above.)
%
% We'd like to right-justify chapter numbers, but that looks strange
% with appendix letters. And right-justifying numbers and
% left-justifying letters looks strange when there are fewer than 10
% chapters. Have to read the whole toc once to know how many chapters
% there are before deciding ...
\hbox to 1em{#1\hss}%
}
% These macros generate individual entries in the table of contents.
% The first argument is the chapter or section name.
% The last argument is the page number.
% The arguments in between are the chapter number, section number, ...
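% For instance, a chapter line written to the .toc file by \writetocentry
% looks like `@numchapentry{Title}{3}{Node Name}{27}' -- title, chapter
% number, node name, page number (the values here are only illustrative).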
% Chapters, in the main contents.
\def\numchapentry#1#2#3#4{\dochapentry{#2\labelspace#1}{#4}}
%
% Chapters, in the short toc.
% See comments in \dochapentry re vbox and related settings.
\def\shortchapentry#1#2#3#4{%
\tocentry{\shortchaplabel{#2}\labelspace #1}{\doshortpageno\bgroup#4\egroup}%
}
% Appendices, in the main contents.
% Need the word Appendix, and a fixed-size box.
%
\def\appendixbox#1{%
% We use M since it's probably the widest letter.
\setbox0 = \hbox{\putwordAppendix{} M}%
\hbox to \wd0{\putwordAppendix{} #1\hss}}
%
\def\appentry#1#2#3#4{\dochapentry{\appendixbox{#2}\labelspace#1}{#4}}
% Unnumbered chapters.
\def\unnchapentry#1#2#3#4{\dochapentry{#1}{#4}}
\def\shortunnchapentry#1#2#3#4{\tocentry{#1}{\doshortpageno\bgroup#4\egroup}}
% Sections.
\def\numsecentry#1#2#3#4{\dosecentry{#2\labelspace#1}{#4}}
\let\appsecentry=\numsecentry
\def\unnsecentry#1#2#3#4{\dosecentry{#1}{#4}}
% Subsections.
\def\numsubsecentry#1#2#3#4{\dosubsecentry{#2\labelspace#1}{#4}}
\let\appsubsecentry=\numsubsecentry
\def\unnsubsecentry#1#2#3#4{\dosubsecentry{#1}{#4}}
% And subsubsections.
\def\numsubsubsecentry#1#2#3#4{\dosubsubsecentry{#2\labelspace#1}{#4}}
\let\appsubsubsecentry=\numsubsubsecentry
\def\unnsubsubsecentry#1#2#3#4{\dosubsubsecentry{#1}{#4}}
% This parameter controls the indentation of the various levels.
% Same as \defaultparindent.
\newdimen\tocindent \tocindent = 15pt
% Now for the actual typesetting. In all these, #1 is the text and #2 is the
% page number.
%
% If the toc has to be broken over pages, we want it to be at chapters
% if at all possible; hence the \penalty.
\def\dochapentry#1#2{%
\penalty-300 \vskip1\baselineskip plus.33\baselineskip minus.25\baselineskip
\begingroup
\chapentryfonts
\tocentry{#1}{\dopageno\bgroup#2\egroup}%
\endgroup
\nobreak\vskip .25\baselineskip plus.1\baselineskip
}
\def\dosecentry#1#2{\begingroup
\secentryfonts \leftskip=\tocindent
\tocentry{#1}{\dopageno\bgroup#2\egroup}%
\endgroup}
\def\dosubsecentry#1#2{\begingroup
\subsecentryfonts \leftskip=2\tocindent
\tocentry{#1}{\dopageno\bgroup#2\egroup}%
\endgroup}
\def\dosubsubsecentry#1#2{\begingroup
\subsubsecentryfonts \leftskip=3\tocindent
\tocentry{#1}{\dopageno\bgroup#2\egroup}%
\endgroup}
% We use the same \entry macro as for the index entries.
\let\tocentry = \entry
% Space between chapter (or whatever) number and the title.
\def\labelspace{\hskip1em \relax}
\def\dopageno#1{{\rm #1}}
\def\doshortpageno#1{{\rm #1}}
\def\chapentryfonts{\secfonts \rm}
\def\secentryfonts{\textfonts}
\def\subsecentryfonts{\textfonts}
\def\subsubsecentryfonts{\textfonts}
\message{environments,}
% @foo ... @end foo.
% Markup style infrastructure. \defmarkupstylesetup\INITMACRO will
% define and register \INITMACRO to be called on markup style changes.
% \INITMACRO can check \currentmarkupstyle for the innermost
% style and the set of \ifmarkupSTYLE switches for all styles
% currently in effect.
\newif\ifmarkupvar
\newif\ifmarkupsamp
\newif\ifmarkupkey
%\newif\ifmarkupfile % @file == @samp.
%\newif\ifmarkupoption % @option == @samp.
\newif\ifmarkupcode
\newif\ifmarkupkbd
%\newif\ifmarkupenv % @env == @code.
%\newif\ifmarkupcommand % @command == @code.
\newif\ifmarkuptex % @tex (and part of @math, for now).
\newif\ifmarkupexample
\newif\ifmarkupverb
\newif\ifmarkupverbatim
\let\currentmarkupstyle\empty
\def\setupmarkupstyle#1{%
\csname markup#1true\endcsname
\def\currentmarkupstyle{#1}%
\markupstylesetup
}
\let\markupstylesetup\empty
\def\defmarkupstylesetup#1{%
\expandafter\def\expandafter\markupstylesetup
\expandafter{\markupstylesetup #1}%
\def#1%
}
% Markup style setup for left and right quotes.
\defmarkupstylesetup\markupsetuplq{%
\expandafter\let\expandafter \temp \csname markupsetuplq\currentmarkupstyle\endcsname
\ifx\temp\relax \markupsetuplqdefault \else \temp \fi
}
\defmarkupstylesetup\markupsetuprq{%
\expandafter\let\expandafter \temp \csname markupsetuprq\currentmarkupstyle\endcsname
\ifx\temp\relax \markupsetuprqdefault \else \temp \fi
}
{
\catcode`\'=\active
\catcode`\`=\active
\gdef\markupsetuplqdefault{\let`\lq}
\gdef\markupsetuprqdefault{\let'\rq}
\gdef\markupsetcodequoteleft{\let`\codequoteleft}
\gdef\markupsetcodequoteright{\let'\codequoteright}
\gdef\markupsetnoligaturesquoteleft{\let`\noligaturesquoteleft}
}
\let\markupsetuplqcode \markupsetcodequoteleft
\let\markupsetuprqcode \markupsetcodequoteright
\let\markupsetuplqexample \markupsetcodequoteleft
\let\markupsetuprqexample \markupsetcodequoteright
\let\markupsetuplqverb \markupsetcodequoteleft
\let\markupsetuprqverb \markupsetcodequoteright
\let\markupsetuplqverbatim \markupsetcodequoteleft
\let\markupsetuprqverbatim \markupsetcodequoteright
\let\markupsetuplqsamp \markupsetnoligaturesquoteleft
\let\markupsetuplqkbd \markupsetnoligaturesquoteleft
% Allow an option to not replace quotes with a regular directed right
% quote/apostrophe (char 0x27), but instead use the undirected quote
% from cmtt (char 0x0d). The undirected quote is ugly, so don't make it
% the default, but it works for pasting with more pdf viewers (at least
% evince), the lilypond developers report. xpdf does work with the
% regular 0x27.
%
\def\codequoteright{%
\expandafter\ifx\csname SETtxicodequoteundirected\endcsname\relax
\expandafter\ifx\csname SETcodequoteundirected\endcsname\relax
'%
\else \char'15 \fi
\else \char'15 \fi
}
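% For instance, `@set txicodequoteundirected' in the document defines
% \SETtxicodequoteundirected, so the first \ifx above fails and the
% undirected \char'15 is printed instead of the directed quote.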
%
% and a similar option for the left quote char vs. a grave accent.
% Modern fonts display ASCII 0x60 as a grave accent, so some people like
% the code environments to do likewise.
%
\def\codequoteleft{%
\expandafter\ifx\csname SETtxicodequotebacktick\endcsname\relax
\expandafter\ifx\csname SETcodequotebacktick\endcsname\relax
% [Knuth] pp. 380,381,391
% \relax disables Spanish ligatures ?` and !` of \tt font.
\relax`%
\else \char'22 \fi
\else \char'22 \fi
}
% [Knuth] pp. 380,381,391, disable Spanish ligatures ?` and !` of \tt font.
\def\noligaturesquoteleft{\relax\lq}
% @point{}, @result{}, @expansion{}, @print{}, @equiv{}.
%
% Since these characters are used in examples, they should be an even number of
% \tt widths. Each \tt character is 1en, so two makes it 1em.
%
\def\point{$\star$}
\def\arrow{\leavevmode\raise.05ex\hbox to 1em{\hfil$\rightarrow$\hfil}}
\def\result{\leavevmode\raise.05ex\hbox to 1em{\hfil$\Rightarrow$\hfil}}
\def\expansion{\leavevmode\hbox to 1em{\hfil$\mapsto$\hfil}}
\def\print{\leavevmode\lower.1ex\hbox to 1em{\hfil$\dashv$\hfil}}
\def\equiv{\leavevmode\hbox to 1em{\hfil$\ptexequiv$\hfil}}
% The @error{} command.
% Adapted from the TeXbook's \boxit.
%
\newbox\errorbox
%
{\tentt \global\dimen0 = 3em}% Width of the box.
\dimen2 = .55pt % Thickness of rules
% The text. (`r' is open on the right, `e' somewhat less so on the left.)
\setbox0 = \hbox{\kern-.75pt \reducedsf error\kern-1.5pt}
%
\setbox\errorbox=\hbox to \dimen0{\hfil
\hsize = \dimen0 \advance\hsize by -5.8pt % Space to left+right.
\advance\hsize by -2\dimen2 % Rules.
\vbox{%
\hrule height\dimen2
\hbox{\vrule width\dimen2 \kern3pt % Space to left of text.
\vtop{\kern2.4pt \box0 \kern2.4pt}% Space above/below.
\kern3pt\vrule width\dimen2}% Space to right.
\hrule height\dimen2}
\hfil}
%
\def\error{\leavevmode\lower.7ex\copy\errorbox}
% @tex ... @end tex escapes into raw TeX temporarily.
% One exception: @ is still an escape character, so that @end tex works.
% But \@ or @@ will get a plain TeX @ character.
\envdef\tex{%
\setupmarkupstyle{tex}%
\catcode `\\=0 \catcode `\{=1 \catcode `\}=2
\catcode `\$=3 \catcode `\&=4 \catcode `\#=6
\catcode `\^=7 \catcode `\_=8 \catcode `\~=\active \let~=\tie
\catcode `\%=14
\catcode `\+=\other
\catcode `\"=\other
\catcode `\|=\other
\catcode `\<=\other
\catcode `\>=\other
\catcode`\`=\other
\catcode`\'=\other
\escapechar=`\\
%
\let\b=\ptexb
\let\bullet=\ptexbullet
\let\c=\ptexc
\let\,=\ptexcomma
\let\.=\ptexdot
\let\dots=\ptexdots
\let\equiv=\ptexequiv
\let\!=\ptexexclam
\let\i=\ptexi
\let\indent=\ptexindent
\let\noindent=\ptexnoindent
\let\{=\ptexlbrace
\let\+=\tabalign
\let\}=\ptexrbrace
\let\/=\ptexslash
\let\*=\ptexstar
\let\t=\ptext
\expandafter \let\csname top\endcsname=\ptextop % outer
\let\frenchspacing=\plainfrenchspacing
%
\def\endldots{\mathinner{\ldots\ldots\ldots\ldots}}%
\def\enddots{\relax\ifmmode\endldots\else$\mathsurround=0pt \endldots\,$\fi}%
\def\@{@}%
}
% There is no need to define \Etex.
% Define @lisp ... @end lisp.
% @lisp environment forms a group so it can rebind things,
% including the definition of @end lisp (which normally is erroneous).
% Amount to narrow the margins by for @lisp.
\newskip\lispnarrowing \lispnarrowing=0.4in
% This is the definition that ^^M gets inside @lisp, @example, and other
% such environments. \null is better than a space, since it doesn't
% have any width.
\def\lisppar{\null\endgraf}
% This space is always present above and below environments.
\newskip\envskipamount \envskipamount = 0pt
% Make spacing above and below the environment symmetrical. We use \parskip here
% to help in doing that, since in @example-like environments \parskip
% is reset to zero; thus the \afterenvbreak inserts no space -- but the
% start of the next paragraph will insert \parskip.
%
\def\aboveenvbreak{{%
% =10000 instead of <10000 because of a special case in \itemzzz and
% \sectionheading, q.v.
\ifnum \lastpenalty=10000 \else
\advance\envskipamount by \parskip
\endgraf
\ifdim\lastskip<\envskipamount
\removelastskip
% it's not a good place to break if the last penalty was \nobreak
% or better ...
\ifnum\lastpenalty<10000 \penalty-50 \fi
\vskip\envskipamount
\fi
\fi
}}
\let\afterenvbreak = \aboveenvbreak
% \nonarrowing is a flag. If "set", @lisp etc. don't narrow margins; they
% also clear the flag, so that embedded environments do the narrowing again.
\let\nonarrowing=\relax
% @cartouche ... @end cartouche: draw rectangle w/rounded corners around
% environment contents.
\font\circle=lcircle10
\newdimen\circthick
\newdimen\cartouter\newdimen\cartinner
\newskip\normbskip\newskip\normpskip\newskip\normlskip
\circthick=\fontdimen8\circle
%
\def\ctl{{\circle\char'013\hskip -6pt}}% 6pt from pl file: 1/2charwidth
\def\ctr{{\hskip 6pt\circle\char'010}}
\def\cbl{{\circle\char'012\hskip -6pt}}
\def\cbr{{\hskip 6pt\circle\char'011}}
\def\carttop{\hbox to \cartouter{\hskip\lskip
\ctl\leaders\hrule height\circthick\hfil\ctr
\hskip\rskip}}
\def\cartbot{\hbox to \cartouter{\hskip\lskip
\cbl\leaders\hrule height\circthick\hfil\cbr
\hskip\rskip}}
%
\newskip\lskip\newskip\rskip
\envdef\cartouche{%
\ifhmode\par\fi % can't be in the midst of a paragraph.
\startsavinginserts
\lskip=\leftskip \rskip=\rightskip
\leftskip=0pt\rightskip=0pt % we want these *outside*.
\cartinner=\hsize \advance\cartinner by-\lskip
\advance\cartinner by-\rskip
\cartouter=\hsize
\advance\cartouter by 18.4pt % allow for 3pt kerns on either
% side, and for 6pt waste from
% each corner char, and rule thickness
\normbskip=\baselineskip \normpskip=\parskip \normlskip=\lineskip
% Flag to tell @lisp, etc., not to narrow margin.
\let\nonarrowing = t%
\vbox\bgroup
\baselineskip=0pt\parskip=0pt\lineskip=0pt
\carttop
\hbox\bgroup
\hskip\lskip
\vrule\kern3pt
\vbox\bgroup
\kern3pt
\hsize=\cartinner
\baselineskip=\normbskip
\lineskip=\normlskip
\parskip=\normpskip
\vskip -\parskip
\comment % For explanation, see the end of \def\group.
}
\def\Ecartouche{%
\ifhmode\par\fi
\kern3pt
\egroup
\kern3pt\vrule
\hskip\rskip
\egroup
\cartbot
\egroup
\checkinserts
}
% This macro is called at the beginning of all the @example variants,
% inside a group.
\def\nonfillstart{%
\aboveenvbreak
\hfuzz = 12pt % Don't be fussy
\sepspaces % Make spaces be word-separators rather than space tokens.
\let\par = \lisppar % don't ignore blank lines
\obeylines % each line of input is a line of output
\parskip = 0pt
\parindent = 0pt
\emergencystretch = 0pt % don't try to avoid overfull boxes
\ifx\nonarrowing\relax
\advance \leftskip by \lispnarrowing
\exdentamount=\lispnarrowing
\else
\let\nonarrowing = \relax
\fi
\let\exdent=\nofillexdent
}
% If you want all examples etc. small: @set dispenvsize small.
% If you want even small examples the full size: @set dispenvsize nosmall.
% This affects the following displayed environments:
% @example, @display, @format, @lisp
%
\def\smallword{small}
\def\nosmallword{nosmall}
\let\SETdispenvsize\relax
\def\setnormaldispenv{%
\ifx\SETdispenvsize\smallword
% end paragraph for sake of leading, in case document has no blank
% line. This is redundant with what happens in \aboveenvbreak, but
% we need to do it before changing the fonts, and it's inconvenient
% to change the fonts afterward.
\ifnum \lastpenalty=10000 \else \endgraf \fi
\smallexamplefonts \rm
\fi
}
\def\setsmalldispenv{%
\ifx\SETdispenvsize\nosmallword
\else
\ifnum \lastpenalty=10000 \else \endgraf \fi
\smallexamplefonts \rm
\fi
}
% We often define two environments, @foo and @smallfoo.
% Let's do it by one command:
\def\makedispenv #1#2{
\expandafter\envdef\csname#1\endcsname {\setnormaldispenv #2}
\expandafter\envdef\csname small#1\endcsname {\setsmalldispenv #2}
\expandafter\let\csname E#1\endcsname \afterenvbreak
\expandafter\let\csname Esmall#1\endcsname \afterenvbreak
}
% Define two synonyms:
\def\maketwodispenvs #1#2#3{
\makedispenv{#1}{#3}
\makedispenv{#2}{#3}
}
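% For instance, \maketwodispenvs{lisp}{example}{...} just below defines
% @lisp, @smalllisp, @example and @smallexample with the same body, and
% makes the corresponding @end commands do \afterenvbreak.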
% @lisp: indented, narrowed, typewriter font; @example: same as @lisp.
%
% @smallexample and @smalllisp: use smaller fonts.
% Originally contributed by Pavel@xerox.
%
\maketwodispenvs {lisp}{example}{%
\nonfillstart
\tt\setupmarkupstyle{example}%
\let\kbdfont = \kbdexamplefont % Allow @kbd to do something special.
\gobble % eat return
}
% @display/@smalldisplay: same as @lisp except keep current font.
%
\makedispenv {display}{%
\nonfillstart
\gobble
}
% @format/@smallformat: same as @display except don't narrow margins.
%
\makedispenv{format}{%
\let\nonarrowing = t%
\nonfillstart
\gobble
}
% @flushleft: same as @format, but doesn't obey \SETdispenvsize.
\envdef\flushleft{%
\let\nonarrowing = t%
\nonfillstart
\gobble
}
\let\Eflushleft = \afterenvbreak
% @flushright.
%
\envdef\flushright{%
\let\nonarrowing = t%
\nonfillstart
\advance\leftskip by 0pt plus 1fill
\gobble
}
\let\Eflushright = \afterenvbreak
% @quotation does normal linebreaking (hence we can't use \nonfillstart)
% and narrows the margins. We keep \parskip nonzero in general, since
% we're doing normal filling. So, when using \aboveenvbreak and
% \afterenvbreak, temporarily make \parskip 0.
%
\def\quotationstart{%
{\parskip=0pt \aboveenvbreak}% because \aboveenvbreak inserts \parskip
\parindent=0pt
%
% @cartouche defines \nonarrowing to inhibit narrowing at next level down.
\ifx\nonarrowing\relax
\advance\leftskip by \lispnarrowing
\advance\rightskip by \lispnarrowing
\exdentamount = \lispnarrowing
\else
\let\nonarrowing = \relax
\fi
\parsearg\quotationlabel
}
\envdef\quotation{%
\setnormaldispenv
\quotationstart
}
\envdef\smallquotation{%
\setsmalldispenv
\quotationstart
}
\let\Esmallquotation = \Equotation
% We have retained a nonzero parskip for the environment, since we're
% doing normal filling.
%
\def\Equotation{%
\par
\ifx\quotationauthor\undefined\else
% indent a bit.
\leftline{\kern 2\leftskip \sl ---\quotationauthor}%
\fi
{\parskip=0pt \afterenvbreak}%
}
% If we're given an argument, typeset it in bold with a colon after.
\def\quotationlabel#1{%
\def\temp{#1}%
\ifx\temp\empty \else
{\bf #1: }%
\fi
}
% LaTeX-like @verbatim...@end verbatim and @verb{...}
% If we want to allow any character as delimiter,
% we need the curly braces so that makeinfo sees the @verb command, eg:
% `@verbx...x' would look like the '@verbx' command. --janneke@gnu.org
%
% [Knuth]: Donald Ervin Knuth, 1996. The TeXbook.
%
% [Knuth] p.344; only we need to do the other characters Texinfo sets
% active too. Otherwise, they get lost as the first character on a
% verbatim line.
\def\dospecials{%
\do\ \do\\\do\{\do\}\do\$\do\&%
\do\#\do\^\do\^^K\do\_\do\^^A\do\%\do\~%
\do\<\do\>\do\|\do\@\do+\do\"%
% Don't do the quotes -- if we do, @set txicodequoteundirected and
% @set txicodequotebacktick will not have effect on @verb and
% @verbatim, and ?` and !` ligatures won't get disabled.
%\do\`\do\'%
}
%
% [Knuth] p. 380
\def\uncatcodespecials{%
\def\do##1{\catcode`##1=\other}\dospecials}
%
% Setup for the @verb command.
%
% Eight spaces for a tab
\begingroup
\catcode`\^^I=\active
\gdef\tabeightspaces{\catcode`\^^I=\active\def^^I{\ \ \ \ \ \ \ \ }}
\endgroup
%
\def\setupverb{%
\tt % easiest (and conventionally used) font for verbatim
\def\par{\leavevmode\endgraf}%
\setupmarkupstyle{verb}%
\tabeightspaces
% Respect line breaks,
% print special symbols as themselves, and
% make each space count
% must do in this order:
\obeylines \uncatcodespecials \sepspaces
}
% Setup for the @verbatim environment
%
% Real tab expansion
\newdimen\tabw \setbox0=\hbox{\tt\space} \tabw=8\wd0 % tab amount
%
\def\starttabbox{\setbox0=\hbox\bgroup}
%
\begingroup
\catcode`\^^I=\active
\gdef\tabexpand{%
\catcode`\^^I=\active
\def^^I{\leavevmode\egroup
\dimen0=\wd0 % the width so far, or since the previous tab
\divide\dimen0 by\tabw
\multiply\dimen0 by\tabw % compute previous multiple of \tabw
\advance\dimen0 by\tabw % advance to next multiple of \tabw
\wd0=\dimen0 \box0 \starttabbox
}%
}
\endgroup
% start the verbatim environment.
\def\setupverbatim{%
\let\nonarrowing = t%
\nonfillstart
% Easiest (and conventionally used) font for verbatim
\tt
\def\par{\leavevmode\egroup\box0\endgraf}%
\tabexpand
\setupmarkupstyle{verbatim}%
% Respect line breaks,
% print special symbols as themselves, and
% make each space count
% must do in this order:
\obeylines \uncatcodespecials \sepspaces
\everypar{\starttabbox}%
}
% Do the @verb magic: verbatim text is quoted by unique
% delimiter characters. Before first delimiter expect a
% left brace, after last delimiter expect closing brace:
%
% \def\doverb'{'<char>#1<char>'}'{#1}
%
% [Knuth] p. 382; only eat outer {}
\begingroup
\catcode`[=1\catcode`]=2\catcode`\{=\other\catcode`\}=\other
\gdef\doverb{#1[\def\next##1#1}[##1\endgroup]\next]
\endgroup
%
\def\verb{\begingroup\setupverb\doverb}
%
%
% Do the @verbatim magic: define the macro \doverbatim so that
% the (first) argument ends when '@end verbatim' is reached, ie:
%
% \def\doverbatim#1@end verbatim{#1}
%
% For Texinfo it's a lot easier than for LaTeX,
% because texinfo's \verbatim doesn't stop at '\end{verbatim}':
% we need not redefine '\', '{' and '}'.
%
% Inspired by LaTeX's verbatim command set [latex.ltx]
%
\begingroup
\catcode`\ =\active
\obeylines %
% ignore everything up to the first ^^M, that's the newline at the end
% of the @verbatim input line itself. Otherwise we get an extra blank
% line in the output.
\xdef\doverbatim#1^^M#2@end verbatim{#2\noexpand\end\gobble verbatim}%
% We really want {...\end verbatim} in the body of the macro, but
% without the active space; thus we have to use \xdef and \gobble.
\endgroup
%
\envdef\verbatim{%
\setupverbatim\doverbatim
}
\let\Everbatim = \afterenvbreak
% @verbatiminclude FILE - insert text of file in verbatim environment.
%
\def\verbatiminclude{\parseargusing\filenamecatcodes\doverbatiminclude}
%
\def\doverbatiminclude#1{%
{%
\makevalueexpandable
\setupverbatim
\indexnofonts % Allow `@@' and other weird things in file names.
\input #1
\afterenvbreak
}%
}
% @copying ... @end copying.
% Save the text away for @insertcopying later.
%
% We save the uninterpreted tokens, rather than creating a box.
% Saving the text in a box would be much easier, but then all the
% typesetting commands (@smallbook, font changes, etc.) have to be done
% beforehand -- and a) we want @copying to be done first in the source
% file; b) letting users define the frontmatter in as flexible an order as
% possible is very desirable.
%
\def\copying{\checkenv{}\begingroup\scanargctxt\docopying}
\def\docopying#1@end copying{\endgroup\def\copyingtext{#1}}
%
\def\insertcopying{%
\begingroup
\parindent = 0pt % paragraph indentation looks wrong on title page
\scanexp\copyingtext
\endgroup
}
\message{defuns,}
% @defun etc.
\newskip\defbodyindent \defbodyindent=.4in
\newskip\defargsindent \defargsindent=50pt
\newskip\deflastargmargin \deflastargmargin=18pt
\newcount\defunpenalty
% Start the processing of @deffn:
\def\startdefun{%
\ifnum\lastpenalty<10000
\medbreak
\defunpenalty=10003 % Will keep this @deffn together with the
% following @def command, see below.
\else
% If there are two @def commands in a row, we'll have a \nobreak,
% which is there to keep the function description together with its
% header. But if there's nothing but headers, we need to allow a
% break somewhere. Check specifically for penalty 10002, inserted
% by \printdefunline, instead of 10000, since the sectioning
% commands also insert a nobreak penalty, and we don't want to allow
% a break between a section heading and a defun.
%
% As a minor refinement, we avoid "club" headers by signalling
% with penalty of 10003 after the very first @deffn in the
% sequence (see above), and penalty of 10002 after any following
% @def command.
\ifnum\lastpenalty=10002 \penalty2000 \else \defunpenalty=10002 \fi
%
% Similarly, after a section heading, do not allow a break.
% But do insert the glue.
\medskip % preceded by discardable penalty, so not a breakpoint
\fi
%
\parindent=0in
\advance\leftskip by \defbodyindent
\exdentamount=\defbodyindent
}
\def\dodefunx#1{%
% First, check whether we are in the right environment:
\checkenv#1%
%
% As above, allow line break if we have multiple x headers in a row.
% It's not a great place, though.
\ifnum\lastpenalty=10002 \penalty3000 \else \defunpenalty=10002 \fi
%
% And now, it's time to reuse the body of the original defun:
\expandafter\gobbledefun#1%
}
\def\gobbledefun#1\startdefun{}
% \printdefunline \deffnheader{text}
%
\def\printdefunline#1#2{%
\begingroup
% call \deffnheader:
#1#2 \endheader
% common ending:
\interlinepenalty = 10000
\advance\rightskip by 0pt plus 1fil
\endgraf
\nobreak\vskip -\parskip
\penalty\defunpenalty % signal to \startdefun and \dodefunx
% Some of the @defun-type tags do not enable magic parentheses,
% rendering the following check redundant. But we don't optimize.
\checkparencounts
\endgroup
}
\def\Edefun{\endgraf\medbreak}
% \makedefun{deffn} creates \deffn, \deffnx and \Edeffn;
% the only thing remaining is to define \deffnheader.
%
\def\makedefun#1{%
\expandafter\let\csname E#1\endcsname = \Edefun
\edef\temp{\noexpand\domakedefun
\makecsname{#1}\makecsname{#1x}\makecsname{#1header}}%
\temp
}
% \domakedefun \deffn \deffnx \deffnheader
%
% Define \deffn and \deffnx, without parameters.
% \deffnheader has to be defined explicitly.
%
\def\domakedefun#1#2#3{%
\envdef#1{%
\startdefun
\parseargusing\activeparens{\printdefunline#3}%
}%
\def#2{\dodefunx#1}%
\def#3%
}
%%% Untyped functions:
% @deffn category name args
\makedefun{deffn}{\deffngeneral{}}
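% For instance (illustrative input), a Texinfo fragment such as
%   @deffn Command forward-word count
%   ...
%   @end deffn
% arrives here with category `Command', name `forward-word', args `count'.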
% @defop category class name args
\makedefun{defop}#1 {\defopon{#1\ \putwordon}}
% \defopon {category on}class name args
\def\defopon#1#2 {\deffngeneral{\putwordon\ \code{#2}}{#1\ \code{#2}} }
% \deffngeneral {subind}category name args
%
\def\deffngeneral#1#2 #3 #4\endheader{%
% Remember that \dosubind{fn}{foo}{} is equivalent to \doind{fn}{foo}.
\dosubind{fn}{\code{#3}}{#1}%
\defname{#2}{}{#3}\magicamp\defunargs{#4\unskip}%
}
%%% Typed functions:
% @deftypefn category type name args
\makedefun{deftypefn}{\deftypefngeneral{}}
% @deftypeop category class type name args
\makedefun{deftypeop}#1 {\deftypeopon{#1\ \putwordon}}
% \deftypeopon {category on}class type name args
\def\deftypeopon#1#2 {\deftypefngeneral{\putwordon\ \code{#2}}{#1\ \code{#2}} }
% \deftypefngeneral {subind}category type name args
%
\def\deftypefngeneral#1#2 #3 #4 #5\endheader{%
\dosubind{fn}{\code{#4}}{#1}%
\defname{#2}{#3}{#4}\defunargs{#5\unskip}%
}
%%% Typed variables:
% @deftypevr category type var args
\makedefun{deftypevr}{\deftypecvgeneral{}}
% @deftypecv category class type var args
\makedefun{deftypecv}#1 {\deftypecvof{#1\ \putwordof}}
% \deftypecvof {category of}class type var args
\def\deftypecvof#1#2 {\deftypecvgeneral{\putwordof\ \code{#2}}{#1\ \code{#2}} }
% \deftypecvgeneral {subind}category type var args
%
\def\deftypecvgeneral#1#2 #3 #4 #5\endheader{%
\dosubind{vr}{\code{#4}}{#1}%
\defname{#2}{#3}{#4}\defunargs{#5\unskip}%
}
%%% Untyped variables:
% @defvr category var args
\makedefun{defvr}#1 {\deftypevrheader{#1} {} }
% @defcv category class var args
\makedefun{defcv}#1 {\defcvof{#1\ \putwordof}}
% \defcvof {category of}class var args
\def\defcvof#1#2 {\deftypecvof{#1}#2 {} }
%%% Type:
% @deftp category name args
\makedefun{deftp}#1 #2 #3\endheader{%
\doind{tp}{\code{#2}}%
\defname{#1}{}{#2}\defunargs{#3\unskip}%
}
% Remaining @defun-like shortcuts:
\makedefun{defun}{\deffnheader{\putwordDeffunc} }
\makedefun{defmac}{\deffnheader{\putwordDefmac} }
\makedefun{defspec}{\deffnheader{\putwordDefspec} }
\makedefun{deftypefun}{\deftypefnheader{\putwordDeffunc} }
\makedefun{defvar}{\defvrheader{\putwordDefvar} }
\makedefun{defopt}{\defvrheader{\putwordDefopt} }
\makedefun{deftypevar}{\deftypevrheader{\putwordDefvar} }
\makedefun{defmethod}{\defopon\putwordMethodon}
\makedefun{deftypemethod}{\deftypeopon\putwordMethodon}
\makedefun{defivar}{\defcvof\putwordInstanceVariableof}
\makedefun{deftypeivar}{\deftypecvof\putwordInstanceVariableof}
% \defname, which formats the name of the @def (not the args).
% #1 is the category, such as "Function".
% #2 is the return type, if any.
% #3 is the function name.
%
% We are followed by (but not passed) the arguments, if any.
%
\def\defname#1#2#3{%
% Get the values of \leftskip and \rightskip as they were outside the @def...
\advance\leftskip by -\defbodyindent
%
% How we'll format the type name. Putting it in brackets helps
% distinguish it from the body text that may end up on the next line
% just below it.
\def\temp{#1}%
\setbox0=\hbox{\kern\deflastargmargin \ifx\temp\empty\else [\rm\temp]\fi}
%
% Figure out line sizes for the paragraph shape.
% The first line needs space for \box0; but if \rightskip is nonzero,
% we need only space for the part of \box0 which exceeds it:
\dimen0=\hsize \advance\dimen0 by -\wd0 \advance\dimen0 by \rightskip
% The continuations:
\dimen2=\hsize \advance\dimen2 by -\defargsindent
% (plain.tex says that \dimen1 should be used only as global.)
\parshape 2 0in \dimen0 \defargsindent \dimen2
%
% Put the type name to the right margin.
\noindent
\hbox to 0pt{%
\hfil\box0 \kern-\hsize
% \hsize has to be shortened this way:
\kern\leftskip
% Intentionally do not respect \rightskip, since we need the space.
}%
%
% Allow all lines to be underfull without complaint:
\tolerance=10000 \hbadness=10000
\exdentamount=\defbodyindent
{%
% defun fonts. We use typewriter by default (used to be bold) because:
% . we're printing identifiers, they should be in tt in principle.
% . in languages with many accents, such as Czech or French, it's
% common to leave accents off identifiers. The result looks ok in
% tt, but exceedingly strange in rm.
% . we don't want -- and --- to be treated as ligatures.
% . this still does not fix the ?` and !` ligatures, but so far no
% one has made identifiers using them :).
\df \tt
\def\temp{#2}% return value type
\ifx\temp\empty\else \tclose{\temp} \fi
#3% output function name
}%
{\rm\enskip}% hskip 0.5 em of \tenrm
%
\boldbrax
% arguments will be output next, if any.
}
% Print arguments in slanted roman (not ttsl), inconsistently with using
% tt for the name. This is because literal text is sometimes needed in
% the argument list (groff manual), and ttsl and tt are not very
% distinguishable. Prevent hyphenation at `-' chars.
%
\def\defunargs#1{%
% use sl by default (not ttsl),
% tt for the names.
\df \sl \hyphenchar\font=0
%
% On the other hand, if an argument has two dashes (for instance), we
% want a way to get ttsl. Let's try @var for that.
\def\var##1{{\setupmarkupstyle{var}\ttslanted{##1}}}%
#1%
\sl\hyphenchar\font=45
}
% We want ()&[] to print specially on the defun line.
%
\def\activeparens{%
\catcode`\(=\active \catcode`\)=\active
\catcode`\[=\active \catcode`\]=\active
\catcode`\&=\active
}
% Make control sequences which act like normal parenthesis chars.
\let\lparen = ( \let\rparen = )
% Be sure that we always have a definition for `(', etc. For example,
% if the fn name has parens in it, \boldbrax will not be in effect yet,
% so TeX would otherwise complain about undefined control sequence.
{
\activeparens
\global\let(=\lparen \global\let)=\rparen
\global\let[=\lbrack \global\let]=\rbrack
\global\let& = \&
\gdef\boldbrax{\let(=\opnr\let)=\clnr\let[=\lbrb\let]=\rbrb}
\gdef\magicamp{\let&=\amprm}
}
\newcount\parencount
% If we encounter &foo, then turn on ()-hacking afterwards
\newif\ifampseen
\def\amprm#1 {\ampseentrue{\bf\ }}
\def\parenfont{%
\ifampseen
% At the first level, print parens in roman,
% otherwise use the default font.
\ifnum \parencount=1 \rm \fi
\else
% The \sf parens (in \boldbrax) actually are a little bolder than
% the contained text. This is especially needed for [ and ] .
\sf
\fi
}
\def\infirstlevel#1{%
\ifampseen
\ifnum\parencount=1
#1%
\fi
\fi
}
\def\bfafterword#1 {#1 \bf}
\def\opnr{%
\global\advance\parencount by 1
{\parenfont(}%
\infirstlevel \bfafterword
}
\def\clnr{%
{\parenfont)}%
\infirstlevel \sl
\global\advance\parencount by -1
}
\newcount\brackcount
\def\lbrb{%
\global\advance\brackcount by 1
{\bf[}%
}
\def\rbrb{%
{\bf]}%
\global\advance\brackcount by -1
}
\def\checkparencounts{%
\ifnum\parencount=0 \else \badparencount \fi
\ifnum\brackcount=0 \else \badbrackcount \fi
}
% these should not use \errmessage; the glibc manual, at least, actually
% has such constructs (when documenting function pointers).
\def\badparencount{%
\message{Warning: unbalanced parentheses in @def...}%
\global\parencount=0
}
\def\badbrackcount{%
\message{Warning: unbalanced square brackets in @def...}%
\global\brackcount=0
}
\message{macros,}
% @macro.
% To do this right we need a feature of e-TeX, \scantokens,
% which we arrange to emulate with a temporary file in ordinary TeX.
\ifx\eTeXversion\undefined
\newwrite\macscribble
\def\scantokens#1{%
\toks0={#1}%
\immediate\openout\macscribble=\jobname.tmp
\immediate\write\macscribble{\the\toks0}%
\immediate\closeout\macscribble
\input \jobname.tmp
}
\fi
\def\scanmacro#1{%
\begingroup
\newlinechar`\^^M
\let\xeatspaces\eatspaces
% Undo catcode changes of \startcontents and \doprintindex
% When called from @insertcopying or (short)caption, we need active
% backslash to get it printed correctly. Previously, we had
% \catcode`\\=\other instead. We'll see whether a problem appears
% with macro expansion. --kasal, 19aug04
\catcode`\@=0 \catcode`\\=\active \escapechar=`\@
% ... and \example
\spaceisspace
%
% Append \endinput to make sure that TeX does not see the ending newline.
% I've verified that it is necessary both for e-TeX and for ordinary TeX
% --kasal, 29nov03
\scantokens{#1\endinput}%
\endgroup
}
\def\scanexp#1{%
\edef\temp{\noexpand\scanmacro{#1}}%
\temp
}
\newcount\paramno % Count of parameters
\newtoks\macname % Macro name
\newif\ifrecursive % Is it recursive?
% List of all defined macros in the form
% \definedummyword\macro1\definedummyword\macro2...
% Currently it also contains all @aliases; the list can be split
% if there is a need.
\def\macrolist{}
% Add the macro to \macrolist
\def\addtomacrolist#1{\expandafter \addtomacrolistxxx \csname#1\endcsname}
\def\addtomacrolistxxx#1{%
\toks0 = \expandafter{\macrolist\definedummyword#1}%
\xdef\macrolist{\the\toks0}%
}
% Utility routines.
% This does \let #1 = #2, with \csnames; that is,
% \let \csname#1\endcsname = \csname#2\endcsname
% (except of course we have to play expansion games).
%
\def\cslet#1#2{%
\expandafter\let
\csname#1\expandafter\endcsname
\csname#2\endcsname
}
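% For instance, \cslet{macsave.foo}{foo} saves the current meaning of a
% (hypothetical) \foo under the name `macsave.foo'; \macro below uses
% exactly this to preserve a command it is about to redefine.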
% Trim leading and trailing spaces off a string.
% Concepts from aro-bend problem 15 (see CTAN).
{\catcode`\@=11
\gdef\eatspaces #1{\expandafter\trim@\expandafter{#1 }}
\gdef\trim@ #1{\trim@@ @#1 @ #1 @ @@}
\gdef\trim@@ #1@ #2@ #3@@{\trim@@@\empty #2 @}
\def\unbrace#1{#1}
\unbrace{\gdef\trim@@@ #1 } #2@{#1}
}
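% For instance, \eatspaces{ foo } yields just `foo'.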
% Trim a single trailing ^^M off a string.
{\catcode`\^^M=\other \catcode`\Q=3%
\gdef\eatcr #1{\eatcra #1Q^^MQ}%
\gdef\eatcra#1^^MQ{\eatcrb#1Q}%
\gdef\eatcrb#1Q#2Q{#1}%
}
% Macro bodies are absorbed as an argument in a context where
% all characters are catcode 10, 11 or 12, except \ which is active
% (as in normal texinfo). It is necessary to change the definition of \.
% Non-ASCII encodings make 8-bit characters active, so un-activate
% them to avoid their expansion. Must do this non-globally, to
% confine the change to the current group.
% It's necessary to have hard CRs when the macro is executed. This is
% done by making ^^M (\endlinechar) catcode 12 when reading the macro
% body, and then making it the \newlinechar in \scanmacro.
\def\scanctxt{%
\catcode`\"=\other
\catcode`\+=\other
\catcode`\<=\other
\catcode`\>=\other
\catcode`\@=\other
\catcode`\^=\other
\catcode`\_=\other
\catcode`\|=\other
\catcode`\~=\other
\ifx\declaredencoding\ascii \else \setnonasciicharscatcodenonglobal\other \fi
}
\def\scanargctxt{%
\scanctxt
\catcode`\\=\other
\catcode`\^^M=\other
}
\def\macrobodyctxt{%
\scanctxt
\catcode`\{=\other
\catcode`\}=\other
\catcode`\^^M=\other
\usembodybackslash
}
\def\macroargctxt{%
\scanctxt
\catcode`\\=\other
}
% \mbodybackslash is the definition of \ in @macro bodies.
% It maps \foo\ => \csname macarg.foo\endcsname => #N
% where N is the macro parameter number.
% We define \csname macarg.\endcsname to be \realbackslash, so
% \\ in macro replacement text gets you a backslash.
{\catcode`@=0 @catcode`@\=@active
@gdef@usembodybackslash{@let\=@mbodybackslash}
@gdef@mbodybackslash#1\{@csname macarg.#1@endcsname}
}
\expandafter\def\csname macarg.\endcsname{\realbackslash}
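% For instance, in the body of a (hypothetical) `@macro greet {name}',
% writing \name\ becomes \csname macarg.name\endcsname, which
% \parsemargdef below sets up as the placeholder for parameter 1.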
\def\macro{\recursivefalse\parsearg\macroxxx}
\def\rmacro{\recursivetrue\parsearg\macroxxx}
\def\macroxxx#1{%
\getargs{#1}% now \macname is the macname and \argl the arglist
\ifx\argl\empty % no arguments
\paramno=0%
\else
\expandafter\parsemargdef \argl;%
\fi
\if1\csname ismacro.\the\macname\endcsname
\message{Warning: redefining \the\macname}%
\else
\expandafter\ifx\csname \the\macname\endcsname \relax
\else \errmessage{Macro name \the\macname\space already defined}\fi
\global\cslet{macsave.\the\macname}{\the\macname}%
\global\expandafter\let\csname ismacro.\the\macname\endcsname=1%
\addtomacrolist{\the\macname}%
\fi
\begingroup \macrobodyctxt
\ifrecursive \expandafter\parsermacbody
\else \expandafter\parsemacbody
\fi}
\parseargdef\unmacro{%
\if1\csname ismacro.#1\endcsname
\global\cslet{#1}{macsave.#1}%
\global\expandafter\let \csname ismacro.#1\endcsname=0%
% Remove the macro name from \macrolist:
\begingroup
\expandafter\let\csname#1\endcsname \relax
\let\definedummyword\unmacrodo
\xdef\macrolist{\macrolist}%
\endgroup
\else
\errmessage{Macro #1 not defined}%
\fi
}
% Called via \definedummyword from \unmacro on each macro in \macrolist.
% The idea is to omit any macro definitions that have been changed to \relax.
%
\def\unmacrodo#1{%
\ifx #1\relax
% remove this
\else
\noexpand\definedummyword \noexpand#1%
\fi
}
% This makes use of the obscure feature that if the last token of a
% <parameter list> is #, then the preceding argument is delimited by
% an opening brace, and that opening brace is not consumed.
\def\getargs#1{\getargsxxx#1{}}
\def\getargsxxx#1#{\getmacname #1 \relax\getmacargs}
\def\getmacname #1 #2\relax{\macname={#1}}
\def\getmacargs#1{\def\argl{#1}}
% Parse the optional {params} list. Set up \paramno and \paramlist
% so \defmacro knows what to do. Define \macarg.blah for each blah
% in the params list, to be ##N where N is the position in that list.
% That gets used by \mbodybackslash (above).
% We need to get `macro parameter char #' into several definitions.
% The technique used is stolen from LaTeX: let \hash be something
% unexpandable, insert that wherever you need a #, and then redefine
% it to # just before using the token list produced.
%
% The same technique is used to protect \eatspaces till just before
% the macro is used.
\def\parsemargdef#1;{\paramno=0\def\paramlist{}%
\let\hash\relax\let\xeatspaces\relax\parsemargdefxxx#1,;,}
\def\parsemargdefxxx#1,{%
\if#1;\let\next=\relax
\else \let\next=\parsemargdefxxx
\advance\paramno by 1%
\expandafter\edef\csname macarg.\eatspaces{#1}\endcsname
{\xeatspaces{\hash\the\paramno}}%
\edef\paramlist{\paramlist\hash\the\paramno,}%
\fi\next}
% These two commands read recursive and nonrecursive macro bodies.
% (They're different since rec and nonrec macros end differently.)
\long\def\parsemacbody#1@end macro%
{\xdef\temp{\eatcr{#1}}\endgroup\defmacro}%
\long\def\parsermacbody#1@end rmacro%
{\xdef\temp{\eatcr{#1}}\endgroup\defmacro}%
% This defines the macro itself. There are six cases: recursive and
% nonrecursive macros of zero, one, and many arguments.
% Much magic with \expandafter here.
% \xdef is used so that macro definitions will survive the file
% they're defined in; @include reads the file inside a group.
\def\defmacro{%
\let\hash=##% convert placeholders to macro parameter chars
\ifrecursive
\ifcase\paramno
% 0
\expandafter\xdef\csname\the\macname\endcsname{%
\noexpand\scanmacro{\temp}}%
\or % 1
\expandafter\xdef\csname\the\macname\endcsname{%
\bgroup\noexpand\macroargctxt
\noexpand\braceorline
\expandafter\noexpand\csname\the\macname xxx\endcsname}%
\expandafter\xdef\csname\the\macname xxx\endcsname##1{%
\egroup\noexpand\scanmacro{\temp}}%
\else % many
\expandafter\xdef\csname\the\macname\endcsname{%
\bgroup\noexpand\macroargctxt
\noexpand\csname\the\macname xx\endcsname}%
\expandafter\xdef\csname\the\macname xx\endcsname##1{%
\expandafter\noexpand\csname\the\macname xxx\endcsname ##1,}%
\expandafter\expandafter
\expandafter\xdef
\expandafter\expandafter
\csname\the\macname xxx\endcsname
\paramlist{\egroup\noexpand\scanmacro{\temp}}%
\fi
\else
\ifcase\paramno
% 0
\expandafter\xdef\csname\the\macname\endcsname{%
\noexpand\norecurse{\the\macname}%
\noexpand\scanmacro{\temp}\egroup}%
\or % 1
\expandafter\xdef\csname\the\macname\endcsname{%
\bgroup\noexpand\macroargctxt
\noexpand\braceorline
\expandafter\noexpand\csname\the\macname xxx\endcsname}%
\expandafter\xdef\csname\the\macname xxx\endcsname##1{%
\egroup
\noexpand\norecurse{\the\macname}%
\noexpand\scanmacro{\temp}\egroup}%
\else % many
\expandafter\xdef\csname\the\macname\endcsname{%
\bgroup\noexpand\macroargctxt
\expandafter\noexpand\csname\the\macname xx\endcsname}%
\expandafter\xdef\csname\the\macname xx\endcsname##1{%
\expandafter\noexpand\csname\the\macname xxx\endcsname ##1,}%
\expandafter\expandafter
\expandafter\xdef
\expandafter\expandafter
\csname\the\macname xxx\endcsname
\paramlist{%
\egroup
\noexpand\norecurse{\the\macname}%
\noexpand\scanmacro{\temp}\egroup}%
\fi
\fi}
\def\norecurse#1{\bgroup\cslet{#1}{macsave.#1}}
% \braceorline decides whether the next nonwhitespace character is a
% {. If so it reads up to the closing }, if not, it reads the whole
% line. Whatever was read is then fed to the next control sequence
% as an argument (by \parsebrace or \parsearg)
\def\braceorline#1{\let\macnamexxx=#1\futurelet\nchar\braceorlinexxx}
\def\braceorlinexxx{%
\ifx\nchar\bgroup\else
\expandafter\parsearg
\fi \macnamexxx}
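% So, for illustration, a one-argument macro such as the @greet example
% above can be invoked either as
%   @greet{world}
% or as
%   @greet world
% where the second form takes the rest of the line as the argument.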
% @alias.
% We need some trickery to remove the optional spaces around the equal
% sign. Just make them active and then expand them all to nothing.
\def\alias{\parseargusing\obeyspaces\aliasxxx}
\def\aliasxxx #1{\aliasyyy#1\relax}
\def\aliasyyy #1=#2\relax{%
{%
\expandafter\let\obeyedspace=\empty
\addtomacrolist{#1}%
\xdef\next{\global\let\makecsname{#1}=\makecsname{#2}}%
}%
\next
}
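% For example (the new name is purely illustrative), a document may say
%   @alias mycode = code
% after which @mycode{...} behaves exactly like @code{...}.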
\message{cross references,}
\newwrite\auxfile
\newif\ifhavexrefs % True if xref values are known.
\newif\ifwarnedxrefs % True if we warned once that they aren't known.
% @inforef is relatively simple.
\def\inforef #1{\inforefzzz #1,,,,**}
\def\inforefzzz #1,#2,#3,#4**{\putwordSee{} \putwordInfo{} \putwordfile{} \file{\ignorespaces #3{}},
node \samp{\ignorespaces#1{}}}
% @node's only job in TeX is to define \lastnode, which is used in
% cross-references. The @node line might or might not have commas, and
% might or might not have spaces before the first comma, like:
% @node foo , bar , ...
% We don't want such trailing spaces in the node name.
%
\parseargdef\node{\checkenv{}\donode #1 ,\finishnodeparse}
%
% also remove a trailing comma, in case of something like this:
% @node Help-Cross, , , Cross-refs
\def\donode#1 ,#2\finishnodeparse{\dodonode #1,\finishnodeparse}
\def\dodonode#1,#2\finishnodeparse{\gdef\lastnode{#1}}
\let\nwnode=\node
\let\lastnode=\empty
% Write a cross-reference definition for the current node. #1 is the
% type (Ynumbered, Yappendix, Ynothing).
%
\def\donoderef#1{%
\ifx\lastnode\empty\else
\setref{\lastnode}{#1}%
\global\let\lastnode=\empty
\fi
}
% @anchor{NAME} -- define xref target at arbitrary point.
%
\newcount\savesfregister
%
\def\savesf{\relax \ifhmode \savesfregister=\spacefactor \fi}
\def\restoresf{\relax \ifhmode \spacefactor=\savesfregister \fi}
\def\anchor#1{\savesf \setref{#1}{Ynothing}\restoresf \ignorespaces}
% \setref{NAME}{SNT} defines a cross-reference point NAME (a node or an
% anchor), which consists of three parts:
% 1) NAME-title - the current sectioning name taken from \lastsection,
% or the anchor name.
% 2) NAME-snt - section number and type, passed as the SNT arg, or
% empty for anchors.
% 3) NAME-pg - the page number.
%
% This is called from \donoderef, \anchor, and \dofloat. In the case of
% floats, there is an additional part, which is not written here:
% 4) NAME-lof - the text as it should appear in a @listoffloats.
%
\def\setref#1#2{%
\pdfmkdest{#1}%
\iflinks
{%
\atdummies % preserve commands, but don't expand them
\edef\writexrdef##1##2{%
\write\auxfile{@xrdef{#1-% #1 of \setref, expanded by the \edef
##1}{##2}}% these are parameters of \writexrdef
}%
\toks0 = \expandafter{\lastsection}%
\immediate \writexrdef{title}{\the\toks0 }%
\immediate \writexrdef{snt}{\csname #2\endcsname}% \Ynumbered etc.
\safewhatsit{\writexrdef{pg}{\folio}}% will be written later, during \shipout
}%
\fi
}
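% As an illustration, for a node named Sample with section title
% "Introduction", this writes lines of roughly the following shape to the
% .aux file (the concrete values here are made up):
%   @xrdef{Sample-title}{Introduction}
%   @xrdef{Sample-snt}{Chapter 1}
%   @xrdef{Sample-pg}{3}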
% @xref, @pxref, and @ref generate cross-references. For \xrefX, #1 is
% the node name, #2 the name of the Info cross-reference, #3 the printed
% node name, #4 the name of the Info file, #5 the name of the printed
% manual. All but the node name can be omitted.
%
\def\pxref#1{\putwordsee{} \xrefX[#1,,,,,,,]}
\def\xref#1{\putwordSee{} \xrefX[#1,,,,,,,]}
\def\ref#1{\xrefX[#1,,,,,,,]}
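% For illustration (node, file and manual names are made up):
%   @xref{Sample Node}.
%   @xref{Sample Node, , Printed Title, othermanual, The Other Manual},
% the second form referring to a node in another Info file/manual.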
\def\xrefX[#1,#2,#3,#4,#5,#6]{\begingroup
\unsepspaces
\def\printedmanual{\ignorespaces #5}%
\def\printedrefname{\ignorespaces #3}%
\setbox1=\hbox{\printedmanual\unskip}%
\setbox0=\hbox{\printedrefname\unskip}%
\ifdim \wd0 = 0pt
% No printed node name was explicitly given.
\expandafter\ifx\csname SETxref-automatic-section-title\endcsname\relax
% Use the node name inside the square brackets.
\def\printedrefname{\ignorespaces #1}%
\else
% Use the actual chapter/section title that appears inside
% the square brackets. Use the real section title if we have it.
\ifdim \wd1 > 0pt
% It is in another manual, so we don't have it.
\def\printedrefname{\ignorespaces #1}%
\else
\ifhavexrefs
% We know the real title if we have the xref values.
\def\printedrefname{\refx{#1-title}{}}%
\else
% Otherwise just copy the Info node name.
\def\printedrefname{\ignorespaces #1}%
\fi%
\fi
\fi
\fi
%
% Make link in pdf output.
\ifpdf
{\indexnofonts
\turnoffactive
% This expands tokens, so do it after making catcode changes, so _
% etc. don't get their TeX definitions.
\getfilename{#4}%
%
% See comments at \activebackslashdouble.
{\activebackslashdouble \xdef\pdfxrefdest{#1}%
\backslashparens\pdfxrefdest}%
%
\leavevmode
\startlink attr{/Border [0 0 0]}%
\ifnum\filenamelength>0
goto file{\the\filename.pdf} name{\pdfxrefdest}%
\else
goto name{\pdfmkpgn{\pdfxrefdest}}%
\fi
}%
\setcolor{\linkcolor}%
\fi
%
% Float references are printed completely differently: "Figure 1.2"
% instead of "[somenode], p.3". We distinguish them by the
% LABEL-title being set to a magic string.
{%
% Have to otherify everything special to allow the \csname to
% include an _ in the xref name, etc.
\indexnofonts
\turnoffactive
\expandafter\global\expandafter\let\expandafter\Xthisreftitle
\csname XR#1-title\endcsname
}%
\iffloat\Xthisreftitle
% If the user specified the print name (third arg) to the ref,
% print it instead of our usual "Figure 1.2".
\ifdim\wd0 = 0pt
\refx{#1-snt}{}%
\else
\printedrefname
\fi
%
% if the user also gave the printed manual name (fifth arg), append
% "in MANUALNAME".
\ifdim \wd1 > 0pt
\space \putwordin{} \cite{\printedmanual}%
\fi
\else
% node/anchor (non-float) references.
%
% If we use \unhbox0 and \unhbox1 to print the node names, TeX does not
% insert empty discretionaries after hyphens, which means that it will
% not find a line break at a hyphen in node names. Since some manuals
% are best written with fairly long node names, containing hyphens, this
% is a loss. Therefore, we give the text of the node name again, so it
% is as if TeX is seeing it for the first time.
\ifdim \wd1 > 0pt
\putwordSection{} ``\printedrefname'' \putwordin{} \cite{\printedmanual}%
\else
% _ (for example) has to be the character _ for the purposes of the
% control sequence corresponding to the node, but it has to expand
% into the usual \leavevmode...\vrule stuff for purposes of
% printing. So we \turnoffactive for the \refx-snt, back on for the
% printing, back off for the \refx-pg.
{\turnoffactive
% Only output a following space if the -snt ref is nonempty; for
% @unnumbered and @anchor, it won't be.
\setbox2 = \hbox{\ignorespaces \refx{#1-snt}{}}%
\ifdim \wd2 > 0pt \refx{#1-snt}\space\fi
}%
% output the `[mynode]' via a macro so it can be overridden.
\xrefprintnodename\printedrefname
%
% But we always want a comma and a space:
,\space
%
% output the `page 3'.
\turnoffactive \putwordpage\tie\refx{#1-pg}{}%
\fi
\fi
\endlink
\endgroup}
% This macro is called from \xrefX for the `[nodename]' part of xref
% output. It's a separate macro only so it can be changed more easily,
% since square brackets don't work well in some documents. Particularly
% one that Bob is working on :).
%
\def\xrefprintnodename#1{[#1]}
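% A document that prefers quotes to square brackets could, for instance,
% override it from a @tex block with something like (a sketch only):
%   \gdef\xrefprintnodename#1{``#1''}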
% Things referred to by \setref.
%
\def\Ynothing{}
\def\Yomitfromtoc{}
\def\Ynumbered{%
\ifnum\secno=0
\putwordChapter@tie \the\chapno
\else \ifnum\subsecno=0
\putwordSection@tie \the\chapno.\the\secno
\else \ifnum\subsubsecno=0
\putwordSection@tie \the\chapno.\the\secno.\the\subsecno
\else
\putwordSection@tie \the\chapno.\the\secno.\the\subsecno.\the\subsubsecno
\fi\fi\fi
}
\def\Yappendix{%
\ifnum\secno=0
\putwordAppendix@tie @char\the\appendixno{}%
\else \ifnum\subsecno=0
\putwordSection@tie @char\the\appendixno.\the\secno
\else \ifnum\subsubsecno=0
\putwordSection@tie @char\the\appendixno.\the\secno.\the\subsecno
\else
\putwordSection@tie
@char\the\appendixno.\the\secno.\the\subsecno.\the\subsubsecno
\fi\fi\fi
}
% Define \refx{NAME}{SUFFIX} to reference a cross-reference string named NAME.
% If its value is nonempty, SUFFIX is output afterward.
%
\def\refx#1#2{%
{%
\indexnofonts
\otherbackslash
\expandafter\global\expandafter\let\expandafter\thisrefX
\csname XR#1\endcsname
}%
\ifx\thisrefX\relax
% If not defined, say something at least.
\angleleft un\-de\-fined\angleright
\iflinks
\ifhavexrefs
\message{\linenumber Undefined cross reference `#1'.}%
\else
\ifwarnedxrefs\else
\global\warnedxrefstrue
\message{Cross reference values unknown; you must run TeX again.}%
\fi
\fi
\fi
\else
% It's defined, so just use it.
\thisrefX
\fi
#2% Output the suffix in any case.
}
% This is the macro invoked by entries in the aux file. Usually it's
% just a \def (we prepend XR to the control sequence name to avoid
% collisions). But if this is a float type, we have more work to do.
%
\def\xrdef#1#2{%
{% The node name might contain 8-bit characters, which in our current
% implementation are changed to commands like @'e. Don't let these
% mess up the control sequence name.
\indexnofonts
\turnoffactive
\xdef\safexrefname{#1}%
}%
%
\expandafter\gdef\csname XR\safexrefname\endcsname{#2}% remember this xref
%
% Was that xref control sequence that we just defined for a float?
\expandafter\iffloat\csname XR\safexrefname\endcsname
% it was a float, and we have the (safe) float type in \iffloattype.
\expandafter\let\expandafter\floatlist
\csname floatlist\iffloattype\endcsname
%
% Is this the first time we've seen this float type?
\expandafter\ifx\floatlist\relax
\toks0 = {\do}% yes, so just \do
\else
% had it before, so preserve previous elements in list.
\toks0 = \expandafter{\floatlist\do}%
\fi
%
% Remember this xref in the control sequence \floatlistFLOATTYPE,
% for later use in \listoffloats.
\expandafter\xdef\csname floatlist\iffloattype\endcsname{\the\toks0
{\safexrefname}}%
\fi
}
% Read the last existing aux file, if any. No error if none exists.
%
\def\tryauxfile{%
\openin 1 \jobname.aux
\ifeof 1 \else
\readdatafile{aux}%
\global\havexrefstrue
\fi
\closein 1
}
\def\setupdatafile{%
\catcode`\^^@=\other
\catcode`\^^A=\other
\catcode`\^^B=\other
\catcode`\^^C=\other
\catcode`\^^D=\other
\catcode`\^^E=\other
\catcode`\^^F=\other
\catcode`\^^G=\other
\catcode`\^^H=\other
\catcode`\^^K=\other
\catcode`\^^L=\other
\catcode`\^^N=\other
\catcode`\^^P=\other
\catcode`\^^Q=\other
\catcode`\^^R=\other
\catcode`\^^S=\other
\catcode`\^^T=\other
\catcode`\^^U=\other
\catcode`\^^V=\other
\catcode`\^^W=\other
\catcode`\^^X=\other
\catcode`\^^Z=\other
\catcode`\^^[=\other
\catcode`\^^\=\other
\catcode`\^^]=\other
\catcode`\^^^=\other
\catcode`\^^_=\other
% It was suggested to set the catcode of ^ to 7, which would allow ^^e4 etc.
% in xref tags, i.e., node names. But since ^^e4 notation isn't
% supported in the main text, it doesn't seem desirable. Furthermore,
% that is not enough: for node names that actually contain a ^
% character, we would end up writing a line like this: 'xrdef {'hat
% b-title}{'hat b} and \xrdef does a \csname...\endcsname on the first
% argument, and \hat is not an expandable control sequence. It could
% all be worked out, but why? Either we support ^^ or we don't.
%
% The other change necessary for this was to define \auxhat:
% \def\auxhat{\def^{'hat }}% extra space so ok if followed by letter
% and then to call \auxhat in \setq.
%
\catcode`\^=\other
%
% Special characters. Should be turned off anyway, but...
\catcode`\~=\other
\catcode`\[=\other
\catcode`\]=\other
\catcode`\"=\other
\catcode`\_=\other
\catcode`\|=\other
\catcode`\<=\other
\catcode`\>=\other
\catcode`\$=\other
\catcode`\#=\other
\catcode`\&=\other
\catcode`\%=\other
\catcode`+=\other % avoid \+ for paranoia even though we've turned it off
%
% This is to support \ in node names and titles, since the \
% characters end up in a \csname. It's easier than
% leaving it active and making its active definition an actual \
% character. What I don't understand is why it works in the *value*
% of the xrdef. Seems like it should be a catcode12 \, and that
% should not typeset properly. But it works, so I'm moving on for
% now. --karl, 15jan04.
\catcode`\\=\other
%
% Make the characters 128-255 be printing characters.
{%
\count1=128
\def\loop{%
\catcode\count1=\other
\advance\count1 by 1
\ifnum \count1<256 \loop \fi
}%
}%
%
% @ is our escape character in .aux files, and we need braces.
\catcode`\{=1
\catcode`\}=2
\catcode`\@=0
}
\def\readdatafile#1{%
\begingroup
\setupdatafile
\input\jobname.#1
\endgroup}
\message{insertions,}
% including footnotes.
\newcount \footnoteno
% The trailing space in the following definition for supereject is
% vital for proper filling; pages come out unaligned when you do a
% pagealignmacro call if that space before the closing brace is
% removed. (Generally, numeric constants should always be followed by a
% space to prevent strange expansion errors.)
\def\supereject{\par\penalty -20000\footnoteno =0 }
% @footnotestyle is meaningful for info output only.
\let\footnotestyle=\comment
{\catcode `\@=11
%
% Auto-number footnotes. Otherwise like plain.
\gdef\footnote{%
\let\indent=\ptexindent
\let\noindent=\ptexnoindent
\global\advance\footnoteno by \@ne
\edef\thisfootno{$^{\the\footnoteno}$}%
%
% In case the footnote comes at the end of a sentence, preserve the
% extra spacing after we do the footnote number.
\let\@sf\empty
\ifhmode\edef\@sf{\spacefactor\the\spacefactor}\ptexslash\fi
%
% Remove inadvertent blank space before typesetting the footnote number.
\unskip
\thisfootno\@sf
\dofootnote
}%
% Don't bother with the trickery in plain.tex to not require the
% footnote text as a parameter. Our footnotes don't need to be so general.
%
% Oh yes, they do; otherwise, @ifset (and anything else that uses
% \parseargline) fails inside footnotes because the tokens are fixed when
% the footnote is read. --karl, 16nov96.
%
\gdef\dofootnote{%
\insert\footins\bgroup
% We want to typeset this text as a normal paragraph, even if the
% footnote reference occurs in (for example) a display environment.
% So reset some parameters.
\hsize=\pagewidth
\interlinepenalty\interfootnotelinepenalty
\splittopskip\ht\strutbox % top baseline for broken footnotes
\splitmaxdepth\dp\strutbox
\floatingpenalty\@MM
\leftskip\z@skip
\rightskip\z@skip
\spaceskip\z@skip
\xspaceskip\z@skip
\parindent\defaultparindent
%
\smallfonts \rm
%
% Because we use hanging indentation in footnotes, a @noindent appears
% to exdent this text, so make it be a no-op. makeinfo does not use
% hanging indentation so @noindent can still be needed within footnote
% text after an @example or the like (not that this is good style).
\let\noindent = \relax
%
% Hang the footnote text off the number. Use \everypar in case the
% footnote extends for more than one paragraph.
\everypar = {\hang}%
\textindent{\thisfootno}%
%
% Don't crash into the line above the footnote text. Since this
% expands into a box, it must come within the paragraph, lest it
% provide a place where TeX can split the footnote.
\footstrut
\futurelet\next\fo@t
}
}%end \catcode `\@=11
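% For illustration, a footnote is written inline in the Texinfo source:
%   ... main text.@footnote{This text is set at the bottom of the page.}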
% In case a @footnote appears in a vbox, save the footnote text and create
% the real \insert just after the vbox is finished. Otherwise, the insertion
% would be lost.
% Similarly, if a @footnote appears inside an alignment, save the footnote
% text to a box and make the \insert when a row of the table is finished.
% And the same can be done for other insert classes. --kasal, 16nov03.
% Replace the \insert primitive by a cheating macro.
% Deeper inside, just make sure that the saved insertions are not spilled
% out prematurely.
%
\def\startsavinginserts{%
\ifx \insert\ptexinsert
\let\insert\saveinsert
\else
\let\checkinserts\relax
\fi
}
% This \insert replacement works for both \insert\footins{foo} and
% \insert\footins\bgroup foo\egroup, but it doesn't work for \insert27{foo}.
%
\def\saveinsert#1{%
\edef\next{\noexpand\savetobox \makeSAVEname#1}%
\afterassignment\next
% swallow the left brace
\let\temp =
}
\def\makeSAVEname#1{\makecsname{SAVE\expandafter\gobble\string#1}}
\def\savetobox#1{\global\setbox#1 = \vbox\bgroup \unvbox#1}
\def\checksaveins#1{\ifvoid#1\else \placesaveins#1\fi}
\def\placesaveins#1{%
\ptexinsert \csname\expandafter\gobblesave\string#1\endcsname
{\box#1}%
}
% eat @SAVE -- beware, all of them have catcode \other:
{
\def\dospecials{\do S\do A\do V\do E} \uncatcodespecials % ;-)
\gdef\gobblesave @SAVE{}
}
% initialization:
\def\newsaveins #1{%
\edef\next{\noexpand\newsaveinsX \makeSAVEname#1}%
\next
}
\def\newsaveinsX #1{%
\csname newbox\endcsname #1%
\expandafter\def\expandafter\checkinserts\expandafter{\checkinserts
\checksaveins #1}%
}
% initialize:
\let\checkinserts\empty
\newsaveins\footins
\newsaveins\margin
% @image. We use the macros from epsf.tex to support this.
% If epsf.tex is not installed and @image is used, we complain.
%
% Check for and read epsf.tex up front. If we read it only at @image
% time, we might be inside a group, and then its definitions would get
% undone and the next image would fail.
\openin 1 = epsf.tex
\ifeof 1 \else
% Do not bother showing banner with epsf.tex v2.7k (available in
% doc/epsf.tex and on ctan).
\def\epsfannounce{\toks0 = }%
\input epsf.tex
\fi
\closein 1
%
% We will only complain once about lack of epsf.tex.
\newif\ifwarnednoepsf
\newhelp\noepsfhelp{epsf.tex must be installed for images to
work. It is also included in the Texinfo distribution, or you can get
it from ftp://tug.org/tex/epsf.tex.}
%
\def\image#1{%
\ifx\epsfbox\undefined
\ifwarnednoepsf \else
\errhelp = \noepsfhelp
\errmessage{epsf.tex not found, images will be ignored}%
\global\warnednoepsftrue
\fi
\else
\imagexxx #1,,,,,\finish
\fi
}
%
% Arguments to @image:
% #1 is (mandatory) image filename; we tack on .eps extension.
% #2 is (optional) width, #3 is (optional) height.
% #4 is (ignored optional) html alt text.
% #5 is (ignored optional) extension.
% #6 is just the usual extra ignored arg for parsing this stuff.
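% For example (the file name and dimensions are illustrative; in TeX the
% .eps extension is tacked on, so this reads foo.eps):
%   @image{foo, 3in, , a sample diagram, .png}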
\newif\ifimagevmode
\def\imagexxx#1,#2,#3,#4,#5,#6\finish{\begingroup
\catcode`\^^M = 5 % in case we're inside an example
\normalturnoffactive % allow _ et al. in names
% If the image is by itself, center it.
\ifvmode
\imagevmodetrue
\nobreak\medskip
% Usually we'll have text after the image which will insert
% \parskip glue, so insert it here too to equalize the space
% above and below.
\nobreak\vskip\parskip
\nobreak
\fi
%
% Leave vertical mode so that indentation from an enclosing
% environment such as @quotation is respected. On the other hand, if
% it's at the top level, we don't want the normal paragraph indentation.
\noindent
%
% Output the image.
\ifpdf
\dopdfimage{#1}{#2}{#3}%
\else
% \epsfbox itself resets \epsf?size at each figure.
\setbox0 = \hbox{\ignorespaces #2}\ifdim\wd0 > 0pt \epsfxsize=#2\relax \fi
\setbox0 = \hbox{\ignorespaces #3}\ifdim\wd0 > 0pt \epsfysize=#3\relax \fi
\epsfbox{#1.eps}%
\fi
%
\ifimagevmode \medskip \fi % space after the standalone image
\endgroup}
% @float FLOATTYPE,LABEL,LOC ... @end float for displayed figures, tables,
% etc. We don't actually implement floating yet, we always include the
% float "here". But it seemed the best name for the future.
%
\envparseargdef\float{\eatcommaspace\eatcommaspace\dofloat#1, , ,\finish}
% There may be a space before the second and/or third parameter; delete it.
\def\eatcommaspace#1, {#1,}
% #1 is the optional FLOATTYPE, the text label for this float, typically
% "Figure", "Table", "Example", etc. Can't contain commas. If omitted,
% this float will not be numbered and cannot be referred to.
%
% #2 is the optional xref label. Also must be present for the float to
% be referable.
%
% #3 is the optional positioning argument; for now, it is ignored. It
% will somehow specify the positions allowed to float to (here, top, bottom).
%
% We keep a separate counter for each FLOATTYPE, which we reset at each
% chapter-level command.
\let\resetallfloatnos=\empty
%
\def\dofloat#1,#2,#3,#4\finish{%
\let\thiscaption=\empty
\let\thisshortcaption=\empty
%
% don't lose footnotes inside @float.
%
% BEWARE: when the floats start floating, we have to issue a warning whenever
% an insert appears inside a float which could possibly float. --kasal, 26may04
%
\startsavinginserts
%
% We can't be used inside a paragraph.
\par
%
\vtop\bgroup
\def\floattype{#1}%
\def\floatlabel{#2}%
\def\floatloc{#3}% we do nothing with this yet.
%
\ifx\floattype\empty
\let\safefloattype=\empty
\else
{%
% the floattype might have accents or other special characters,
% but we need to use it in a control sequence name.
\indexnofonts
\turnoffactive
\xdef\safefloattype{\floattype}%
}%
\fi
%
% If label is given but no type, we handle that as the empty type.
\ifx\floatlabel\empty \else
% We want each FLOATTYPE to be numbered separately (Figure 1,
% Table 1, Figure 2, ...). (And if no label, no number.)
%
\expandafter\getfloatno\csname\safefloattype floatno\endcsname
\global\advance\floatno by 1
%
{%
% This magic value for \lastsection is output by \setref as the
% XREFLABEL-title value. \xrefX uses it to distinguish float
% labels (which have a completely different output format) from
% node and anchor labels. And \xrdef uses it to construct the
% lists of floats.
%
\edef\lastsection{\floatmagic=\safefloattype}%
\setref{\floatlabel}{Yfloat}%
}%
\fi
%
% start with \parskip glue, I guess.
\vskip\parskip
%
% Don't suppress indentation if a float happens to start a section.
\restorefirstparagraphindent
}
% we have these possibilities:
% @float Foo,lbl & @caption{Cap}: Foo 1.1: Cap
% @float Foo,lbl & no caption: Foo 1.1
% @float Foo & @caption{Cap}: Foo: Cap
% @float Foo & no caption: Foo
% @float ,lbl & @caption{Cap}: 1.1: Cap
% @float ,lbl & no caption: 1.1
% @float & @caption{Cap}: Cap
% @float & no caption:
%
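% For illustration (the label and caption text are made up):
%   @float Figure,fig:sample
%   @image{foo, 3in}
%   @caption{A sample figure.}
%   @end float
% An @ref{fig:sample} then prints something like "Figure 1.1".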
\def\Efloat{%
\let\floatident = \empty
%
% In all cases, if we have a float type, it comes first.
\ifx\floattype\empty \else \def\floatident{\floattype}\fi
%
% If we have an xref label, the number comes next.
\ifx\floatlabel\empty \else
\ifx\floattype\empty \else % if also had float type, need tie first.
\appendtomacro\floatident{\tie}%
\fi
% the number.
\appendtomacro\floatident{\chaplevelprefix\the\floatno}%
\fi
%
% Start the printed caption with what we've constructed in
% \floatident, but keep it separate; we need \floatident again.
\let\captionline = \floatident
%
\ifx\thiscaption\empty \else
\ifx\floatident\empty \else
\appendtomacro\captionline{: }% had ident, so need a colon between
\fi
%
% caption text.
\appendtomacro\captionline{\scanexp\thiscaption}%
\fi
%
% If we have anything to print, print it, with space before.
% Eventually this needs to become an \insert.
\ifx\captionline\empty \else
\vskip.5\parskip
\captionline
%
% Space below caption.
\vskip\parskip
\fi
%
% If have an xref label, write the list of floats info. Do this
% after the caption, to avoid chance of it being a breakpoint.
\ifx\floatlabel\empty \else
% Write the text that goes in the lof to the aux file as
% \floatlabel-lof. Besides \floatident, we include the short
% caption if specified, else the full caption if specified, else nothing.
{%
\atdummies
%
% since we read the caption text in the macro world, where ^^M
% is turned into a normal character, we have to scan it back, so
% we don't write the literal three characters "^^M" into the aux file.
\scanexp{%
\xdef\noexpand\gtemp{%
\ifx\thisshortcaption\empty
\thiscaption
\else
\thisshortcaption
\fi
}%
}%
\immediate\write\auxfile{@xrdef{\floatlabel-lof}{\floatident
\ifx\gtemp\empty \else : \gtemp \fi}}%
}%
\fi
\egroup % end of \vtop
%
% place the captured inserts
%
% BEWARE: when the floats start floating, we have to issue a warning
% whenever an insert appears inside a float which could possibly
% float. --kasal, 26may04
%
\checkinserts
}
% Append the tokens #2 to the definition of macro #1, not expanding either.
%
\def\appendtomacro#1#2{%
\expandafter\def\expandafter#1\expandafter{#1#2}%
}
% @caption, @shortcaption
%
\def\caption{\docaption\thiscaption}
\def\shortcaption{\docaption\thisshortcaption}
\def\docaption{\checkenv\float \bgroup\scanargctxt\defcaption}
\def\defcaption#1#2{\egroup \def#1{#2}}
% The parameter is the control sequence identifying the counter we are
% going to use. Create it if it doesn't exist and assign it to \floatno.
\def\getfloatno#1{%
\ifx#1\relax
% Haven't seen this figure type before.
\csname newcount\endcsname #1%
%
% Remember to reset this floatno at the next chap.
\expandafter\gdef\expandafter\resetallfloatnos
\expandafter{\resetallfloatnos #1=0 }%
\fi
\let\floatno#1%
}
% \setref calls this to get the XREFLABEL-snt value. We want an @xref
% to the FLOATLABEL to expand to "Figure 3.1". We call \setref when we
% first read the @float command.
%
\def\Yfloat{\floattype@tie \chaplevelprefix\the\floatno}%
% Magic string used for the XREFLABEL-title value, so \xrefX can
% distinguish floats from other xref types.
\def\floatmagic{!!float!!}
% #1 is the control sequence we are passed; we expand into a conditional
% which is true if #1 represents a float ref. That is, the magic
% \lastsection value which we \setref above.
%
\def\iffloat#1{\expandafter\doiffloat#1==\finish}
%
% #1 is (maybe) the \floatmagic string. If so, #2 will be the
% (safe) float type for this float. We set \iffloattype to #2.
%
\def\doiffloat#1=#2=#3\finish{%
\def\temp{#1}%
\def\iffloattype{#2}%
\ifx\temp\floatmagic
}
% @listoffloats FLOATTYPE - print a list of floats like a table of contents.
%
\parseargdef\listoffloats{%
\def\floattype{#1}% floattype
{%
% the floattype might have accents or other special characters,
% but we need to use it in a control sequence name.
\indexnofonts
\turnoffactive
\xdef\safefloattype{\floattype}%
}%
%
% \xrdef saves the floats as a \do-list in \floatlistSAFEFLOATTYPE.
\expandafter\ifx\csname floatlist\safefloattype\endcsname \relax
\ifhavexrefs
% if the user said @listoffloats foo but never @float foo.
\message{\linenumber No `\safefloattype' floats to list.}%
\fi
\else
\begingroup
\leftskip=\tocindent % indent these entries like a toc
\let\do=\listoffloatsdo
\csname floatlist\safefloattype\endcsname
\endgroup
\fi
}
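% For example, a document using the Figure floats illustrated above can
% print their list with
%   @listoffloats Figure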
% This is called on each entry in a list of floats. We're passed the
% xref label, in the form LABEL-title, which is how we save it in the
% aux file. We strip off the -title and look up \XRLABEL-lof, which
% has the text we're supposed to typeset here.
%
% Figures without xref labels will not be included in the list (since
% they won't appear in the aux file).
%
\def\listoffloatsdo#1{\listoffloatsdoentry#1\finish}
\def\listoffloatsdoentry#1-title\finish{{%
% Can't fully expand XR#1-lof because it can contain anything. Just
% pass the control sequence. On the other hand, XR#1-pg is just the
% page number, and we want to fully expand that so we can get a link
% in pdf output.
\toksA = \expandafter{\csname XR#1-lof\endcsname}%
%
% use the same \entry macro we use to generate the TOC and index.
\edef\writeentry{\noexpand\entry{\the\toksA}{\csname XR#1-pg\endcsname}}%
\writeentry
}}
\message{localization,}
% For single-language documents, @documentlanguage is usually given very
% early, just after @documentencoding. Single argument is the language
% (de) or locale (de_DE) abbreviation.
%
{
\catcode`\_ = \active
\globaldefs=1
\parseargdef\documentlanguage{\begingroup
\let_=\normalunderscore % normal _ character for filenames
\tex % read txi-??.tex file in plain TeX.
% Read the file by the name they passed if it exists.
\openin 1 txi-#1.tex
\ifeof 1
\documentlanguagetrywithoutunderscore{#1_\finish}%
\else
\globaldefs = 1 % everything in the txi-LL files needs to persist
\input txi-#1.tex
\fi
\closein 1
\endgroup % end raw TeX
\endgroup}
}
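% For example, a German document would give, near its beginning,
%   @documentlanguage de
% or, for a specific locale, @documentlanguage de_DE (which falls back to
% txi-de.tex when txi-de_DE.tex is not installed, see below).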
%
% If they passed de_DE, and txi-de_DE.tex doesn't exist,
% try txi-de.tex.
%
\def\documentlanguagetrywithoutunderscore#1_#2\finish{%
\openin 1 txi-#1.tex
\ifeof 1
\errhelp = \nolanghelp
\errmessage{Cannot read language file txi-#1.tex}%
\else
\input txi-#1.tex
\fi
\closein 1
}
%
\newhelp\nolanghelp{The given language definition file cannot be found or
is empty. Maybe you need to install it? Putting it in the current
directory should work if nowhere else does.}
% This macro is called from txi-??.tex files; the first argument is the
% \language name to set (without the "\lang@" prefix), the second and
% third args are \{left,right}hyphenmin.
%
% The language names to pass are determined when the format is built.
% See the etex.log file created at that time, e.g.,
% /usr/local/texlive/2008/texmf-var/web2c/pdftex/etex.log.
%
% With TeX Live 2008, etex now includes hyphenation patterns for all
% available languages. This means we can support hyphenation in
% Texinfo, at least to some extent. (This still doesn't solve the
% accented characters problem.)
%
\catcode`@=11
\def\txisetlanguage#1#2#3{%
% do not set the language if the name is undefined in the current TeX.
\expandafter\ifx\csname lang@#1\endcsname \relax
\message{no patterns for #1}%
\else
\global\language = \csname lang@#1\endcsname
\fi
% but there is no harm in adjusting the hyphenmin values regardless.
\global\lefthyphenmin = #2\relax
\global\righthyphenmin = #3\relax
}
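% A txi-LL.tex file would then contain a call such as (the language name
% and hyphenmin values here are illustrative only):
%   \txisetlanguage{french}{2}{3}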
% Helpers for encodings.
% Set the catcode of characters 128 through 255 to the specified number.
%
\def\setnonasciicharscatcode#1{%
\count255=128
\loop\ifnum\count255<256
\global\catcode\count255=#1\relax
\advance\count255 by 1
\repeat
}
\def\setnonasciicharscatcodenonglobal#1{%
\count255=128
\loop\ifnum\count255<256
\catcode\count255=#1\relax
\advance\count255 by 1
\repeat
}
% @documentencoding sets the definition of non-ASCII characters
% according to the specified encoding.
%
\parseargdef\documentencoding{%
% Encoding being declared for the document.
\def\declaredencoding{\csname #1.enc\endcsname}%
%
% Supported encodings: names converted to tokens in order to be able
% to compare them with \ifx.
\def\ascii{\csname US-ASCII.enc\endcsname}%
\def\latnine{\csname ISO-8859-15.enc\endcsname}%
\def\latone{\csname ISO-8859-1.enc\endcsname}%
\def\lattwo{\csname ISO-8859-2.enc\endcsname}%
\def\utfeight{\csname UTF-8.enc\endcsname}%
%
\ifx \declaredencoding \ascii
\asciichardefs
%
\else \ifx \declaredencoding \lattwo
\setnonasciicharscatcode\active
\lattwochardefs
%
\else \ifx \declaredencoding \latone
\setnonasciicharscatcode\active
\latonechardefs
%
\else \ifx \declaredencoding \latnine
\setnonasciicharscatcode\active
\latninechardefs
%
\else \ifx \declaredencoding \utfeight
\setnonasciicharscatcode\active
\utfeightchardefs
%
\else
\message{Unknown document encoding #1, ignoring.}%
%
\fi % utfeight
\fi % latnine
\fi % latone
\fi % lattwo
\fi % ascii
}
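% For example:
%   @documentencoding UTF-8
% The other recognized values, per the definitions above, are US-ASCII,
% ISO-8859-1, ISO-8859-15 and ISO-8859-2.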
% A message to be logged when using a character that isn't available in
% the default font encoding (OT1).
%
\def\missingcharmsg#1{\message{Character missing in OT1 encoding: #1.}}
% Take account of \c (plain) vs. \, (Texinfo) difference.
\def\cedilla#1{\ifx\c\ptexc\c{#1}\else\,{#1}\fi}
% First, make active non-ASCII characters in order for them to be
% correctly categorized when TeX reads the replacement text of
% macros containing the character definitions.
\setnonasciicharscatcode\active
%
% Latin1 (ISO-8859-1) character definitions.
\def\latonechardefs{%
\gdef^^a0{~}
\gdef^^a1{\exclamdown}
\gdef^^a2{\missingcharmsg{CENT SIGN}}
\gdef^^a3{{\pounds}}
\gdef^^a4{\missingcharmsg{CURRENCY SIGN}}
\gdef^^a5{\missingcharmsg{YEN SIGN}}
\gdef^^a6{\missingcharmsg{BROKEN BAR}}
\gdef^^a7{\S}
\gdef^^a8{\"{}}
\gdef^^a9{\copyright}
\gdef^^aa{\ordf}
\gdef^^ab{\guillemetleft}
\gdef^^ac{$\lnot$}
\gdef^^ad{\-}
\gdef^^ae{\registeredsymbol}
\gdef^^af{\={}}
%
\gdef^^b0{\textdegree}
\gdef^^b1{$\pm$}
\gdef^^b2{$^2$}
\gdef^^b3{$^3$}
\gdef^^b4{\'{}}
\gdef^^b5{$\mu$}
\gdef^^b6{\P}
%
\gdef^^b7{$^.$}
\gdef^^b8{\cedilla\ }
\gdef^^b9{$^1$}
\gdef^^ba{\ordm}
%
\gdef^^bb{\guillemetright}
\gdef^^bc{$1\over4$}
\gdef^^bd{$1\over2$}
\gdef^^be{$3\over4$}
\gdef^^bf{\questiondown}
%
\gdef^^c0{\`A}
\gdef^^c1{\'A}
\gdef^^c2{\^A}
\gdef^^c3{\~A}
\gdef^^c4{\"A}
\gdef^^c5{\ringaccent A}
\gdef^^c6{\AE}
\gdef^^c7{\cedilla C}
\gdef^^c8{\`E}
\gdef^^c9{\'E}
\gdef^^ca{\^E}
\gdef^^cb{\"E}
\gdef^^cc{\`I}
\gdef^^cd{\'I}
\gdef^^ce{\^I}
\gdef^^cf{\"I}
%
\gdef^^d0{\missingcharmsg{LATIN CAPITAL LETTER ETH}}
\gdef^^d1{\~N}
\gdef^^d2{\`O}
\gdef^^d3{\'O}
\gdef^^d4{\^O}
\gdef^^d5{\~O}
\gdef^^d6{\"O}
\gdef^^d7{$\times$}
\gdef^^d8{\O}
\gdef^^d9{\`U}
\gdef^^da{\'U}
\gdef^^db{\^U}
\gdef^^dc{\"U}
\gdef^^dd{\'Y}
\gdef^^de{\missingcharmsg{LATIN CAPITAL LETTER THORN}}
\gdef^^df{\ss}
%
\gdef^^e0{\`a}
\gdef^^e1{\'a}
\gdef^^e2{\^a}
\gdef^^e3{\~a}
\gdef^^e4{\"a}
\gdef^^e5{\ringaccent a}
\gdef^^e6{\ae}
\gdef^^e7{\cedilla c}
\gdef^^e8{\`e}
\gdef^^e9{\'e}
\gdef^^ea{\^e}
\gdef^^eb{\"e}
\gdef^^ec{\`{\dotless i}}
\gdef^^ed{\'{\dotless i}}
\gdef^^ee{\^{\dotless i}}
\gdef^^ef{\"{\dotless i}}
%
\gdef^^f0{\missingcharmsg{LATIN SMALL LETTER ETH}}
\gdef^^f1{\~n}
\gdef^^f2{\`o}
\gdef^^f3{\'o}
\gdef^^f4{\^o}
\gdef^^f5{\~o}
\gdef^^f6{\"o}
\gdef^^f7{$\div$}
\gdef^^f8{\o}
\gdef^^f9{\`u}
\gdef^^fa{\'u}
\gdef^^fb{\^u}
\gdef^^fc{\"u}
\gdef^^fd{\'y}
\gdef^^fe{\missingcharmsg{LATIN SMALL LETTER THORN}}
\gdef^^ff{\"y}
}
% Latin9 (ISO-8859-15) encoding character definitions.
\def\latninechardefs{%
% Encoding is almost identical to Latin1.
\latonechardefs
%
\gdef^^a4{\euro}
\gdef^^a6{\v S}
\gdef^^a8{\v s}
\gdef^^b4{\v Z}
\gdef^^b8{\v z}
\gdef^^bc{\OE}
\gdef^^bd{\oe}
\gdef^^be{\"Y}
}
% Latin2 (ISO-8859-2) character definitions.
\def\lattwochardefs{%
\gdef^^a0{~}
\gdef^^a1{\ogonek{A}}
\gdef^^a2{\u{}}
\gdef^^a3{\L}
\gdef^^a4{\missingcharmsg{CURRENCY SIGN}}
\gdef^^a5{\v L}
\gdef^^a6{\'S}
\gdef^^a7{\S}
\gdef^^a8{\"{}}
\gdef^^a9{\v S}
\gdef^^aa{\cedilla S}
\gdef^^ab{\v T}
\gdef^^ac{\'Z}
\gdef^^ad{\-}
\gdef^^ae{\v Z}
\gdef^^af{\dotaccent Z}
%
\gdef^^b0{\textdegree}
\gdef^^b1{\ogonek{a}}
\gdef^^b2{\ogonek{ }}
\gdef^^b3{\l}
\gdef^^b4{\'{}}
\gdef^^b5{\v l}
\gdef^^b6{\'s}
\gdef^^b7{\v{}}
\gdef^^b8{\cedilla\ }
\gdef^^b9{\v s}
\gdef^^ba{\cedilla s}
\gdef^^bb{\v t}
\gdef^^bc{\'z}
\gdef^^bd{\H{}}
\gdef^^be{\v z}
\gdef^^bf{\dotaccent z}
%
\gdef^^c0{\'R}
\gdef^^c1{\'A}
\gdef^^c2{\^A}
\gdef^^c3{\u A}
\gdef^^c4{\"A}
\gdef^^c5{\'L}
\gdef^^c6{\'C}
\gdef^^c7{\cedilla C}
\gdef^^c8{\v C}
\gdef^^c9{\'E}
\gdef^^ca{\ogonek{E}}
\gdef^^cb{\"E}
\gdef^^cc{\v E}
\gdef^^cd{\'I}
\gdef^^ce{\^I}
\gdef^^cf{\v D}
%
\gdef^^d0{\missingcharmsg{LATIN CAPITAL LETTER D WITH STROKE}}
\gdef^^d1{\'N}
\gdef^^d2{\v N}
\gdef^^d3{\'O}
\gdef^^d4{\^O}
\gdef^^d5{\H O}
\gdef^^d6{\"O}
\gdef^^d7{$\times$}
\gdef^^d8{\v R}
\gdef^^d9{\ringaccent U}
\gdef^^da{\'U}
\gdef^^db{\H U}
\gdef^^dc{\"U}
\gdef^^dd{\'Y}
\gdef^^de{\cedilla T}
\gdef^^df{\ss}
%
\gdef^^e0{\'r}
\gdef^^e1{\'a}
\gdef^^e2{\^a}
\gdef^^e3{\u a}
\gdef^^e4{\"a}
\gdef^^e5{\'l}
\gdef^^e6{\'c}
\gdef^^e7{\cedilla c}
\gdef^^e8{\v c}
\gdef^^e9{\'e}
\gdef^^ea{\ogonek{e}}
\gdef^^eb{\"e}
\gdef^^ec{\v e}
\gdef^^ed{\'\i}
\gdef^^ee{\^\i}
\gdef^^ef{\v d}
%
\gdef^^f0{\missingcharmsg{LATIN SMALL LETTER D WITH STROKE}}
\gdef^^f1{\'n}
\gdef^^f2{\v n}
\gdef^^f3{\'o}
\gdef^^f4{\^o}
\gdef^^f5{\H o}
\gdef^^f6{\"o}
\gdef^^f7{$\div$}
\gdef^^f8{\v r}
\gdef^^f9{\ringaccent u}
\gdef^^fa{\'u}
\gdef^^fb{\H u}
\gdef^^fc{\"u}
\gdef^^fd{\'y}
\gdef^^fe{\cedilla t}
\gdef^^ff{\dotaccent{}}
}
% UTF-8 character definitions.
%
% This code to support UTF-8 is based on LaTeX's utf8.def, with some
% changes for Texinfo conventions. It is included here under the GPL by
% permission from Frank Mittelbach and the LaTeX team.
%
\newcount\countUTFx
\newcount\countUTFy
\newcount\countUTFz
\gdef\UTFviiiTwoOctets#1#2{\expandafter
\UTFviiiDefined\csname u8:#1\string #2\endcsname}
%
\gdef\UTFviiiThreeOctets#1#2#3{\expandafter
\UTFviiiDefined\csname u8:#1\string #2\string #3\endcsname}
%
\gdef\UTFviiiFourOctets#1#2#3#4{\expandafter
\UTFviiiDefined\csname u8:#1\string #2\string #3\string #4\endcsname}
\gdef\UTFviiiDefined#1{%
\ifx #1\relax
\message{\linenumber Unicode char \string #1 not defined for Texinfo}%
\else
\expandafter #1%
\fi
}
\begingroup
\catcode`\~13
\catcode`\"12
\def\UTFviiiLoop{%
\global\catcode\countUTFx\active
\uccode`\~\countUTFx
\uppercase\expandafter{\UTFviiiTmp}%
\advance\countUTFx by 1
\ifnum\countUTFx < \countUTFy
\expandafter\UTFviiiLoop
\fi}
\countUTFx = "C2
\countUTFy = "E0
\def\UTFviiiTmp{%
\xdef~{\noexpand\UTFviiiTwoOctets\string~}}
\UTFviiiLoop
\countUTFx = "E0
\countUTFy = "F0
\def\UTFviiiTmp{%
\xdef~{\noexpand\UTFviiiThreeOctets\string~}}
\UTFviiiLoop
\countUTFx = "F0
\countUTFy = "F4
\def\UTFviiiTmp{%
\xdef~{\noexpand\UTFviiiFourOctets\string~}}
\UTFviiiLoop
\endgroup
\begingroup
\catcode`\"=12
\catcode`\<=12
\catcode`\.=12
\catcode`\,=12
\catcode`\;=12
\catcode`\!=12
\catcode`\~=13
\gdef\DeclareUnicodeCharacter#1#2{%
\countUTFz = "#1\relax
\wlog{\space\space defining Unicode char U+#1 (decimal \the\countUTFz)}%
\begingroup
\parseXMLCharref
\def\UTFviiiTwoOctets##1##2{%
\csname u8:##1\string ##2\endcsname}%
\def\UTFviiiThreeOctets##1##2##3{%
\csname u8:##1\string ##2\string ##3\endcsname}%
\def\UTFviiiFourOctets##1##2##3##4{%
\csname u8:##1\string ##2\string ##3\string ##4\endcsname}%
\expandafter\expandafter\expandafter\expandafter
\expandafter\expandafter\expandafter
\gdef\UTFviiiTmp{#2}%
\endgroup}
\gdef\parseXMLCharref{%
\ifnum\countUTFz < "A0\relax
\errhelp = \EMsimple
\errmessage{Cannot define Unicode char value < 00A0}%
\else\ifnum\countUTFz < "800\relax
\parseUTFviiiA,%
\parseUTFviiiB C\UTFviiiTwoOctets.,%
\else\ifnum\countUTFz < "10000\relax
\parseUTFviiiA;%
\parseUTFviiiA,%
\parseUTFviiiB E\UTFviiiThreeOctets.{,;}%
\else
\parseUTFviiiA;%
\parseUTFviiiA,%
\parseUTFviiiA!%
\parseUTFviiiB F\UTFviiiFourOctets.{!,;}%
\fi\fi\fi
}
\gdef\parseUTFviiiA#1{%
\countUTFx = \countUTFz
\divide\countUTFz by 64
\countUTFy = \countUTFz
\multiply\countUTFz by 64
\advance\countUTFx by -\countUTFz
\advance\countUTFx by 128
\uccode `#1\countUTFx
\countUTFz = \countUTFy}
\gdef\parseUTFviiiB#1#2#3#4{%
\advance\countUTFz by "#10\relax
\uccode `#3\countUTFz
\uppercase{\gdef\UTFviiiTmp{#2#3#4}}}
\endgroup
\def\utfeightchardefs{%
\DeclareUnicodeCharacter{00A0}{\tie}
\DeclareUnicodeCharacter{00A1}{\exclamdown}
\DeclareUnicodeCharacter{00A3}{\pounds}
\DeclareUnicodeCharacter{00A8}{\"{ }}
\DeclareUnicodeCharacter{00A9}{\copyright}
\DeclareUnicodeCharacter{00AA}{\ordf}
\DeclareUnicodeCharacter{00AB}{\guillemetleft}
\DeclareUnicodeCharacter{00AD}{\-}
\DeclareUnicodeCharacter{00AE}{\registeredsymbol}
\DeclareUnicodeCharacter{00AF}{\={ }}
\DeclareUnicodeCharacter{00B0}{\ringaccent{ }}
\DeclareUnicodeCharacter{00B4}{\'{ }}
\DeclareUnicodeCharacter{00B8}{\cedilla{ }}
\DeclareUnicodeCharacter{00BA}{\ordm}
\DeclareUnicodeCharacter{00BB}{\guillemetright}
\DeclareUnicodeCharacter{00BF}{\questiondown}
\DeclareUnicodeCharacter{00C0}{\`A}
\DeclareUnicodeCharacter{00C1}{\'A}
\DeclareUnicodeCharacter{00C2}{\^A}
\DeclareUnicodeCharacter{00C3}{\~A}
\DeclareUnicodeCharacter{00C4}{\"A}
\DeclareUnicodeCharacter{00C5}{\AA}
\DeclareUnicodeCharacter{00C6}{\AE}
\DeclareUnicodeCharacter{00C7}{\cedilla{C}}
\DeclareUnicodeCharacter{00C8}{\`E}
\DeclareUnicodeCharacter{00C9}{\'E}
\DeclareUnicodeCharacter{00CA}{\^E}
\DeclareUnicodeCharacter{00CB}{\"E}
\DeclareUnicodeCharacter{00CC}{\`I}
\DeclareUnicodeCharacter{00CD}{\'I}
\DeclareUnicodeCharacter{00CE}{\^I}
\DeclareUnicodeCharacter{00CF}{\"I}
\DeclareUnicodeCharacter{00D1}{\~N}
\DeclareUnicodeCharacter{00D2}{\`O}
\DeclareUnicodeCharacter{00D3}{\'O}
\DeclareUnicodeCharacter{00D4}{\^O}
\DeclareUnicodeCharacter{00D5}{\~O}
\DeclareUnicodeCharacter{00D6}{\"O}
\DeclareUnicodeCharacter{00D8}{\O}
\DeclareUnicodeCharacter{00D9}{\`U}
\DeclareUnicodeCharacter{00DA}{\'U}
\DeclareUnicodeCharacter{00DB}{\^U}
\DeclareUnicodeCharacter{00DC}{\"U}
\DeclareUnicodeCharacter{00DD}{\'Y}
\DeclareUnicodeCharacter{00DF}{\ss}
\DeclareUnicodeCharacter{00E0}{\`a}
\DeclareUnicodeCharacter{00E1}{\'a}
\DeclareUnicodeCharacter{00E2}{\^a}
\DeclareUnicodeCharacter{00E3}{\~a}
\DeclareUnicodeCharacter{00E4}{\"a}
\DeclareUnicodeCharacter{00E5}{\aa}
\DeclareUnicodeCharacter{00E6}{\ae}
\DeclareUnicodeCharacter{00E7}{\cedilla{c}}
\DeclareUnicodeCharacter{00E8}{\`e}
\DeclareUnicodeCharacter{00E9}{\'e}
\DeclareUnicodeCharacter{00EA}{\^e}
\DeclareUnicodeCharacter{00EB}{\"e}
\DeclareUnicodeCharacter{00EC}{\`{\dotless{i}}}
\DeclareUnicodeCharacter{00ED}{\'{\dotless{i}}}
\DeclareUnicodeCharacter{00EE}{\^{\dotless{i}}}
\DeclareUnicodeCharacter{00EF}{\"{\dotless{i}}}
\DeclareUnicodeCharacter{00F1}{\~n}
\DeclareUnicodeCharacter{00F2}{\`o}
\DeclareUnicodeCharacter{00F3}{\'o}
\DeclareUnicodeCharacter{00F4}{\^o}
\DeclareUnicodeCharacter{00F5}{\~o}
\DeclareUnicodeCharacter{00F6}{\"o}
\DeclareUnicodeCharacter{00F8}{\o}
\DeclareUnicodeCharacter{00F9}{\`u}
\DeclareUnicodeCharacter{00FA}{\'u}
\DeclareUnicodeCharacter{00FB}{\^u}
\DeclareUnicodeCharacter{00FC}{\"u}
\DeclareUnicodeCharacter{00FD}{\'y}
\DeclareUnicodeCharacter{00FF}{\"y}
\DeclareUnicodeCharacter{0100}{\=A}
\DeclareUnicodeCharacter{0101}{\=a}
\DeclareUnicodeCharacter{0102}{\u{A}}
\DeclareUnicodeCharacter{0103}{\u{a}}
\DeclareUnicodeCharacter{0104}{\ogonek{A}}
\DeclareUnicodeCharacter{0105}{\ogonek{a}}
\DeclareUnicodeCharacter{0106}{\'C}
\DeclareUnicodeCharacter{0107}{\'c}
\DeclareUnicodeCharacter{0108}{\^C}
\DeclareUnicodeCharacter{0109}{\^c}
\DeclareUnicodeCharacter{0118}{\ogonek{E}}
\DeclareUnicodeCharacter{0119}{\ogonek{e}}
\DeclareUnicodeCharacter{010A}{\dotaccent{C}}
\DeclareUnicodeCharacter{010B}{\dotaccent{c}}
\DeclareUnicodeCharacter{010C}{\v{C}}
\DeclareUnicodeCharacter{010D}{\v{c}}
\DeclareUnicodeCharacter{010E}{\v{D}}
\DeclareUnicodeCharacter{0112}{\=E}
\DeclareUnicodeCharacter{0113}{\=e}
\DeclareUnicodeCharacter{0114}{\u{E}}
\DeclareUnicodeCharacter{0115}{\u{e}}
\DeclareUnicodeCharacter{0116}{\dotaccent{E}}
\DeclareUnicodeCharacter{0117}{\dotaccent{e}}
\DeclareUnicodeCharacter{011A}{\v{E}}
\DeclareUnicodeCharacter{011B}{\v{e}}
\DeclareUnicodeCharacter{011C}{\^G}
\DeclareUnicodeCharacter{011D}{\^g}
\DeclareUnicodeCharacter{011E}{\u{G}}
\DeclareUnicodeCharacter{011F}{\u{g}}
\DeclareUnicodeCharacter{0120}{\dotaccent{G}}
\DeclareUnicodeCharacter{0121}{\dotaccent{g}}
\DeclareUnicodeCharacter{0124}{\^H}
\DeclareUnicodeCharacter{0125}{\^h}
\DeclareUnicodeCharacter{0128}{\~I}
\DeclareUnicodeCharacter{0129}{\~{\dotless{i}}}
\DeclareUnicodeCharacter{012A}{\=I}
\DeclareUnicodeCharacter{012B}{\={\dotless{i}}}
\DeclareUnicodeCharacter{012C}{\u{I}}
\DeclareUnicodeCharacter{012D}{\u{\dotless{i}}}
\DeclareUnicodeCharacter{0130}{\dotaccent{I}}
\DeclareUnicodeCharacter{0131}{\dotless{i}}
\DeclareUnicodeCharacter{0132}{IJ}
\DeclareUnicodeCharacter{0133}{ij}
\DeclareUnicodeCharacter{0134}{\^J}
\DeclareUnicodeCharacter{0135}{\^{\dotless{j}}}
\DeclareUnicodeCharacter{0139}{\'L}
\DeclareUnicodeCharacter{013A}{\'l}
\DeclareUnicodeCharacter{0141}{\L}
\DeclareUnicodeCharacter{0142}{\l}
\DeclareUnicodeCharacter{0143}{\'N}
\DeclareUnicodeCharacter{0144}{\'n}
\DeclareUnicodeCharacter{0147}{\v{N}}
\DeclareUnicodeCharacter{0148}{\v{n}}
\DeclareUnicodeCharacter{014C}{\=O}
\DeclareUnicodeCharacter{014D}{\=o}
\DeclareUnicodeCharacter{014E}{\u{O}}
\DeclareUnicodeCharacter{014F}{\u{o}}
\DeclareUnicodeCharacter{0150}{\H{O}}
\DeclareUnicodeCharacter{0151}{\H{o}}
\DeclareUnicodeCharacter{0152}{\OE}
\DeclareUnicodeCharacter{0153}{\oe}
\DeclareUnicodeCharacter{0154}{\'R}
\DeclareUnicodeCharacter{0155}{\'r}
\DeclareUnicodeCharacter{0158}{\v{R}}
\DeclareUnicodeCharacter{0159}{\v{r}}
\DeclareUnicodeCharacter{015A}{\'S}
\DeclareUnicodeCharacter{015B}{\'s}
\DeclareUnicodeCharacter{015C}{\^S}
\DeclareUnicodeCharacter{015D}{\^s}
\DeclareUnicodeCharacter{015E}{\cedilla{S}}
\DeclareUnicodeCharacter{015F}{\cedilla{s}}
\DeclareUnicodeCharacter{0160}{\v{S}}
\DeclareUnicodeCharacter{0161}{\v{s}}
\DeclareUnicodeCharacter{0162}{\cedilla{T}}
\DeclareUnicodeCharacter{0163}{\cedilla{t}}
\DeclareUnicodeCharacter{0164}{\v{T}}
\DeclareUnicodeCharacter{0168}{\~U}
\DeclareUnicodeCharacter{0169}{\~u}
\DeclareUnicodeCharacter{016A}{\=U}
\DeclareUnicodeCharacter{016B}{\=u}
\DeclareUnicodeCharacter{016C}{\u{U}}
\DeclareUnicodeCharacter{016D}{\u{u}}
\DeclareUnicodeCharacter{016E}{\ringaccent{U}}
\DeclareUnicodeCharacter{016F}{\ringaccent{u}}
\DeclareUnicodeCharacter{0170}{\H{U}}
\DeclareUnicodeCharacter{0171}{\H{u}}
\DeclareUnicodeCharacter{0174}{\^W}
\DeclareUnicodeCharacter{0175}{\^w}
\DeclareUnicodeCharacter{0176}{\^Y}
\DeclareUnicodeCharacter{0177}{\^y}
\DeclareUnicodeCharacter{0178}{\"Y}
\DeclareUnicodeCharacter{0179}{\'Z}
\DeclareUnicodeCharacter{017A}{\'z}
\DeclareUnicodeCharacter{017B}{\dotaccent{Z}}
\DeclareUnicodeCharacter{017C}{\dotaccent{z}}
\DeclareUnicodeCharacter{017D}{\v{Z}}
\DeclareUnicodeCharacter{017E}{\v{z}}
\DeclareUnicodeCharacter{01C4}{D\v{Z}}
\DeclareUnicodeCharacter{01C5}{D\v{z}}
\DeclareUnicodeCharacter{01C6}{d\v{z}}
\DeclareUnicodeCharacter{01C7}{LJ}
\DeclareUnicodeCharacter{01C8}{Lj}
\DeclareUnicodeCharacter{01C9}{lj}
\DeclareUnicodeCharacter{01CA}{NJ}
\DeclareUnicodeCharacter{01CB}{Nj}
\DeclareUnicodeCharacter{01CC}{nj}
\DeclareUnicodeCharacter{01CD}{\v{A}}
\DeclareUnicodeCharacter{01CE}{\v{a}}
\DeclareUnicodeCharacter{01CF}{\v{I}}
\DeclareUnicodeCharacter{01D0}{\v{\dotless{i}}}
\DeclareUnicodeCharacter{01D1}{\v{O}}
\DeclareUnicodeCharacter{01D2}{\v{o}}
\DeclareUnicodeCharacter{01D3}{\v{U}}
\DeclareUnicodeCharacter{01D4}{\v{u}}
\DeclareUnicodeCharacter{01E2}{\={\AE}}
\DeclareUnicodeCharacter{01E3}{\={\ae}}
\DeclareUnicodeCharacter{01E6}{\v{G}}
\DeclareUnicodeCharacter{01E7}{\v{g}}
\DeclareUnicodeCharacter{01E8}{\v{K}}
\DeclareUnicodeCharacter{01E9}{\v{k}}
\DeclareUnicodeCharacter{01F0}{\v{\dotless{j}}}
\DeclareUnicodeCharacter{01F1}{DZ}
\DeclareUnicodeCharacter{01F2}{Dz}
\DeclareUnicodeCharacter{01F3}{dz}
\DeclareUnicodeCharacter{01F4}{\'G}
\DeclareUnicodeCharacter{01F5}{\'g}
\DeclareUnicodeCharacter{01F8}{\`N}
\DeclareUnicodeCharacter{01F9}{\`n}
\DeclareUnicodeCharacter{01FC}{\'{\AE}}
\DeclareUnicodeCharacter{01FD}{\'{\ae}}
\DeclareUnicodeCharacter{01FE}{\'{\O}}
\DeclareUnicodeCharacter{01FF}{\'{\o}}
\DeclareUnicodeCharacter{021E}{\v{H}}
\DeclareUnicodeCharacter{021F}{\v{h}}
\DeclareUnicodeCharacter{0226}{\dotaccent{A}}
\DeclareUnicodeCharacter{0227}{\dotaccent{a}}
\DeclareUnicodeCharacter{0228}{\cedilla{E}}
\DeclareUnicodeCharacter{0229}{\cedilla{e}}
\DeclareUnicodeCharacter{022E}{\dotaccent{O}}
\DeclareUnicodeCharacter{022F}{\dotaccent{o}}
\DeclareUnicodeCharacter{0232}{\=Y}
\DeclareUnicodeCharacter{0233}{\=y}
\DeclareUnicodeCharacter{0237}{\dotless{j}}
\DeclareUnicodeCharacter{02DB}{\ogonek{ }}
\DeclareUnicodeCharacter{1E02}{\dotaccent{B}}
\DeclareUnicodeCharacter{1E03}{\dotaccent{b}}
\DeclareUnicodeCharacter{1E04}{\udotaccent{B}}
\DeclareUnicodeCharacter{1E05}{\udotaccent{b}}
\DeclareUnicodeCharacter{1E06}{\ubaraccent{B}}
\DeclareUnicodeCharacter{1E07}{\ubaraccent{b}}
\DeclareUnicodeCharacter{1E0A}{\dotaccent{D}}
\DeclareUnicodeCharacter{1E0B}{\dotaccent{d}}
\DeclareUnicodeCharacter{1E0C}{\udotaccent{D}}
\DeclareUnicodeCharacter{1E0D}{\udotaccent{d}}
\DeclareUnicodeCharacter{1E0E}{\ubaraccent{D}}
\DeclareUnicodeCharacter{1E0F}{\ubaraccent{d}}
\DeclareUnicodeCharacter{1E1E}{\dotaccent{F}}
\DeclareUnicodeCharacter{1E1F}{\dotaccent{f}}
\DeclareUnicodeCharacter{1E20}{\=G}
\DeclareUnicodeCharacter{1E21}{\=g}
\DeclareUnicodeCharacter{1E22}{\dotaccent{H}}
\DeclareUnicodeCharacter{1E23}{\dotaccent{h}}
\DeclareUnicodeCharacter{1E24}{\udotaccent{H}}
\DeclareUnicodeCharacter{1E25}{\udotaccent{h}}
\DeclareUnicodeCharacter{1E26}{\"H}
\DeclareUnicodeCharacter{1E27}{\"h}
\DeclareUnicodeCharacter{1E30}{\'K}
\DeclareUnicodeCharacter{1E31}{\'k}
\DeclareUnicodeCharacter{1E32}{\udotaccent{K}}
\DeclareUnicodeCharacter{1E33}{\udotaccent{k}}
\DeclareUnicodeCharacter{1E34}{\ubaraccent{K}}
\DeclareUnicodeCharacter{1E35}{\ubaraccent{k}}
\DeclareUnicodeCharacter{1E36}{\udotaccent{L}}
\DeclareUnicodeCharacter{1E37}{\udotaccent{l}}
\DeclareUnicodeCharacter{1E3A}{\ubaraccent{L}}
\DeclareUnicodeCharacter{1E3B}{\ubaraccent{l}}
\DeclareUnicodeCharacter{1E3E}{\'M}
\DeclareUnicodeCharacter{1E3F}{\'m}
\DeclareUnicodeCharacter{1E40}{\dotaccent{M}}
\DeclareUnicodeCharacter{1E41}{\dotaccent{m}}
\DeclareUnicodeCharacter{1E42}{\udotaccent{M}}
\DeclareUnicodeCharacter{1E43}{\udotaccent{m}}
\DeclareUnicodeCharacter{1E44}{\dotaccent{N}}
\DeclareUnicodeCharacter{1E45}{\dotaccent{n}}
\DeclareUnicodeCharacter{1E46}{\udotaccent{N}}
\DeclareUnicodeCharacter{1E47}{\udotaccent{n}}
\DeclareUnicodeCharacter{1E48}{\ubaraccent{N}}
\DeclareUnicodeCharacter{1E49}{\ubaraccent{n}}
\DeclareUnicodeCharacter{1E54}{\'P}
\DeclareUnicodeCharacter{1E55}{\'p}
\DeclareUnicodeCharacter{1E56}{\dotaccent{P}}
\DeclareUnicodeCharacter{1E57}{\dotaccent{p}}
\DeclareUnicodeCharacter{1E58}{\dotaccent{R}}
\DeclareUnicodeCharacter{1E59}{\dotaccent{r}}
\DeclareUnicodeCharacter{1E5A}{\udotaccent{R}}
\DeclareUnicodeCharacter{1E5B}{\udotaccent{r}}
\DeclareUnicodeCharacter{1E5E}{\ubaraccent{R}}
\DeclareUnicodeCharacter{1E5F}{\ubaraccent{r}}
\DeclareUnicodeCharacter{1E60}{\dotaccent{S}}
\DeclareUnicodeCharacter{1E61}{\dotaccent{s}}
\DeclareUnicodeCharacter{1E62}{\udotaccent{S}}
\DeclareUnicodeCharacter{1E63}{\udotaccent{s}}
\DeclareUnicodeCharacter{1E6A}{\dotaccent{T}}
\DeclareUnicodeCharacter{1E6B}{\dotaccent{t}}
\DeclareUnicodeCharacter{1E6C}{\udotaccent{T}}
\DeclareUnicodeCharacter{1E6D}{\udotaccent{t}}
\DeclareUnicodeCharacter{1E6E}{\ubaraccent{T}}
\DeclareUnicodeCharacter{1E6F}{\ubaraccent{t}}
\DeclareUnicodeCharacter{1E7C}{\~V}
\DeclareUnicodeCharacter{1E7D}{\~v}
\DeclareUnicodeCharacter{1E7E}{\udotaccent{V}}
\DeclareUnicodeCharacter{1E7F}{\udotaccent{v}}
\DeclareUnicodeCharacter{1E80}{\`W}
\DeclareUnicodeCharacter{1E81}{\`w}
\DeclareUnicodeCharacter{1E82}{\'W}
\DeclareUnicodeCharacter{1E83}{\'w}
\DeclareUnicodeCharacter{1E84}{\"W}
\DeclareUnicodeCharacter{1E85}{\"w}
\DeclareUnicodeCharacter{1E86}{\dotaccent{W}}
\DeclareUnicodeCharacter{1E87}{\dotaccent{w}}
\DeclareUnicodeCharacter{1E88}{\udotaccent{W}}
\DeclareUnicodeCharacter{1E89}{\udotaccent{w}}
\DeclareUnicodeCharacter{1E8A}{\dotaccent{X}}
\DeclareUnicodeCharacter{1E8B}{\dotaccent{x}}
\DeclareUnicodeCharacter{1E8C}{\"X}
\DeclareUnicodeCharacter{1E8D}{\"x}
\DeclareUnicodeCharacter{1E8E}{\dotaccent{Y}}
\DeclareUnicodeCharacter{1E8F}{\dotaccent{y}}
\DeclareUnicodeCharacter{1E90}{\^Z}
\DeclareUnicodeCharacter{1E91}{\^z}
\DeclareUnicodeCharacter{1E92}{\udotaccent{Z}}
\DeclareUnicodeCharacter{1E93}{\udotaccent{z}}
\DeclareUnicodeCharacter{1E94}{\ubaraccent{Z}}
\DeclareUnicodeCharacter{1E95}{\ubaraccent{z}}
\DeclareUnicodeCharacter{1E96}{\ubaraccent{h}}
\DeclareUnicodeCharacter{1E97}{\"t}
\DeclareUnicodeCharacter{1E98}{\ringaccent{w}}
\DeclareUnicodeCharacter{1E99}{\ringaccent{y}}
\DeclareUnicodeCharacter{1EA0}{\udotaccent{A}}
\DeclareUnicodeCharacter{1EA1}{\udotaccent{a}}
\DeclareUnicodeCharacter{1EB8}{\udotaccent{E}}
\DeclareUnicodeCharacter{1EB9}{\udotaccent{e}}
\DeclareUnicodeCharacter{1EBC}{\~E}
\DeclareUnicodeCharacter{1EBD}{\~e}
\DeclareUnicodeCharacter{1ECA}{\udotaccent{I}}
\DeclareUnicodeCharacter{1ECB}{\udotaccent{i}}
\DeclareUnicodeCharacter{1ECC}{\udotaccent{O}}
\DeclareUnicodeCharacter{1ECD}{\udotaccent{o}}
\DeclareUnicodeCharacter{1EE4}{\udotaccent{U}}
\DeclareUnicodeCharacter{1EE5}{\udotaccent{u}}
\DeclareUnicodeCharacter{1EF2}{\`Y}
\DeclareUnicodeCharacter{1EF3}{\`y}
\DeclareUnicodeCharacter{1EF4}{\udotaccent{Y}}
\DeclareUnicodeCharacter{1EF8}{\~Y}
\DeclareUnicodeCharacter{1EF9}{\~y}
\DeclareUnicodeCharacter{2013}{--}
\DeclareUnicodeCharacter{2014}{---}
\DeclareUnicodeCharacter{2018}{\quoteleft}
\DeclareUnicodeCharacter{2019}{\quoteright}
\DeclareUnicodeCharacter{201A}{\quotesinglbase}
\DeclareUnicodeCharacter{201C}{\quotedblleft}
\DeclareUnicodeCharacter{201D}{\quotedblright}
\DeclareUnicodeCharacter{201E}{\quotedblbase}
\DeclareUnicodeCharacter{2022}{\bullet}
\DeclareUnicodeCharacter{2026}{\dots}
\DeclareUnicodeCharacter{2039}{\guilsinglleft}
\DeclareUnicodeCharacter{203A}{\guilsinglright}
\DeclareUnicodeCharacter{20AC}{\euro}
\DeclareUnicodeCharacter{2192}{\expansion}
\DeclareUnicodeCharacter{21D2}{\result}
\DeclareUnicodeCharacter{2212}{\minus}
\DeclareUnicodeCharacter{2217}{\point}
\DeclareUnicodeCharacter{2261}{\equiv}
}% end of \utfeightchardefs
% US-ASCII character definitions.
\def\asciichardefs{% nothing need be done
\relax
}
% Make non-ASCII characters printable again for compatibility with
% existing Texinfo documents that may use them, even without declaring a
% document encoding.
%
\setnonasciicharscatcode \other
\message{formatting,}
\newdimen\defaultparindent \defaultparindent = 15pt
\chapheadingskip = 15pt plus 4pt minus 2pt
\secheadingskip = 12pt plus 3pt minus 2pt
\subsecheadingskip = 9pt plus 2pt minus 2pt
% Prevent underfull vbox error messages.
\vbadness = 10000
% Don't be so finicky about underfull hboxes, either.
\hbadness = 2000
% Following George Bush, get rid of widows and orphans.
\widowpenalty=10000
\clubpenalty=10000
% Use TeX 3.0's \emergencystretch to help line breaking, but if we're
% using an old version of TeX, don't do anything. We want the amount of
% stretch added to depend on the line length, hence the dependence on
% \hsize. We call this whenever the paper size is set.
%
\def\setemergencystretch{%
\ifx\emergencystretch\thisisundefined
% Allow us to assign to \emergencystretch anyway.
\def\emergencystretch{\dimen0}%
\else
\emergencystretch = .15\hsize
\fi
}
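% (Illustrative note, not part of the original macro: with a TeX that
% provides \emergencystretch, the stretch scales with the line width,
% e.g. the 6in \hsize used by @letterpaper below gives
% .15 * 6in = 0.9in of emergency stretch.)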
% Parameters in order: 1) textheight; 2) textwidth;
% 3) voffset; 4) hoffset; 5) binding offset; 6) topskip;
% 7) physical page height; 8) physical page width.
%
% We also call \setleading{\textleading}, so the caller should define
% \textleading. The caller should also set \parskip.
%
\def\internalpagesizes#1#2#3#4#5#6#7#8{%
\voffset = #3\relax
\topskip = #6\relax
\splittopskip = \topskip
%
\vsize = #1\relax
\advance\vsize by \topskip
\outervsize = \vsize
\advance\outervsize by 2\topandbottommargin
\pageheight = \vsize
%
\hsize = #2\relax
\outerhsize = \hsize
\advance\outerhsize by 0.5in
\pagewidth = \hsize
%
\normaloffset = #4\relax
\bindingoffset = #5\relax
%
\ifpdf
\pdfpageheight #7\relax
\pdfpagewidth #8\relax
% if we don't reset these, they will remain at "1 true in" of
% whatever layout pdftex was dumped with.
\pdfhorigin = 1 true in
\pdfvorigin = 1 true in
\fi
%
\setleading{\textleading}
%
\parindent = \defaultparindent
\setemergencystretch
}
% @letterpaper (the default).
\def\letterpaper{{\globaldefs = 1
\parskip = 3pt plus 2pt minus 1pt
\textleading = 13.2pt
%
% If page is nothing but text, make it come out even.
\internalpagesizes{607.2pt}{6in}% that's 46 lines
{\voffset}{.25in}%
{\bindingoffset}{36pt}%
{11in}{8.5in}%
}}
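% (Reading the \letterpaper call above against the parameter list of
% \internalpagesizes: 607.2pt/6in are the text height and width (#1/#2),
% \voffset/.25in the vertical and horizontal offsets (#3/#4),
% \bindingoffset/36pt the binding offset and \topskip (#5/#6), and
% 11in/8.5in the physical page height and width (#7/#8).)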
% Use @smallbook to reset parameters for 7x9.25 trim size.
\def\smallbook{{\globaldefs = 1
\parskip = 2pt plus 1pt
\textleading = 12pt
%
\internalpagesizes{7.5in}{5in}%
{-.2in}{0in}%
{\bindingoffset}{16pt}%
{9.25in}{7in}%
%
\lispnarrowing = 0.3in
\tolerance = 700
\hfuzz = 1pt
\contentsrightmargin = 0pt
\defbodyindent = .5cm
}}
% Use @smallerbook to reset parameters for 6x9 trim size.
% (Just testing, parameters still in flux.)
\def\smallerbook{{\globaldefs = 1
\parskip = 1.5pt plus 1pt
\textleading = 12pt
%
\internalpagesizes{7.4in}{4.8in}%
{-.2in}{-.4in}%
{0pt}{14pt}%
{9in}{6in}%
%
\lispnarrowing = 0.25in
\tolerance = 700
\hfuzz = 1pt
\contentsrightmargin = 0pt
\defbodyindent = .4cm
}}
% Use @afourpaper to print on European A4 paper.
\def\afourpaper{{\globaldefs = 1
\parskip = 3pt plus 2pt minus 1pt
\textleading = 13.2pt
%
% Double-side printing via postscript on Laserjet 4050
% prints double-sided nicely when \bindingoffset=10mm and \hoffset=-6mm.
% To change the settings for a different printer or situation, adjust
% \normaloffset until the front-side and back-side texts align. Then
% do the same for \bindingoffset. You can set these for testing in
% your texinfo source file like this:
% @tex
% \global\normaloffset = -6mm
% \global\bindingoffset = 10mm
% @end tex
\internalpagesizes{673.2pt}{160mm}% that's 51 lines
{\voffset}{\hoffset}%
{\bindingoffset}{44pt}%
{297mm}{210mm}%
%
\tolerance = 700
\hfuzz = 1pt
\contentsrightmargin = 0pt
\defbodyindent = 5mm
}}
% Use @afivepaper to print on European A5 paper.
% From romildo@urano.iceb.ufop.br, 2 July 2000.
% He also recommends making @example and @lisp be small.
\def\afivepaper{{\globaldefs = 1
\parskip = 2pt plus 1pt minus 0.1pt
\textleading = 12.5pt
%
\internalpagesizes{160mm}{120mm}%
{\voffset}{\hoffset}%
{\bindingoffset}{8pt}%
{210mm}{148mm}%
%
\lispnarrowing = 0.2in
\tolerance = 800
\hfuzz = 1.2pt
\contentsrightmargin = 0pt
\defbodyindent = 2mm
\tableindent = 12mm
}}
% A specific text layout, 24x15cm overall, intended for A4 paper.
\def\afourlatex{{\globaldefs = 1
\afourpaper
\internalpagesizes{237mm}{150mm}%
{\voffset}{4.6mm}%
{\bindingoffset}{7mm}%
{297mm}{210mm}%
%
% Must explicitly reset to 0 because we call \afourpaper.
\globaldefs = 0
}}
% Use @afourwide to print on A4 paper in landscape format.
\def\afourwide{{\globaldefs = 1
\afourpaper
\internalpagesizes{241mm}{165mm}%
{\voffset}{-2.95mm}%
{\bindingoffset}{7mm}%
{297mm}{210mm}%
\globaldefs = 0
}}
% @pagesizes TEXTHEIGHT[,TEXTWIDTH]
% Perhaps we should allow setting the margins, \topskip, \parskip,
% and/or leading, also. Or perhaps we should compute them somehow.
%
\parseargdef\pagesizes{\pagesizesyyy #1,,\finish}
\def\pagesizesyyy#1,#2,#3\finish{{%
\setbox0 = \hbox{\ignorespaces #2}\ifdim\wd0 > 0pt \hsize=#2\relax \fi
\globaldefs = 1
%
\parskip = 3pt plus 2pt minus 1pt
\setleading{\textleading}%
%
\dimen0 = #1\relax
\advance\dimen0 by \voffset
%
\dimen2 = \hsize
\advance\dimen2 by \normaloffset
%
\internalpagesizes{#1}{\hsize}%
{\voffset}{\normaloffset}%
{\bindingoffset}{44pt}%
{\dimen0}{\dimen2}%
}}
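% A minimal usage sketch (illustrative values, not taken from the recutils
% manual): a Texinfo source file could request a custom text block with
%   @pagesizes 9.5in, 6.5in
% where the second argument is optional; when it is omitted, the \wd0 test
% above leaves \hsize at its current value.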
% Set default to letter.
%
\letterpaper
\message{and turning on texinfo input format.}
% Define macros to output various characters with catcode for normal text.
\catcode`\"=\other
\catcode`\~=\other
\catcode`\^=\other
\catcode`\_=\other
\catcode`\|=\other
\catcode`\<=\other
\catcode`\>=\other
\catcode`\+=\other
\catcode`\$=\other
\def\normaldoublequote{"}
\def\normaltilde{~}
\def\normalcaret{^}
\def\normalunderscore{_}
\def\normalverticalbar{|}
\def\normalless{<}
\def\normalgreater{>}
\def\normalplus{+}
\def\normaldollar{$}%$ font-lock fix
% This macro is used to make a character print one way in \tt
% (where it can probably be output as-is), and another way in other fonts,
% where something hairier probably needs to be done.
%
% #1 is what to print if we are indeed using \tt; #2 is what to print
% otherwise. Since all the Computer Modern typewriter fonts have zero
% interword stretch (and shrink), and it is reasonable to expect all
% typewriter fonts to have this, we can check that font parameter.
%
\def\ifusingtt#1#2{\ifdim \fontdimen3\font=0pt #1\else #2\fi}
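% (For instance, the active _ defined further below expands to
% \ifusingtt\normalunderscore\_ : a plain underscore in typewriter text,
% and the drawn \_ rule box in other fonts.)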
% Same as above, but check for italic font. Actually this also catches
% non-italic slanted fonts since it is impossible to distinguish them from
% italic fonts. But since this is only used by $ and it uses \sl anyway
% this is not a problem.
\def\ifusingit#1#2{\ifdim \fontdimen1\font>0pt #1\else #2\fi}
% Turn off all special characters except @
% (and those which the user can use as if they were ordinary).
% Most of these we simply print from the \tt font, but for some, we can
% use math or other variants that look better in normal text.
\catcode`\"=\active
\def\activedoublequote{{\tt\char34}}
\let"=\activedoublequote
\catcode`\~=\active
\def~{{\tt\char126}}
\chardef\hat=`\^
\catcode`\^=\active
\def^{{\tt \hat}}
\catcode`\_=\active
\def_{\ifusingtt\normalunderscore\_}
\let\realunder=_
% Subroutine for the previous macro.
\def\_{\leavevmode \kern.07em \vbox{\hrule width.3em height.1ex}\kern .07em }
\catcode`\|=\active
\def|{{\tt\char124}}
\chardef \less=`\<
\catcode`\<=\active
\def<{{\tt \less}}
\chardef \gtr=`\>
\catcode`\>=\active
\def>{{\tt \gtr}}
\catcode`\+=\active
\def+{{\tt \char 43}}
\catcode`\$=\active
\def${\ifusingit{{\sl\$}}\normaldollar}%$ font-lock fix
% If a .fmt file is being used, characters that might appear in a file
% name cannot be active until we have parsed the command line.
% So turn them off again, and have \everyjob (or @setfilename) turn them on.
% \otherifyactive is called near the end of this file.
\def\otherifyactive{\catcode`+=\other \catcode`\_=\other}
% Used sometimes to turn off (effectively) the active characters even after
% parsing them.
\def\turnoffactive{%
\normalturnoffactive
\otherbackslash
}
\catcode`\@=0
% \backslashcurfont outputs one backslash character in current font,
% as in \char`\\.
\global\chardef\backslashcurfont=`\\
\global\let\rawbackslashxx=\backslashcurfont % let existing .??s files work
% \realbackslash is an actual character `\' with catcode other, and
% \doublebackslash is two of them (for the pdf outlines).
{\catcode`\\=\other @gdef@realbackslash{\} @gdef@doublebackslash{\\}}
% In texinfo, backslash is an active character; it prints the backslash
% in fixed width font.
\catcode`\\=\active
@def@normalbackslash{{@tt@backslashcurfont}}
% On startup, @fixbackslash assigns:
% @let \ = @normalbackslash
% \rawbackslash defines an active \ to do \backslashcurfont.
% \otherbackslash defines an active \ to be a literal `\' character with
% catcode other.
@gdef@rawbackslash{@let\=@backslashcurfont}
@gdef@otherbackslash{@let\=@realbackslash}
% Same as @turnoffactive except outputs \ as {\tt\char`\\} instead of
% the literal character `\'.
%
@def@normalturnoffactive{%
@let\=@normalbackslash
@let"=@normaldoublequote
@let~=@normaltilde
@let^=@normalcaret
@let_=@normalunderscore
@let|=@normalverticalbar
@let<=@normalless
@let>=@normalgreater
@let+=@normalplus
@let$=@normaldollar %$ font-lock fix
@markupsetuplqdefault
@markupsetuprqdefault
@unsepspaces
}
% Make _ and + \other characters, temporarily.
% This is canceled by @fixbackslash.
@otherifyactive
% If a .fmt file is being used, we don't want the `\input texinfo' to show up.
% That is what \eatinput is for; after that, the `\' should revert to printing
% a backslash.
%
@gdef@eatinput input texinfo{@fixbackslash}
@global@let\ = @eatinput
% On the other hand, perhaps the file did not have a `\input texinfo'. Then
% the first `\' in the file would cause an error. This macro tries to fix
% that, assuming it is called before the first `\' could plausibly occur.
% Also turn back on active characters that might appear in the input
% file name, in case not using a pre-dumped format.
%
@gdef@fixbackslash{%
@ifx\@eatinput @let\ = @normalbackslash @fi
@catcode`+=@active
@catcode`@_=@active
}
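% (Net effect, as a sketch: the very first `\' of a normal document is
% @eatinput, which consumes the literal text `input texinfo' that follows
% it and then calls @fixbackslash, restoring \ to @normalbackslash and
% re-activating + and _ for the rest of the run.)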
% Say @foo, not \foo, in error messages.
@escapechar = `@@
% These look ok in all fonts, so just make them not special.
@catcode`@& = @other
@catcode`@# = @other
@catcode`@% = @other
@c Finally, make ` and ' active, so that txicodequoteundirected and
@c txicodequotebacktick work right in, e.g., @w{@code{`foo'}}. If we
@c don't make ` and ' active, @code will not get them as active chars.
@c Do this last of all since we use ` in the previous @catcode assignments.
@catcode`@'=@active
@catcode`@`=@active
@markupsetuplqdefault
@markupsetuprqdefault
@c Local variables:
@c eval: (add-hook 'write-file-hooks 'time-stamp)
@c page-delimiter: "^\\\\message"
@c time-stamp-start: "def\\\\texinfoversion{"
@c time-stamp-format: "%:y-%02m-%02d.%02H"
@c time-stamp-end: "}"
@c End:
@c vim:sw=2:
@ignore
arch-tag: e1b36e32-c96e-4135-a41a-0b2efa2ea115
@end ignore
recutils-1.8/doc/Makefile.in 0000644 0000000 0000000 00000222476 13413353113 012727 0000000 0000000 # Makefile.in generated by automake 1.15 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2014 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
# doc/Makefile.am
# Copyright (C) 2009-2019 Jose E. Marchesi
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
VPATH = @srcdir@
am__is_gnu_make = { \
if test -z '$(MAKELEVEL)'; then \
false; \
elif test -n '$(MAKE_HOST)'; then \
true; \
elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
true; \
else \
false; \
fi; \
}
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
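# A sketch of how these predicates are consumed (illustrative, not the exact
# text of the rules further down in this generated file): recipes with side
# effects are typically guarded along the lines of
#   @$(am__make_dryrun) || $(MKDIR_P) "$(DESTDIR)$(infodir)"
# so that nothing is created or installed when the user runs `make -n'.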
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = doc
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/00gnulib.m4 \
$(top_srcdir)/m4/__inline.m4 \
$(top_srcdir)/m4/absolute-header.m4 $(top_srcdir)/m4/acl.m4 \
$(top_srcdir)/m4/alloca.m4 $(top_srcdir)/m4/asm-underscore.m4 \
$(top_srcdir)/m4/autobuild.m4 $(top_srcdir)/m4/base64.m4 \
$(top_srcdir)/m4/bison.m4 $(top_srcdir)/m4/btowc.m4 \
$(top_srcdir)/m4/builtin-expect.m4 \
$(top_srcdir)/m4/canonicalize.m4 \
$(top_srcdir)/m4/clock_time.m4 \
$(top_srcdir)/m4/close-stream.m4 $(top_srcdir)/m4/close.m4 \
$(top_srcdir)/m4/closeout.m4 $(top_srcdir)/m4/codeset.m4 \
$(top_srcdir)/m4/dirname.m4 \
$(top_srcdir)/m4/double-slash-root.m4 $(top_srcdir)/m4/dup2.m4 \
$(top_srcdir)/m4/eealloc.m4 $(top_srcdir)/m4/environ.m4 \
$(top_srcdir)/m4/errno_h.m4 $(top_srcdir)/m4/error.m4 \
$(top_srcdir)/m4/euidaccess.m4 $(top_srcdir)/m4/execute.m4 \
$(top_srcdir)/m4/exponentd.m4 $(top_srcdir)/m4/exponentf.m4 \
$(top_srcdir)/m4/exponentl.m4 $(top_srcdir)/m4/extensions.m4 \
$(top_srcdir)/m4/extern-inline.m4 \
$(top_srcdir)/m4/fatal-signal.m4 $(top_srcdir)/m4/fcntl-o.m4 \
$(top_srcdir)/m4/fcntl.m4 $(top_srcdir)/m4/fcntl_h.m4 \
$(top_srcdir)/m4/flexmember.m4 $(top_srcdir)/m4/float_h.m4 \
$(top_srcdir)/m4/flock.m4 $(top_srcdir)/m4/floor.m4 \
$(top_srcdir)/m4/fpending.m4 $(top_srcdir)/m4/fpieee.m4 \
$(top_srcdir)/m4/fprintf-posix.m4 $(top_srcdir)/m4/frexp.m4 \
$(top_srcdir)/m4/frexpl.m4 $(top_srcdir)/m4/fseek.m4 \
$(top_srcdir)/m4/fseeko.m4 $(top_srcdir)/m4/fseterr.m4 \
$(top_srcdir)/m4/fstat.m4 $(top_srcdir)/m4/ftell.m4 \
$(top_srcdir)/m4/ftello.m4 $(top_srcdir)/m4/fwriting.m4 \
$(top_srcdir)/m4/getdelim.m4 $(top_srcdir)/m4/getdtablesize.m4 \
$(top_srcdir)/m4/getgroups.m4 $(top_srcdir)/m4/getline.m4 \
$(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/getpass.m4 \
$(top_srcdir)/m4/getprogname.m4 $(top_srcdir)/m4/gettext.m4 \
$(top_srcdir)/m4/gettime.m4 $(top_srcdir)/m4/gettimeofday.m4 \
$(top_srcdir)/m4/glibc21.m4 $(top_srcdir)/m4/gnulib-common.m4 \
$(top_srcdir)/m4/gnulib-comp.m4 \
$(top_srcdir)/m4/group-member.m4 \
$(top_srcdir)/m4/host-cpu-c-abi.m4 $(top_srcdir)/m4/iconv.m4 \
$(top_srcdir)/m4/include_next.m4 \
$(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/intmax_t.m4 \
$(top_srcdir)/m4/inttypes-pri.m4 $(top_srcdir)/m4/inttypes.m4 \
$(top_srcdir)/m4/inttypes_h.m4 $(top_srcdir)/m4/isnand.m4 \
$(top_srcdir)/m4/isnanf.m4 $(top_srcdir)/m4/isnanl.m4 \
$(top_srcdir)/m4/langinfo_h.m4 $(top_srcdir)/m4/largefile.m4 \
$(top_srcdir)/m4/ldexpl.m4 $(top_srcdir)/m4/lib-ld.m4 \
$(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
$(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/limits-h.m4 \
$(top_srcdir)/m4/localcharset.m4 $(top_srcdir)/m4/locale-fr.m4 \
$(top_srcdir)/m4/locale-ja.m4 $(top_srcdir)/m4/locale-zh.m4 \
$(top_srcdir)/m4/locale_h.m4 $(top_srcdir)/m4/localeconv.m4 \
$(top_srcdir)/m4/localtime-buffer.m4 $(top_srcdir)/m4/lock.m4 \
$(top_srcdir)/m4/longlong.m4 $(top_srcdir)/m4/lseek.m4 \
$(top_srcdir)/m4/lstat.m4 $(top_srcdir)/m4/ltoptions.m4 \
$(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/malloc.m4 \
$(top_srcdir)/m4/malloca.m4 $(top_srcdir)/m4/math_h.m4 \
$(top_srcdir)/m4/mbrlen.m4 $(top_srcdir)/m4/mbrtowc.m4 \
$(top_srcdir)/m4/mbsinit.m4 $(top_srcdir)/m4/mbstate_t.m4 \
$(top_srcdir)/m4/mbtowc.m4 $(top_srcdir)/m4/memchr.m4 \
$(top_srcdir)/m4/minmax.m4 $(top_srcdir)/m4/mkdir.m4 \
$(top_srcdir)/m4/mkostemp.m4 $(top_srcdir)/m4/mkstemp.m4 \
$(top_srcdir)/m4/mktime.m4 $(top_srcdir)/m4/mmap-anon.m4 \
$(top_srcdir)/m4/mode_t.m4 $(top_srcdir)/m4/msvc-inval.m4 \
$(top_srcdir)/m4/msvc-nothrow.m4 $(top_srcdir)/m4/multiarch.m4 \
$(top_srcdir)/m4/nl_langinfo.m4 $(top_srcdir)/m4/nls.m4 \
$(top_srcdir)/m4/nocrash.m4 $(top_srcdir)/m4/nstrftime.m4 \
$(top_srcdir)/m4/obstack.m4 $(top_srcdir)/m4/off_t.m4 \
$(top_srcdir)/m4/open-cloexec.m4 $(top_srcdir)/m4/open.m4 \
$(top_srcdir)/m4/parse-datetime.m4 $(top_srcdir)/m4/pathmax.m4 \
$(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix_spawn.m4 \
$(top_srcdir)/m4/printf-frexp.m4 \
$(top_srcdir)/m4/printf-frexpl.m4 \
$(top_srcdir)/m4/printf-posix-rpl.m4 \
$(top_srcdir)/m4/printf.m4 $(top_srcdir)/m4/progtest.m4 \
$(top_srcdir)/m4/pthread_rwlock_rdlock.m4 \
$(top_srcdir)/m4/quote.m4 $(top_srcdir)/m4/quotearg.m4 \
$(top_srcdir)/m4/raise.m4 $(top_srcdir)/m4/random_r.m4 \
$(top_srcdir)/m4/rawmemchr.m4 $(top_srcdir)/m4/read-file.m4 \
$(top_srcdir)/m4/readline.m4 $(top_srcdir)/m4/readlink.m4 \
$(top_srcdir)/m4/realloc.m4 $(top_srcdir)/m4/regex.m4 \
$(top_srcdir)/m4/rename.m4 $(top_srcdir)/m4/rmdir.m4 \
$(top_srcdir)/m4/sched_h.m4 $(top_srcdir)/m4/secure_getenv.m4 \
$(top_srcdir)/m4/selinux-context-h.m4 \
$(top_srcdir)/m4/selinux-selinux-h.m4 \
$(top_srcdir)/m4/setenv.m4 $(top_srcdir)/m4/sh-filename.m4 \
$(top_srcdir)/m4/sig_atomic_t.m4 $(top_srcdir)/m4/sigaction.m4 \
$(top_srcdir)/m4/signal_h.m4 \
$(top_srcdir)/m4/signalblocking.m4 $(top_srcdir)/m4/signbit.m4 \
$(top_srcdir)/m4/size_max.m4 $(top_srcdir)/m4/spawn_h.m4 \
$(top_srcdir)/m4/ssize_t.m4 $(top_srcdir)/m4/stat-time.m4 \
$(top_srcdir)/m4/stat.m4 $(top_srcdir)/m4/stdarg.m4 \
$(top_srcdir)/m4/stdbool.m4 $(top_srcdir)/m4/stddef_h.m4 \
$(top_srcdir)/m4/stdint.m4 $(top_srcdir)/m4/stdint_h.m4 \
$(top_srcdir)/m4/stdio_h.m4 $(top_srcdir)/m4/stdlib_h.m4 \
$(top_srcdir)/m4/strcase.m4 $(top_srcdir)/m4/strcasestr.m4 \
$(top_srcdir)/m4/strchrnul.m4 $(top_srcdir)/m4/strdup.m4 \
$(top_srcdir)/m4/strerror.m4 $(top_srcdir)/m4/string_h.m4 \
$(top_srcdir)/m4/strings_h.m4 $(top_srcdir)/m4/strsep.m4 \
$(top_srcdir)/m4/strverscmp.m4 $(top_srcdir)/m4/sys_file_h.m4 \
$(top_srcdir)/m4/sys_socket_h.m4 \
$(top_srcdir)/m4/sys_stat_h.m4 $(top_srcdir)/m4/sys_time_h.m4 \
$(top_srcdir)/m4/sys_types_h.m4 $(top_srcdir)/m4/sys_wait_h.m4 \
$(top_srcdir)/m4/tempname.m4 $(top_srcdir)/m4/threadlib.m4 \
$(top_srcdir)/m4/time_h.m4 $(top_srcdir)/m4/time_r.m4 \
$(top_srcdir)/m4/time_rz.m4 $(top_srcdir)/m4/timegm.m4 \
$(top_srcdir)/m4/timespec.m4 $(top_srcdir)/m4/tm_gmtoff.m4 \
$(top_srcdir)/m4/tmpdir.m4 $(top_srcdir)/m4/tzset.m4 \
$(top_srcdir)/m4/unistd_h.m4 $(top_srcdir)/m4/unlocked-io.m4 \
$(top_srcdir)/m4/vasnprintf-posix.m4 \
$(top_srcdir)/m4/vasnprintf.m4 \
$(top_srcdir)/m4/vasprintf-posix.m4 \
$(top_srcdir)/m4/vasprintf.m4 $(top_srcdir)/m4/version-etc.m4 \
$(top_srcdir)/m4/vfprintf-posix.m4 \
$(top_srcdir)/m4/wait-process.m4 $(top_srcdir)/m4/waitpid.m4 \
$(top_srcdir)/m4/warn-on-use.m4 $(top_srcdir)/m4/wchar_h.m4 \
$(top_srcdir)/m4/wchar_t.m4 $(top_srcdir)/m4/wcrtomb.m4 \
$(top_srcdir)/m4/wctob.m4 $(top_srcdir)/m4/wctomb.m4 \
$(top_srcdir)/m4/wctype_h.m4 $(top_srcdir)/m4/wint_t.m4 \
$(top_srcdir)/m4/xalloc.m4 $(top_srcdir)/m4/xsize.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/version.texi \
$(srcdir)/stamp-vti $(srcdir)/version-rec-mode.texi \
$(srcdir)/stamp-1 $(am__DIST_COMMON)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/src/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
SOURCES =
DIST_SOURCES =
AM_V_DVIPS = $(am__v_DVIPS_@AM_V@)
am__v_DVIPS_ = $(am__v_DVIPS_@AM_DEFAULT_V@)
am__v_DVIPS_0 = @echo " DVIPS " $@;
am__v_DVIPS_1 =
AM_V_MAKEINFO = $(am__v_MAKEINFO_@AM_V@)
am__v_MAKEINFO_ = $(am__v_MAKEINFO_@AM_DEFAULT_V@)
am__v_MAKEINFO_0 = @echo " MAKEINFO" $@;
am__v_MAKEINFO_1 =
AM_V_INFOHTML = $(am__v_INFOHTML_@AM_V@)
am__v_INFOHTML_ = $(am__v_INFOHTML_@AM_DEFAULT_V@)
am__v_INFOHTML_0 = @echo " INFOHTML" $@;
am__v_INFOHTML_1 =
AM_V_TEXI2DVI = $(am__v_TEXI2DVI_@AM_V@)
am__v_TEXI2DVI_ = $(am__v_TEXI2DVI_@AM_DEFAULT_V@)
am__v_TEXI2DVI_0 = @echo " TEXI2DVI" $@;
am__v_TEXI2DVI_1 =
AM_V_TEXI2PDF = $(am__v_TEXI2PDF_@AM_V@)
am__v_TEXI2PDF_ = $(am__v_TEXI2PDF_@AM_DEFAULT_V@)
am__v_TEXI2PDF_0 = @echo " TEXI2PDF" $@;
am__v_TEXI2PDF_1 =
AM_V_texinfo = $(am__v_texinfo_@AM_V@)
am__v_texinfo_ = $(am__v_texinfo_@AM_DEFAULT_V@)
am__v_texinfo_0 = -q
am__v_texinfo_1 =
AM_V_texidevnull = $(am__v_texidevnull_@AM_V@)
am__v_texidevnull_ = $(am__v_texidevnull_@AM_DEFAULT_V@)
am__v_texidevnull_0 = > /dev/null
am__v_texidevnull_1 =
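# A sketch of the silent-rules indirection above (standard automake AM_V
# machinery, not specific to this directory): `make V=1' selects the *_1
# variants, so e.g. AM_V_MAKEINFO expands to nothing and the full makeinfo
# command line is echoed; with V=0 (or the configured default) the *_0
# variants print a terse "  MAKEINFO <target>" line and hide the command,
# AM_V_texinfo passes -q to texi2dvi, and AM_V_texidevnull discards its
# output via > /dev/null.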
INFO_DEPS = $(srcdir)/recutils.info $(srcdir)/rec-mode.info
TEXINFO_TEX = $(top_srcdir)/build-aux/texinfo.tex
am__TEXINFO_TEX_DIR = $(top_srcdir)/build-aux
DVIS = recutils.dvi rec-mode.dvi
PDFS = recutils.pdf rec-mode.pdf
PSS = recutils.ps rec-mode.ps
HTMLS = recutils.html rec-mode.html
TEXINFOS = recutils.texi rec-mode.texi
TEXI2DVI = texi2dvi
TEXI2PDF = $(TEXI2DVI) --pdf --batch
MAKEINFOHTML = $(MAKEINFO) --html
AM_MAKEINFOHTMLFLAGS = $(AM_MAKEINFOFLAGS)
DVIPS = dvips
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
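# (Guard used by the install-info rules further below: the Info dir file is
# only updated when AM_UPDATE_INFO_DIR is not set to "no" and an
# install-info program can actually be run.)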
am__installdirs = "$(DESTDIR)$(infodir)"
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
am__install_max = 40
am__nobase_strip_setup = \
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
am__nobase_strip = \
for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
am__nobase_list = $(am__nobase_strip_setup); \
for p in $$list; do echo "$$p $$p"; done | \
sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
$(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
if (++n[$$2] == $(am__install_max)) \
{ print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
END { for (dir in files) print dir, files[dir] }'
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
am__uninstall_files_from_dir = { \
test -z "$$files" \
|| { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
am__DIST_COMMON = $(recutils_TEXINFOS) $(srcdir)/Makefile.in \
$(top_srcdir)/build-aux/mdate-sh \
$(top_srcdir)/build-aux/texinfo.tex texinfo.tex
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
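# The long block that follows is the usual set of configure output variables
# (@VAR@ substitutions, roughly one per gnulib module or feature test) that
# config.status rewrites into the final Makefile; most of them are not
# referenced by the documentation rules themselves.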
ACLOCAL = @ACLOCAL@
ALLOCA = @ALLOCA@
ALLOCA_H = @ALLOCA_H@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
APPLE_UNIVERSAL_BUILD = @APPLE_UNIVERSAL_BUILD@
AR = @AR@
ARFLAGS = @ARFLAGS@
ASM_SYMBOL_PREFIX = @ASM_SYMBOL_PREFIX@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BASH_HEADERS = @BASH_HEADERS@
BITSIZEOF_PTRDIFF_T = @BITSIZEOF_PTRDIFF_T@
BITSIZEOF_SIG_ATOMIC_T = @BITSIZEOF_SIG_ATOMIC_T@
BITSIZEOF_SIZE_T = @BITSIZEOF_SIZE_T@
BITSIZEOF_WCHAR_T = @BITSIZEOF_WCHAR_T@
BITSIZEOF_WINT_T = @BITSIZEOF_WINT_T@
CA68 = @CA68@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CHECK_CFLAGS = @CHECK_CFLAGS@
CHECK_LIBS = @CHECK_LIBS@
CONFIG_INCLUDE = @CONFIG_INCLUDE@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CURLLIBS = @CURLLIBS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EMULTIHOP_HIDDEN = @EMULTIHOP_HIDDEN@
EMULTIHOP_VALUE = @EMULTIHOP_VALUE@
ENOLINK_HIDDEN = @ENOLINK_HIDDEN@
ENOLINK_VALUE = @ENOLINK_VALUE@
EOVERFLOW_HIDDEN = @EOVERFLOW_HIDDEN@
EOVERFLOW_VALUE = @EOVERFLOW_VALUE@
ERRNO_H = @ERRNO_H@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
FLOAT_H = @FLOAT_H@
FLOOR_LIBM = @FLOOR_LIBM@
GETOPT_CDEFS_H = @GETOPT_CDEFS_H@
GETOPT_H = @GETOPT_H@
GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@
GLIBC21 = @GLIBC21@
GLIB_CFLAGS = @GLIB_CFLAGS@
GLIB_LIBS = @GLIB_LIBS@
GMSGFMT = @GMSGFMT@
GMSGFMT_015 = @GMSGFMT_015@
GNULIB_ACOSF = @GNULIB_ACOSF@
GNULIB_ACOSL = @GNULIB_ACOSL@
GNULIB_ASINF = @GNULIB_ASINF@
GNULIB_ASINL = @GNULIB_ASINL@
GNULIB_ATAN2F = @GNULIB_ATAN2F@
GNULIB_ATANF = @GNULIB_ATANF@
GNULIB_ATANL = @GNULIB_ATANL@
GNULIB_ATOLL = @GNULIB_ATOLL@
GNULIB_BTOWC = @GNULIB_BTOWC@
GNULIB_CALLOC_POSIX = @GNULIB_CALLOC_POSIX@
GNULIB_CANONICALIZE_FILE_NAME = @GNULIB_CANONICALIZE_FILE_NAME@
GNULIB_CBRT = @GNULIB_CBRT@
GNULIB_CBRTF = @GNULIB_CBRTF@
GNULIB_CBRTL = @GNULIB_CBRTL@
GNULIB_CEIL = @GNULIB_CEIL@
GNULIB_CEILF = @GNULIB_CEILF@
GNULIB_CEILL = @GNULIB_CEILL@
GNULIB_CHDIR = @GNULIB_CHDIR@
GNULIB_CHOWN = @GNULIB_CHOWN@
GNULIB_CLOSE = @GNULIB_CLOSE@
GNULIB_COPYSIGN = @GNULIB_COPYSIGN@
GNULIB_COPYSIGNF = @GNULIB_COPYSIGNF@
GNULIB_COPYSIGNL = @GNULIB_COPYSIGNL@
GNULIB_COSF = @GNULIB_COSF@
GNULIB_COSHF = @GNULIB_COSHF@
GNULIB_COSL = @GNULIB_COSL@
GNULIB_CTIME = @GNULIB_CTIME@
GNULIB_DPRINTF = @GNULIB_DPRINTF@
GNULIB_DUP = @GNULIB_DUP@
GNULIB_DUP2 = @GNULIB_DUP2@
GNULIB_DUP3 = @GNULIB_DUP3@
GNULIB_DUPLOCALE = @GNULIB_DUPLOCALE@
GNULIB_ENVIRON = @GNULIB_ENVIRON@
GNULIB_EUIDACCESS = @GNULIB_EUIDACCESS@
GNULIB_EXP2 = @GNULIB_EXP2@
GNULIB_EXP2F = @GNULIB_EXP2F@
GNULIB_EXP2L = @GNULIB_EXP2L@
GNULIB_EXPF = @GNULIB_EXPF@
GNULIB_EXPL = @GNULIB_EXPL@
GNULIB_EXPLICIT_BZERO = @GNULIB_EXPLICIT_BZERO@
GNULIB_EXPM1 = @GNULIB_EXPM1@
GNULIB_EXPM1F = @GNULIB_EXPM1F@
GNULIB_EXPM1L = @GNULIB_EXPM1L@
GNULIB_FABSF = @GNULIB_FABSF@
GNULIB_FABSL = @GNULIB_FABSL@
GNULIB_FACCESSAT = @GNULIB_FACCESSAT@
GNULIB_FCHDIR = @GNULIB_FCHDIR@
GNULIB_FCHMODAT = @GNULIB_FCHMODAT@
GNULIB_FCHOWNAT = @GNULIB_FCHOWNAT@
GNULIB_FCLOSE = @GNULIB_FCLOSE@
GNULIB_FCNTL = @GNULIB_FCNTL@
GNULIB_FDATASYNC = @GNULIB_FDATASYNC@
GNULIB_FDOPEN = @GNULIB_FDOPEN@
GNULIB_FFLUSH = @GNULIB_FFLUSH@
GNULIB_FFS = @GNULIB_FFS@
GNULIB_FFSL = @GNULIB_FFSL@
GNULIB_FFSLL = @GNULIB_FFSLL@
GNULIB_FGETC = @GNULIB_FGETC@
GNULIB_FGETS = @GNULIB_FGETS@
GNULIB_FLOCK = @GNULIB_FLOCK@
GNULIB_FLOOR = @GNULIB_FLOOR@
GNULIB_FLOORF = @GNULIB_FLOORF@
GNULIB_FLOORL = @GNULIB_FLOORL@
GNULIB_FMA = @GNULIB_FMA@
GNULIB_FMAF = @GNULIB_FMAF@
GNULIB_FMAL = @GNULIB_FMAL@
GNULIB_FMOD = @GNULIB_FMOD@
GNULIB_FMODF = @GNULIB_FMODF@
GNULIB_FMODL = @GNULIB_FMODL@
GNULIB_FOPEN = @GNULIB_FOPEN@
GNULIB_FPRINTF = @GNULIB_FPRINTF@
GNULIB_FPRINTF_POSIX = @GNULIB_FPRINTF_POSIX@
GNULIB_FPURGE = @GNULIB_FPURGE@
GNULIB_FPUTC = @GNULIB_FPUTC@
GNULIB_FPUTS = @GNULIB_FPUTS@
GNULIB_FREAD = @GNULIB_FREAD@
GNULIB_FREOPEN = @GNULIB_FREOPEN@
GNULIB_FREXP = @GNULIB_FREXP@
GNULIB_FREXPF = @GNULIB_FREXPF@
GNULIB_FREXPL = @GNULIB_FREXPL@
GNULIB_FSCANF = @GNULIB_FSCANF@
GNULIB_FSEEK = @GNULIB_FSEEK@
GNULIB_FSEEKO = @GNULIB_FSEEKO@
GNULIB_FSTAT = @GNULIB_FSTAT@
GNULIB_FSTATAT = @GNULIB_FSTATAT@
GNULIB_FSYNC = @GNULIB_FSYNC@
GNULIB_FTELL = @GNULIB_FTELL@
GNULIB_FTELLO = @GNULIB_FTELLO@
GNULIB_FTRUNCATE = @GNULIB_FTRUNCATE@
GNULIB_FUTIMENS = @GNULIB_FUTIMENS@
GNULIB_FWRITE = @GNULIB_FWRITE@
GNULIB_GETC = @GNULIB_GETC@
GNULIB_GETCHAR = @GNULIB_GETCHAR@
GNULIB_GETCWD = @GNULIB_GETCWD@
GNULIB_GETDELIM = @GNULIB_GETDELIM@
GNULIB_GETDOMAINNAME = @GNULIB_GETDOMAINNAME@
GNULIB_GETDTABLESIZE = @GNULIB_GETDTABLESIZE@
GNULIB_GETGROUPS = @GNULIB_GETGROUPS@
GNULIB_GETHOSTNAME = @GNULIB_GETHOSTNAME@
GNULIB_GETLINE = @GNULIB_GETLINE@
GNULIB_GETLOADAVG = @GNULIB_GETLOADAVG@
GNULIB_GETLOGIN = @GNULIB_GETLOGIN@
GNULIB_GETLOGIN_R = @GNULIB_GETLOGIN_R@
GNULIB_GETPAGESIZE = @GNULIB_GETPAGESIZE@
GNULIB_GETPASS = @GNULIB_GETPASS@
GNULIB_GETSUBOPT = @GNULIB_GETSUBOPT@
GNULIB_GETTIMEOFDAY = @GNULIB_GETTIMEOFDAY@
GNULIB_GETUSERSHELL = @GNULIB_GETUSERSHELL@
GNULIB_GL_UNISTD_H_GETOPT = @GNULIB_GL_UNISTD_H_GETOPT@
GNULIB_GRANTPT = @GNULIB_GRANTPT@
GNULIB_GROUP_MEMBER = @GNULIB_GROUP_MEMBER@
GNULIB_HYPOT = @GNULIB_HYPOT@
GNULIB_HYPOTF = @GNULIB_HYPOTF@
GNULIB_HYPOTL = @GNULIB_HYPOTL@
GNULIB_ILOGB = @GNULIB_ILOGB@
GNULIB_ILOGBF = @GNULIB_ILOGBF@
GNULIB_ILOGBL = @GNULIB_ILOGBL@
GNULIB_IMAXABS = @GNULIB_IMAXABS@
GNULIB_IMAXDIV = @GNULIB_IMAXDIV@
GNULIB_ISATTY = @GNULIB_ISATTY@
GNULIB_ISFINITE = @GNULIB_ISFINITE@
GNULIB_ISINF = @GNULIB_ISINF@
GNULIB_ISNAN = @GNULIB_ISNAN@
GNULIB_ISNAND = @GNULIB_ISNAND@
GNULIB_ISNANF = @GNULIB_ISNANF@
GNULIB_ISNANL = @GNULIB_ISNANL@
GNULIB_ISWBLANK = @GNULIB_ISWBLANK@
GNULIB_ISWCTYPE = @GNULIB_ISWCTYPE@
GNULIB_LCHMOD = @GNULIB_LCHMOD@
GNULIB_LCHOWN = @GNULIB_LCHOWN@
GNULIB_LDEXPF = @GNULIB_LDEXPF@
GNULIB_LDEXPL = @GNULIB_LDEXPL@
GNULIB_LINK = @GNULIB_LINK@
GNULIB_LINKAT = @GNULIB_LINKAT@
GNULIB_LOCALECONV = @GNULIB_LOCALECONV@
GNULIB_LOCALENAME = @GNULIB_LOCALENAME@
GNULIB_LOCALTIME = @GNULIB_LOCALTIME@
GNULIB_LOG = @GNULIB_LOG@
GNULIB_LOG10 = @GNULIB_LOG10@
GNULIB_LOG10F = @GNULIB_LOG10F@
GNULIB_LOG10L = @GNULIB_LOG10L@
GNULIB_LOG1P = @GNULIB_LOG1P@
GNULIB_LOG1PF = @GNULIB_LOG1PF@
GNULIB_LOG1PL = @GNULIB_LOG1PL@
GNULIB_LOG2 = @GNULIB_LOG2@
GNULIB_LOG2F = @GNULIB_LOG2F@
GNULIB_LOG2L = @GNULIB_LOG2L@
GNULIB_LOGB = @GNULIB_LOGB@
GNULIB_LOGBF = @GNULIB_LOGBF@
GNULIB_LOGBL = @GNULIB_LOGBL@
GNULIB_LOGF = @GNULIB_LOGF@
GNULIB_LOGL = @GNULIB_LOGL@
GNULIB_LSEEK = @GNULIB_LSEEK@
GNULIB_LSTAT = @GNULIB_LSTAT@
GNULIB_MALLOC_POSIX = @GNULIB_MALLOC_POSIX@
GNULIB_MBRLEN = @GNULIB_MBRLEN@
GNULIB_MBRTOWC = @GNULIB_MBRTOWC@
GNULIB_MBSCASECMP = @GNULIB_MBSCASECMP@
GNULIB_MBSCASESTR = @GNULIB_MBSCASESTR@
GNULIB_MBSCHR = @GNULIB_MBSCHR@
GNULIB_MBSCSPN = @GNULIB_MBSCSPN@
GNULIB_MBSINIT = @GNULIB_MBSINIT@
GNULIB_MBSLEN = @GNULIB_MBSLEN@
GNULIB_MBSNCASECMP = @GNULIB_MBSNCASECMP@
GNULIB_MBSNLEN = @GNULIB_MBSNLEN@
GNULIB_MBSNRTOWCS = @GNULIB_MBSNRTOWCS@
GNULIB_MBSPBRK = @GNULIB_MBSPBRK@
GNULIB_MBSPCASECMP = @GNULIB_MBSPCASECMP@
GNULIB_MBSRCHR = @GNULIB_MBSRCHR@
GNULIB_MBSRTOWCS = @GNULIB_MBSRTOWCS@
GNULIB_MBSSEP = @GNULIB_MBSSEP@
GNULIB_MBSSPN = @GNULIB_MBSSPN@
GNULIB_MBSSTR = @GNULIB_MBSSTR@
GNULIB_MBSTOK_R = @GNULIB_MBSTOK_R@
GNULIB_MBTOWC = @GNULIB_MBTOWC@
GNULIB_MEMCHR = @GNULIB_MEMCHR@
GNULIB_MEMMEM = @GNULIB_MEMMEM@
GNULIB_MEMPCPY = @GNULIB_MEMPCPY@
GNULIB_MEMRCHR = @GNULIB_MEMRCHR@
GNULIB_MKDIRAT = @GNULIB_MKDIRAT@
GNULIB_MKDTEMP = @GNULIB_MKDTEMP@
GNULIB_MKFIFO = @GNULIB_MKFIFO@
GNULIB_MKFIFOAT = @GNULIB_MKFIFOAT@
GNULIB_MKNOD = @GNULIB_MKNOD@
GNULIB_MKNODAT = @GNULIB_MKNODAT@
GNULIB_MKOSTEMP = @GNULIB_MKOSTEMP@
GNULIB_MKOSTEMPS = @GNULIB_MKOSTEMPS@
GNULIB_MKSTEMP = @GNULIB_MKSTEMP@
GNULIB_MKSTEMPS = @GNULIB_MKSTEMPS@
GNULIB_MKTIME = @GNULIB_MKTIME@
GNULIB_MODF = @GNULIB_MODF@
GNULIB_MODFF = @GNULIB_MODFF@
GNULIB_MODFL = @GNULIB_MODFL@
GNULIB_NANOSLEEP = @GNULIB_NANOSLEEP@
GNULIB_NL_LANGINFO = @GNULIB_NL_LANGINFO@
GNULIB_NONBLOCKING = @GNULIB_NONBLOCKING@
GNULIB_OBSTACK_PRINTF = @GNULIB_OBSTACK_PRINTF@
GNULIB_OBSTACK_PRINTF_POSIX = @GNULIB_OBSTACK_PRINTF_POSIX@
GNULIB_OPEN = @GNULIB_OPEN@
GNULIB_OPENAT = @GNULIB_OPENAT@
GNULIB_OVERRIDES_STRUCT_STAT = @GNULIB_OVERRIDES_STRUCT_STAT@
GNULIB_OVERRIDES_WINT_T = @GNULIB_OVERRIDES_WINT_T@
GNULIB_PCLOSE = @GNULIB_PCLOSE@
GNULIB_PERROR = @GNULIB_PERROR@
GNULIB_PIPE = @GNULIB_PIPE@
GNULIB_PIPE2 = @GNULIB_PIPE2@
GNULIB_POPEN = @GNULIB_POPEN@
GNULIB_POSIX_OPENPT = @GNULIB_POSIX_OPENPT@
GNULIB_POSIX_SPAWN = @GNULIB_POSIX_SPAWN@
GNULIB_POSIX_SPAWNATTR_DESTROY = @GNULIB_POSIX_SPAWNATTR_DESTROY@
GNULIB_POSIX_SPAWNATTR_GETFLAGS = @GNULIB_POSIX_SPAWNATTR_GETFLAGS@
GNULIB_POSIX_SPAWNATTR_GETPGROUP = @GNULIB_POSIX_SPAWNATTR_GETPGROUP@
GNULIB_POSIX_SPAWNATTR_GETSCHEDPARAM = @GNULIB_POSIX_SPAWNATTR_GETSCHEDPARAM@
GNULIB_POSIX_SPAWNATTR_GETSCHEDPOLICY = @GNULIB_POSIX_SPAWNATTR_GETSCHEDPOLICY@
GNULIB_POSIX_SPAWNATTR_GETSIGDEFAULT = @GNULIB_POSIX_SPAWNATTR_GETSIGDEFAULT@
GNULIB_POSIX_SPAWNATTR_GETSIGMASK = @GNULIB_POSIX_SPAWNATTR_GETSIGMASK@
GNULIB_POSIX_SPAWNATTR_INIT = @GNULIB_POSIX_SPAWNATTR_INIT@
GNULIB_POSIX_SPAWNATTR_SETFLAGS = @GNULIB_POSIX_SPAWNATTR_SETFLAGS@
GNULIB_POSIX_SPAWNATTR_SETPGROUP = @GNULIB_POSIX_SPAWNATTR_SETPGROUP@
GNULIB_POSIX_SPAWNATTR_SETSCHEDPARAM = @GNULIB_POSIX_SPAWNATTR_SETSCHEDPARAM@
GNULIB_POSIX_SPAWNATTR_SETSCHEDPOLICY = @GNULIB_POSIX_SPAWNATTR_SETSCHEDPOLICY@
GNULIB_POSIX_SPAWNATTR_SETSIGDEFAULT = @GNULIB_POSIX_SPAWNATTR_SETSIGDEFAULT@
GNULIB_POSIX_SPAWNATTR_SETSIGMASK = @GNULIB_POSIX_SPAWNATTR_SETSIGMASK@
GNULIB_POSIX_SPAWNP = @GNULIB_POSIX_SPAWNP@
GNULIB_POSIX_SPAWN_FILE_ACTIONS_ADDCHDIR = @GNULIB_POSIX_SPAWN_FILE_ACTIONS_ADDCHDIR@
GNULIB_POSIX_SPAWN_FILE_ACTIONS_ADDCLOSE = @GNULIB_POSIX_SPAWN_FILE_ACTIONS_ADDCLOSE@
GNULIB_POSIX_SPAWN_FILE_ACTIONS_ADDDUP2 = @GNULIB_POSIX_SPAWN_FILE_ACTIONS_ADDDUP2@
GNULIB_POSIX_SPAWN_FILE_ACTIONS_ADDOPEN = @GNULIB_POSIX_SPAWN_FILE_ACTIONS_ADDOPEN@
GNULIB_POSIX_SPAWN_FILE_ACTIONS_DESTROY = @GNULIB_POSIX_SPAWN_FILE_ACTIONS_DESTROY@
GNULIB_POSIX_SPAWN_FILE_ACTIONS_INIT = @GNULIB_POSIX_SPAWN_FILE_ACTIONS_INIT@
GNULIB_POWF = @GNULIB_POWF@
GNULIB_PREAD = @GNULIB_PREAD@
GNULIB_PRINTF = @GNULIB_PRINTF@
GNULIB_PRINTF_POSIX = @GNULIB_PRINTF_POSIX@
GNULIB_PTHREAD_SIGMASK = @GNULIB_PTHREAD_SIGMASK@
GNULIB_PTSNAME = @GNULIB_PTSNAME@
GNULIB_PTSNAME_R = @GNULIB_PTSNAME_R@
GNULIB_PUTC = @GNULIB_PUTC@
GNULIB_PUTCHAR = @GNULIB_PUTCHAR@
GNULIB_PUTENV = @GNULIB_PUTENV@
GNULIB_PUTS = @GNULIB_PUTS@
GNULIB_PWRITE = @GNULIB_PWRITE@
GNULIB_QSORT_R = @GNULIB_QSORT_R@
GNULIB_RAISE = @GNULIB_RAISE@
GNULIB_RANDOM = @GNULIB_RANDOM@
GNULIB_RANDOM_R = @GNULIB_RANDOM_R@
GNULIB_RAWMEMCHR = @GNULIB_RAWMEMCHR@
GNULIB_READ = @GNULIB_READ@
GNULIB_READLINK = @GNULIB_READLINK@
GNULIB_READLINKAT = @GNULIB_READLINKAT@
GNULIB_REALLOCARRAY = @GNULIB_REALLOCARRAY@
GNULIB_REALLOC_POSIX = @GNULIB_REALLOC_POSIX@
GNULIB_REALPATH = @GNULIB_REALPATH@
GNULIB_REMAINDER = @GNULIB_REMAINDER@
GNULIB_REMAINDERF = @GNULIB_REMAINDERF@
GNULIB_REMAINDERL = @GNULIB_REMAINDERL@
GNULIB_REMOVE = @GNULIB_REMOVE@
GNULIB_RENAME = @GNULIB_RENAME@
GNULIB_RENAMEAT = @GNULIB_RENAMEAT@
GNULIB_RINT = @GNULIB_RINT@
GNULIB_RINTF = @GNULIB_RINTF@
GNULIB_RINTL = @GNULIB_RINTL@
GNULIB_RMDIR = @GNULIB_RMDIR@
GNULIB_ROUND = @GNULIB_ROUND@
GNULIB_ROUNDF = @GNULIB_ROUNDF@
GNULIB_ROUNDL = @GNULIB_ROUNDL@
GNULIB_RPMATCH = @GNULIB_RPMATCH@
GNULIB_SCANF = @GNULIB_SCANF@
GNULIB_SECURE_GETENV = @GNULIB_SECURE_GETENV@
GNULIB_SETENV = @GNULIB_SETENV@
GNULIB_SETHOSTNAME = @GNULIB_SETHOSTNAME@
GNULIB_SETLOCALE = @GNULIB_SETLOCALE@
GNULIB_SIGACTION = @GNULIB_SIGACTION@
GNULIB_SIGNAL_H_SIGPIPE = @GNULIB_SIGNAL_H_SIGPIPE@
GNULIB_SIGNBIT = @GNULIB_SIGNBIT@
GNULIB_SIGPROCMASK = @GNULIB_SIGPROCMASK@
GNULIB_SINF = @GNULIB_SINF@
GNULIB_SINHF = @GNULIB_SINHF@
GNULIB_SINL = @GNULIB_SINL@
GNULIB_SLEEP = @GNULIB_SLEEP@
GNULIB_SNPRINTF = @GNULIB_SNPRINTF@
GNULIB_SPRINTF_POSIX = @GNULIB_SPRINTF_POSIX@
GNULIB_SQRTF = @GNULIB_SQRTF@
GNULIB_SQRTL = @GNULIB_SQRTL@
GNULIB_STAT = @GNULIB_STAT@
GNULIB_STDIO_H_NONBLOCKING = @GNULIB_STDIO_H_NONBLOCKING@
GNULIB_STDIO_H_SIGPIPE = @GNULIB_STDIO_H_SIGPIPE@
GNULIB_STPCPY = @GNULIB_STPCPY@
GNULIB_STPNCPY = @GNULIB_STPNCPY@
GNULIB_STRCASESTR = @GNULIB_STRCASESTR@
GNULIB_STRCHRNUL = @GNULIB_STRCHRNUL@
GNULIB_STRDUP = @GNULIB_STRDUP@
GNULIB_STRERROR = @GNULIB_STRERROR@
GNULIB_STRERROR_R = @GNULIB_STRERROR_R@
GNULIB_STRFTIME = @GNULIB_STRFTIME@
GNULIB_STRNCAT = @GNULIB_STRNCAT@
GNULIB_STRNDUP = @GNULIB_STRNDUP@
GNULIB_STRNLEN = @GNULIB_STRNLEN@
GNULIB_STRPBRK = @GNULIB_STRPBRK@
GNULIB_STRPTIME = @GNULIB_STRPTIME@
GNULIB_STRSEP = @GNULIB_STRSEP@
GNULIB_STRSIGNAL = @GNULIB_STRSIGNAL@
GNULIB_STRSTR = @GNULIB_STRSTR@
GNULIB_STRTOD = @GNULIB_STRTOD@
GNULIB_STRTOIMAX = @GNULIB_STRTOIMAX@
GNULIB_STRTOK_R = @GNULIB_STRTOK_R@
GNULIB_STRTOLL = @GNULIB_STRTOLL@
GNULIB_STRTOULL = @GNULIB_STRTOULL@
GNULIB_STRTOUMAX = @GNULIB_STRTOUMAX@
GNULIB_STRVERSCMP = @GNULIB_STRVERSCMP@
GNULIB_SYMLINK = @GNULIB_SYMLINK@
GNULIB_SYMLINKAT = @GNULIB_SYMLINKAT@
GNULIB_SYSTEM_POSIX = @GNULIB_SYSTEM_POSIX@
GNULIB_TANF = @GNULIB_TANF@
GNULIB_TANHF = @GNULIB_TANHF@
GNULIB_TANL = @GNULIB_TANL@
GNULIB_TIMEGM = @GNULIB_TIMEGM@
GNULIB_TIME_R = @GNULIB_TIME_R@
GNULIB_TIME_RZ = @GNULIB_TIME_RZ@
GNULIB_TMPFILE = @GNULIB_TMPFILE@
GNULIB_TOWCTRANS = @GNULIB_TOWCTRANS@
GNULIB_TRUNC = @GNULIB_TRUNC@
GNULIB_TRUNCATE = @GNULIB_TRUNCATE@
GNULIB_TRUNCF = @GNULIB_TRUNCF@
GNULIB_TRUNCL = @GNULIB_TRUNCL@
GNULIB_TTYNAME_R = @GNULIB_TTYNAME_R@
GNULIB_TZSET = @GNULIB_TZSET@
GNULIB_UNISTD_H_NONBLOCKING = @GNULIB_UNISTD_H_NONBLOCKING@
GNULIB_UNISTD_H_SIGPIPE = @GNULIB_UNISTD_H_SIGPIPE@
GNULIB_UNLINK = @GNULIB_UNLINK@
GNULIB_UNLINKAT = @GNULIB_UNLINKAT@
GNULIB_UNLOCKPT = @GNULIB_UNLOCKPT@
GNULIB_UNSETENV = @GNULIB_UNSETENV@
GNULIB_USLEEP = @GNULIB_USLEEP@
GNULIB_UTIMENSAT = @GNULIB_UTIMENSAT@
GNULIB_VASPRINTF = @GNULIB_VASPRINTF@
GNULIB_VDPRINTF = @GNULIB_VDPRINTF@
GNULIB_VFPRINTF = @GNULIB_VFPRINTF@
GNULIB_VFPRINTF_POSIX = @GNULIB_VFPRINTF_POSIX@
GNULIB_VFSCANF = @GNULIB_VFSCANF@
GNULIB_VPRINTF = @GNULIB_VPRINTF@
GNULIB_VPRINTF_POSIX = @GNULIB_VPRINTF_POSIX@
GNULIB_VSCANF = @GNULIB_VSCANF@
GNULIB_VSNPRINTF = @GNULIB_VSNPRINTF@
GNULIB_VSPRINTF_POSIX = @GNULIB_VSPRINTF_POSIX@
GNULIB_WAITPID = @GNULIB_WAITPID@
GNULIB_WCPCPY = @GNULIB_WCPCPY@
GNULIB_WCPNCPY = @GNULIB_WCPNCPY@
GNULIB_WCRTOMB = @GNULIB_WCRTOMB@
GNULIB_WCSCASECMP = @GNULIB_WCSCASECMP@
GNULIB_WCSCAT = @GNULIB_WCSCAT@
GNULIB_WCSCHR = @GNULIB_WCSCHR@
GNULIB_WCSCMP = @GNULIB_WCSCMP@
GNULIB_WCSCOLL = @GNULIB_WCSCOLL@
GNULIB_WCSCPY = @GNULIB_WCSCPY@
GNULIB_WCSCSPN = @GNULIB_WCSCSPN@
GNULIB_WCSDUP = @GNULIB_WCSDUP@
GNULIB_WCSFTIME = @GNULIB_WCSFTIME@
GNULIB_WCSLEN = @GNULIB_WCSLEN@
GNULIB_WCSNCASECMP = @GNULIB_WCSNCASECMP@
GNULIB_WCSNCAT = @GNULIB_WCSNCAT@
GNULIB_WCSNCMP = @GNULIB_WCSNCMP@
GNULIB_WCSNCPY = @GNULIB_WCSNCPY@
GNULIB_WCSNLEN = @GNULIB_WCSNLEN@
GNULIB_WCSNRTOMBS = @GNULIB_WCSNRTOMBS@
GNULIB_WCSPBRK = @GNULIB_WCSPBRK@
GNULIB_WCSRCHR = @GNULIB_WCSRCHR@
GNULIB_WCSRTOMBS = @GNULIB_WCSRTOMBS@
GNULIB_WCSSPN = @GNULIB_WCSSPN@
GNULIB_WCSSTR = @GNULIB_WCSSTR@
GNULIB_WCSTOK = @GNULIB_WCSTOK@
GNULIB_WCSWIDTH = @GNULIB_WCSWIDTH@
GNULIB_WCSXFRM = @GNULIB_WCSXFRM@
GNULIB_WCTOB = @GNULIB_WCTOB@
GNULIB_WCTOMB = @GNULIB_WCTOMB@
GNULIB_WCTRANS = @GNULIB_WCTRANS@
GNULIB_WCTYPE = @GNULIB_WCTYPE@
GNULIB_WCWIDTH = @GNULIB_WCWIDTH@
GNULIB_WMEMCHR = @GNULIB_WMEMCHR@
GNULIB_WMEMCMP = @GNULIB_WMEMCMP@
GNULIB_WMEMCPY = @GNULIB_WMEMCPY@
GNULIB_WMEMMOVE = @GNULIB_WMEMMOVE@
GNULIB_WMEMSET = @GNULIB_WMEMSET@
GNULIB_WRITE = @GNULIB_WRITE@
GNULIB__EXIT = @GNULIB__EXIT@
GREP = @GREP@
HAVE_ACOSF = @HAVE_ACOSF@
HAVE_ACOSL = @HAVE_ACOSL@
HAVE_ASINF = @HAVE_ASINF@
HAVE_ASINL = @HAVE_ASINL@
HAVE_ATAN2F = @HAVE_ATAN2F@
HAVE_ATANF = @HAVE_ATANF@
HAVE_ATANL = @HAVE_ATANL@
HAVE_ATOLL = @HAVE_ATOLL@
HAVE_BTOWC = @HAVE_BTOWC@
HAVE_C99_STDINT_H = @HAVE_C99_STDINT_H@
HAVE_CANONICALIZE_FILE_NAME = @HAVE_CANONICALIZE_FILE_NAME@
HAVE_CBRT = @HAVE_CBRT@
HAVE_CBRTF = @HAVE_CBRTF@
HAVE_CBRTL = @HAVE_CBRTL@
HAVE_CHOWN = @HAVE_CHOWN@
HAVE_COPYSIGN = @HAVE_COPYSIGN@
HAVE_COPYSIGNL = @HAVE_COPYSIGNL@
HAVE_COSF = @HAVE_COSF@
HAVE_COSHF = @HAVE_COSHF@
HAVE_COSL = @HAVE_COSL@
HAVE_CRTDEFS_H = @HAVE_CRTDEFS_H@
HAVE_DECL_ACOSL = @HAVE_DECL_ACOSL@
HAVE_DECL_ASINL = @HAVE_DECL_ASINL@
HAVE_DECL_ATANL = @HAVE_DECL_ATANL@
HAVE_DECL_CBRTF = @HAVE_DECL_CBRTF@
HAVE_DECL_CBRTL = @HAVE_DECL_CBRTL@
HAVE_DECL_CEILF = @HAVE_DECL_CEILF@
HAVE_DECL_CEILL = @HAVE_DECL_CEILL@
HAVE_DECL_COPYSIGNF = @HAVE_DECL_COPYSIGNF@
HAVE_DECL_COSL = @HAVE_DECL_COSL@
HAVE_DECL_ENVIRON = @HAVE_DECL_ENVIRON@
HAVE_DECL_EXP2 = @HAVE_DECL_EXP2@
HAVE_DECL_EXP2F = @HAVE_DECL_EXP2F@
HAVE_DECL_EXP2L = @HAVE_DECL_EXP2L@
HAVE_DECL_EXPL = @HAVE_DECL_EXPL@
HAVE_DECL_EXPM1L = @HAVE_DECL_EXPM1L@
HAVE_DECL_FCHDIR = @HAVE_DECL_FCHDIR@
HAVE_DECL_FDATASYNC = @HAVE_DECL_FDATASYNC@
HAVE_DECL_FLOORF = @HAVE_DECL_FLOORF@
HAVE_DECL_FLOORL = @HAVE_DECL_FLOORL@
HAVE_DECL_FPURGE = @HAVE_DECL_FPURGE@
HAVE_DECL_FREXPL = @HAVE_DECL_FREXPL@
HAVE_DECL_FSEEKO = @HAVE_DECL_FSEEKO@
HAVE_DECL_FTELLO = @HAVE_DECL_FTELLO@
HAVE_DECL_GETDELIM = @HAVE_DECL_GETDELIM@
HAVE_DECL_GETDOMAINNAME = @HAVE_DECL_GETDOMAINNAME@
HAVE_DECL_GETLINE = @HAVE_DECL_GETLINE@
HAVE_DECL_GETLOADAVG = @HAVE_DECL_GETLOADAVG@
HAVE_DECL_GETLOGIN = @HAVE_DECL_GETLOGIN@
HAVE_DECL_GETLOGIN_R = @HAVE_DECL_GETLOGIN_R@
HAVE_DECL_GETPAGESIZE = @HAVE_DECL_GETPAGESIZE@
HAVE_DECL_GETUSERSHELL = @HAVE_DECL_GETUSERSHELL@
HAVE_DECL_IMAXABS = @HAVE_DECL_IMAXABS@
HAVE_DECL_IMAXDIV = @HAVE_DECL_IMAXDIV@
HAVE_DECL_INITSTATE = @HAVE_DECL_INITSTATE@
HAVE_DECL_LDEXPL = @HAVE_DECL_LDEXPL@
HAVE_DECL_LOCALTIME_R = @HAVE_DECL_LOCALTIME_R@
HAVE_DECL_LOG10L = @HAVE_DECL_LOG10L@
HAVE_DECL_LOG2 = @HAVE_DECL_LOG2@
HAVE_DECL_LOG2F = @HAVE_DECL_LOG2F@
HAVE_DECL_LOG2L = @HAVE_DECL_LOG2L@
HAVE_DECL_LOGB = @HAVE_DECL_LOGB@
HAVE_DECL_LOGL = @HAVE_DECL_LOGL@
HAVE_DECL_MEMMEM = @HAVE_DECL_MEMMEM@
HAVE_DECL_MEMRCHR = @HAVE_DECL_MEMRCHR@
HAVE_DECL_OBSTACK_PRINTF = @HAVE_DECL_OBSTACK_PRINTF@
HAVE_DECL_REMAINDER = @HAVE_DECL_REMAINDER@
HAVE_DECL_REMAINDERL = @HAVE_DECL_REMAINDERL@
HAVE_DECL_RINTF = @HAVE_DECL_RINTF@
HAVE_DECL_ROUND = @HAVE_DECL_ROUND@
HAVE_DECL_ROUNDF = @HAVE_DECL_ROUNDF@
HAVE_DECL_ROUNDL = @HAVE_DECL_ROUNDL@
HAVE_DECL_SETENV = @HAVE_DECL_SETENV@
HAVE_DECL_SETHOSTNAME = @HAVE_DECL_SETHOSTNAME@
HAVE_DECL_SETSTATE = @HAVE_DECL_SETSTATE@
HAVE_DECL_SINL = @HAVE_DECL_SINL@
HAVE_DECL_SNPRINTF = @HAVE_DECL_SNPRINTF@
HAVE_DECL_SQRTL = @HAVE_DECL_SQRTL@
HAVE_DECL_STRDUP = @HAVE_DECL_STRDUP@
HAVE_DECL_STRERROR_R = @HAVE_DECL_STRERROR_R@
HAVE_DECL_STRNCASECMP = @HAVE_DECL_STRNCASECMP@
HAVE_DECL_STRNDUP = @HAVE_DECL_STRNDUP@
HAVE_DECL_STRNLEN = @HAVE_DECL_STRNLEN@
HAVE_DECL_STRSIGNAL = @HAVE_DECL_STRSIGNAL@
HAVE_DECL_STRTOIMAX = @HAVE_DECL_STRTOIMAX@
HAVE_DECL_STRTOK_R = @HAVE_DECL_STRTOK_R@
HAVE_DECL_STRTOUMAX = @HAVE_DECL_STRTOUMAX@
HAVE_DECL_TANL = @HAVE_DECL_TANL@
HAVE_DECL_TRUNC = @HAVE_DECL_TRUNC@
HAVE_DECL_TRUNCATE = @HAVE_DECL_TRUNCATE@
HAVE_DECL_TRUNCF = @HAVE_DECL_TRUNCF@
HAVE_DECL_TRUNCL = @HAVE_DECL_TRUNCL@
HAVE_DECL_TTYNAME_R = @HAVE_DECL_TTYNAME_R@
HAVE_DECL_UNSETENV = @HAVE_DECL_UNSETENV@
HAVE_DECL_VSNPRINTF = @HAVE_DECL_VSNPRINTF@
HAVE_DECL_WCTOB = @HAVE_DECL_WCTOB@
HAVE_DECL_WCWIDTH = @HAVE_DECL_WCWIDTH@
HAVE_DPRINTF = @HAVE_DPRINTF@
HAVE_DUP2 = @HAVE_DUP2@
HAVE_DUP3 = @HAVE_DUP3@
HAVE_DUPLOCALE = @HAVE_DUPLOCALE@
HAVE_EUIDACCESS = @HAVE_EUIDACCESS@
HAVE_EXPF = @HAVE_EXPF@
HAVE_EXPL = @HAVE_EXPL@
HAVE_EXPLICIT_BZERO = @HAVE_EXPLICIT_BZERO@
HAVE_EXPM1 = @HAVE_EXPM1@
HAVE_EXPM1F = @HAVE_EXPM1F@
HAVE_FABSF = @HAVE_FABSF@
HAVE_FABSL = @HAVE_FABSL@
HAVE_FACCESSAT = @HAVE_FACCESSAT@
HAVE_FCHDIR = @HAVE_FCHDIR@
HAVE_FCHMODAT = @HAVE_FCHMODAT@
HAVE_FCHOWNAT = @HAVE_FCHOWNAT@
HAVE_FCNTL = @HAVE_FCNTL@
HAVE_FDATASYNC = @HAVE_FDATASYNC@
HAVE_FEATURES_H = @HAVE_FEATURES_H@
HAVE_FFS = @HAVE_FFS@
HAVE_FFSL = @HAVE_FFSL@
HAVE_FFSLL = @HAVE_FFSLL@
HAVE_FLOCK = @HAVE_FLOCK@
HAVE_FMA = @HAVE_FMA@
HAVE_FMAF = @HAVE_FMAF@
HAVE_FMAL = @HAVE_FMAL@
HAVE_FMODF = @HAVE_FMODF@
HAVE_FMODL = @HAVE_FMODL@
HAVE_FREELOCALE = @HAVE_FREELOCALE@
HAVE_FREXPF = @HAVE_FREXPF@
HAVE_FSEEKO = @HAVE_FSEEKO@
HAVE_FSTATAT = @HAVE_FSTATAT@
HAVE_FSYNC = @HAVE_FSYNC@
HAVE_FTELLO = @HAVE_FTELLO@
HAVE_FTRUNCATE = @HAVE_FTRUNCATE@
HAVE_FUTIMENS = @HAVE_FUTIMENS@
HAVE_GETDTABLESIZE = @HAVE_GETDTABLESIZE@
HAVE_GETGROUPS = @HAVE_GETGROUPS@
HAVE_GETHOSTNAME = @HAVE_GETHOSTNAME@
HAVE_GETLOGIN = @HAVE_GETLOGIN@
HAVE_GETOPT_H = @HAVE_GETOPT_H@
HAVE_GETPAGESIZE = @HAVE_GETPAGESIZE@
HAVE_GETPASS = @HAVE_GETPASS@
HAVE_GETSUBOPT = @HAVE_GETSUBOPT@
HAVE_GETTIMEOFDAY = @HAVE_GETTIMEOFDAY@
HAVE_GRANTPT = @HAVE_GRANTPT@
HAVE_GROUP_MEMBER = @HAVE_GROUP_MEMBER@
HAVE_HYPOTF = @HAVE_HYPOTF@
HAVE_HYPOTL = @HAVE_HYPOTL@
HAVE_ILOGB = @HAVE_ILOGB@
HAVE_ILOGBF = @HAVE_ILOGBF@
HAVE_ILOGBL = @HAVE_ILOGBL@
HAVE_IMAXDIV_T = @HAVE_IMAXDIV_T@
HAVE_INTTYPES_H = @HAVE_INTTYPES_H@
HAVE_ISNAND = @HAVE_ISNAND@
HAVE_ISNANF = @HAVE_ISNANF@
HAVE_ISNANL = @HAVE_ISNANL@
HAVE_ISWBLANK = @HAVE_ISWBLANK@
HAVE_ISWCNTRL = @HAVE_ISWCNTRL@
HAVE_LANGINFO_ALTMON = @HAVE_LANGINFO_ALTMON@
HAVE_LANGINFO_CODESET = @HAVE_LANGINFO_CODESET@
HAVE_LANGINFO_ERA = @HAVE_LANGINFO_ERA@
HAVE_LANGINFO_H = @HAVE_LANGINFO_H@
HAVE_LANGINFO_T_FMT_AMPM = @HAVE_LANGINFO_T_FMT_AMPM@
HAVE_LANGINFO_YESEXPR = @HAVE_LANGINFO_YESEXPR@
HAVE_LCHMOD = @HAVE_LCHMOD@
HAVE_LCHOWN = @HAVE_LCHOWN@
HAVE_LDEXPF = @HAVE_LDEXPF@
HAVE_LIBGCRYPT = @HAVE_LIBGCRYPT@
HAVE_LINK = @HAVE_LINK@
HAVE_LINKAT = @HAVE_LINKAT@
HAVE_LOG10F = @HAVE_LOG10F@
HAVE_LOG10L = @HAVE_LOG10L@
HAVE_LOG1P = @HAVE_LOG1P@
HAVE_LOG1PF = @HAVE_LOG1PF@
HAVE_LOG1PL = @HAVE_LOG1PL@
HAVE_LOGBF = @HAVE_LOGBF@
HAVE_LOGBL = @HAVE_LOGBL@
HAVE_LOGF = @HAVE_LOGF@
HAVE_LOGL = @HAVE_LOGL@
HAVE_LONG_LONG_INT = @HAVE_LONG_LONG_INT@
HAVE_LSTAT = @HAVE_LSTAT@
HAVE_MAX_ALIGN_T = @HAVE_MAX_ALIGN_T@
HAVE_MBRLEN = @HAVE_MBRLEN@
HAVE_MBRTOWC = @HAVE_MBRTOWC@
HAVE_MBSINIT = @HAVE_MBSINIT@
HAVE_MBSLEN = @HAVE_MBSLEN@
HAVE_MBSNRTOWCS = @HAVE_MBSNRTOWCS@
HAVE_MBSRTOWCS = @HAVE_MBSRTOWCS@
HAVE_MEMCHR = @HAVE_MEMCHR@
HAVE_MEMPCPY = @HAVE_MEMPCPY@
HAVE_MKDIRAT = @HAVE_MKDIRAT@
HAVE_MKDTEMP = @HAVE_MKDTEMP@
HAVE_MKFIFO = @HAVE_MKFIFO@
HAVE_MKFIFOAT = @HAVE_MKFIFOAT@
HAVE_MKNOD = @HAVE_MKNOD@
HAVE_MKNODAT = @HAVE_MKNODAT@
HAVE_MKOSTEMP = @HAVE_MKOSTEMP@
HAVE_MKOSTEMPS = @HAVE_MKOSTEMPS@
HAVE_MKSTEMP = @HAVE_MKSTEMP@
HAVE_MKSTEMPS = @HAVE_MKSTEMPS@
HAVE_MODFF = @HAVE_MODFF@
HAVE_MODFL = @HAVE_MODFL@
HAVE_MSVC_INVALID_PARAMETER_HANDLER = @HAVE_MSVC_INVALID_PARAMETER_HANDLER@
HAVE_NANOSLEEP = @HAVE_NANOSLEEP@
HAVE_NEWLOCALE = @HAVE_NEWLOCALE@
HAVE_NL_LANGINFO = @HAVE_NL_LANGINFO@
HAVE_OPENAT = @HAVE_OPENAT@
HAVE_OS_H = @HAVE_OS_H@
HAVE_PCLOSE = @HAVE_PCLOSE@
HAVE_PIPE = @HAVE_PIPE@
HAVE_PIPE2 = @HAVE_PIPE2@
HAVE_POPEN = @HAVE_POPEN@
HAVE_POSIX_OPENPT = @HAVE_POSIX_OPENPT@
HAVE_POSIX_SIGNALBLOCKING = @HAVE_POSIX_SIGNALBLOCKING@
HAVE_POSIX_SPAWN = @HAVE_POSIX_SPAWN@
HAVE_POSIX_SPAWNATTR_T = @HAVE_POSIX_SPAWNATTR_T@
HAVE_POSIX_SPAWN_FILE_ACTIONS_ADDCHDIR = @HAVE_POSIX_SPAWN_FILE_ACTIONS_ADDCHDIR@
HAVE_POSIX_SPAWN_FILE_ACTIONS_T = @HAVE_POSIX_SPAWN_FILE_ACTIONS_T@
HAVE_POWF = @HAVE_POWF@
HAVE_PREAD = @HAVE_PREAD@
HAVE_PTHREAD_SIGMASK = @HAVE_PTHREAD_SIGMASK@
HAVE_PTSNAME = @HAVE_PTSNAME@
HAVE_PTSNAME_R = @HAVE_PTSNAME_R@
HAVE_PWRITE = @HAVE_PWRITE@
HAVE_QSORT_R = @HAVE_QSORT_R@
HAVE_RAISE = @HAVE_RAISE@
HAVE_RANDOM = @HAVE_RANDOM@
HAVE_RANDOM_H = @HAVE_RANDOM_H@
HAVE_RANDOM_R = @HAVE_RANDOM_R@
HAVE_RAWMEMCHR = @HAVE_RAWMEMCHR@
HAVE_READLINK = @HAVE_READLINK@
HAVE_READLINKAT = @HAVE_READLINKAT@
HAVE_REALLOCARRAY = @HAVE_REALLOCARRAY@
HAVE_REALPATH = @HAVE_REALPATH@
HAVE_REMAINDER = @HAVE_REMAINDER@
HAVE_REMAINDERF = @HAVE_REMAINDERF@
HAVE_RENAMEAT = @HAVE_RENAMEAT@
HAVE_RINT = @HAVE_RINT@
HAVE_RINTL = @HAVE_RINTL@
HAVE_RPMATCH = @HAVE_RPMATCH@
HAVE_SAME_LONG_DOUBLE_AS_DOUBLE = @HAVE_SAME_LONG_DOUBLE_AS_DOUBLE@
HAVE_SCHED_H = @HAVE_SCHED_H@
HAVE_SECURE_GETENV = @HAVE_SECURE_GETENV@
HAVE_SETENV = @HAVE_SETENV@
HAVE_SETHOSTNAME = @HAVE_SETHOSTNAME@
HAVE_SIGACTION = @HAVE_SIGACTION@
HAVE_SIGHANDLER_T = @HAVE_SIGHANDLER_T@
HAVE_SIGINFO_T = @HAVE_SIGINFO_T@
HAVE_SIGNED_SIG_ATOMIC_T = @HAVE_SIGNED_SIG_ATOMIC_T@
HAVE_SIGNED_WCHAR_T = @HAVE_SIGNED_WCHAR_T@
HAVE_SIGNED_WINT_T = @HAVE_SIGNED_WINT_T@
HAVE_SIGSET_T = @HAVE_SIGSET_T@
HAVE_SINF = @HAVE_SINF@
HAVE_SINHF = @HAVE_SINHF@
HAVE_SINL = @HAVE_SINL@
HAVE_SLEEP = @HAVE_SLEEP@
HAVE_SPAWN_H = @HAVE_SPAWN_H@
HAVE_SQRTF = @HAVE_SQRTF@
HAVE_SQRTL = @HAVE_SQRTL@
HAVE_STDINT_H = @HAVE_STDINT_H@
HAVE_STPCPY = @HAVE_STPCPY@
HAVE_STPNCPY = @HAVE_STPNCPY@
HAVE_STRCASECMP = @HAVE_STRCASECMP@
HAVE_STRCASESTR = @HAVE_STRCASESTR@
HAVE_STRCHRNUL = @HAVE_STRCHRNUL@
HAVE_STRINGS_H = @HAVE_STRINGS_H@
HAVE_STRPBRK = @HAVE_STRPBRK@
HAVE_STRPTIME = @HAVE_STRPTIME@
HAVE_STRSEP = @HAVE_STRSEP@
HAVE_STRTOD = @HAVE_STRTOD@
HAVE_STRTOLL = @HAVE_STRTOLL@
HAVE_STRTOULL = @HAVE_STRTOULL@
HAVE_STRUCT_RANDOM_DATA = @HAVE_STRUCT_RANDOM_DATA@
HAVE_STRUCT_SCHED_PARAM = @HAVE_STRUCT_SCHED_PARAM@
HAVE_STRUCT_SIGACTION_SA_SIGACTION = @HAVE_STRUCT_SIGACTION_SA_SIGACTION@
HAVE_STRUCT_TIMEVAL = @HAVE_STRUCT_TIMEVAL@
HAVE_STRVERSCMP = @HAVE_STRVERSCMP@
HAVE_SYMLINK = @HAVE_SYMLINK@
HAVE_SYMLINKAT = @HAVE_SYMLINKAT@
HAVE_SYS_BITYPES_H = @HAVE_SYS_BITYPES_H@
HAVE_SYS_CDEFS_H = @HAVE_SYS_CDEFS_H@
HAVE_SYS_FILE_H = @HAVE_SYS_FILE_H@
HAVE_SYS_INTTYPES_H = @HAVE_SYS_INTTYPES_H@
HAVE_SYS_LOADAVG_H = @HAVE_SYS_LOADAVG_H@
HAVE_SYS_PARAM_H = @HAVE_SYS_PARAM_H@
HAVE_SYS_TIME_H = @HAVE_SYS_TIME_H@
HAVE_SYS_TYPES_H = @HAVE_SYS_TYPES_H@
HAVE_TANF = @HAVE_TANF@
HAVE_TANHF = @HAVE_TANHF@
HAVE_TANL = @HAVE_TANL@
HAVE_TIMEGM = @HAVE_TIMEGM@
HAVE_TIMEZONE_T = @HAVE_TIMEZONE_T@
HAVE_TYPE_VOLATILE_SIG_ATOMIC_T = @HAVE_TYPE_VOLATILE_SIG_ATOMIC_T@
HAVE_TZSET = @HAVE_TZSET@
HAVE_UNISTD_H = @HAVE_UNISTD_H@
HAVE_UNLINKAT = @HAVE_UNLINKAT@
HAVE_UNLOCKPT = @HAVE_UNLOCKPT@
HAVE_UNSIGNED_LONG_LONG_INT = @HAVE_UNSIGNED_LONG_LONG_INT@
HAVE_USLEEP = @HAVE_USLEEP@
HAVE_UTIMENSAT = @HAVE_UTIMENSAT@
HAVE_VASPRINTF = @HAVE_VASPRINTF@
HAVE_VDPRINTF = @HAVE_VDPRINTF@
HAVE_WCHAR_H = @HAVE_WCHAR_H@
HAVE_WCHAR_T = @HAVE_WCHAR_T@
HAVE_WCPCPY = @HAVE_WCPCPY@
HAVE_WCPNCPY = @HAVE_WCPNCPY@
HAVE_WCRTOMB = @HAVE_WCRTOMB@
HAVE_WCSCASECMP = @HAVE_WCSCASECMP@
HAVE_WCSCAT = @HAVE_WCSCAT@
HAVE_WCSCHR = @HAVE_WCSCHR@
HAVE_WCSCMP = @HAVE_WCSCMP@
HAVE_WCSCOLL = @HAVE_WCSCOLL@
HAVE_WCSCPY = @HAVE_WCSCPY@
HAVE_WCSCSPN = @HAVE_WCSCSPN@
HAVE_WCSDUP = @HAVE_WCSDUP@
HAVE_WCSFTIME = @HAVE_WCSFTIME@
HAVE_WCSLEN = @HAVE_WCSLEN@
HAVE_WCSNCASECMP = @HAVE_WCSNCASECMP@
HAVE_WCSNCAT = @HAVE_WCSNCAT@
HAVE_WCSNCMP = @HAVE_WCSNCMP@
HAVE_WCSNCPY = @HAVE_WCSNCPY@
HAVE_WCSNLEN = @HAVE_WCSNLEN@
HAVE_WCSNRTOMBS = @HAVE_WCSNRTOMBS@
HAVE_WCSPBRK = @HAVE_WCSPBRK@
HAVE_WCSRCHR = @HAVE_WCSRCHR@
HAVE_WCSRTOMBS = @HAVE_WCSRTOMBS@
HAVE_WCSSPN = @HAVE_WCSSPN@
HAVE_WCSSTR = @HAVE_WCSSTR@
HAVE_WCSTOK = @HAVE_WCSTOK@
HAVE_WCSWIDTH = @HAVE_WCSWIDTH@
HAVE_WCSXFRM = @HAVE_WCSXFRM@
HAVE_WCTRANS_T = @HAVE_WCTRANS_T@
HAVE_WCTYPE_H = @HAVE_WCTYPE_H@
HAVE_WCTYPE_T = @HAVE_WCTYPE_T@
HAVE_WINSOCK2_H = @HAVE_WINSOCK2_H@
HAVE_WINT_T = @HAVE_WINT_T@
HAVE_WMEMCHR = @HAVE_WMEMCHR@
HAVE_WMEMCMP = @HAVE_WMEMCMP@
HAVE_WMEMCPY = @HAVE_WMEMCPY@
HAVE_WMEMMOVE = @HAVE_WMEMMOVE@
HAVE_WMEMSET = @HAVE_WMEMSET@
HAVE_XLOCALE_H = @HAVE_XLOCALE_H@
HAVE__BOOL = @HAVE__BOOL@
HAVE__EXIT = @HAVE__EXIT@
HELP2MAN = @HELP2MAN@
INCLUDE_NEXT = @INCLUDE_NEXT@
INCLUDE_NEXT_AS_FIRST_DIRECTIVE = @INCLUDE_NEXT_AS_FIRST_DIRECTIVE@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
INT32_MAX_LT_INTMAX_MAX = @INT32_MAX_LT_INTMAX_MAX@
INT64_MAX_EQ_LONG_MAX = @INT64_MAX_EQ_LONG_MAX@
INTLLIBS = @INTLLIBS@
INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
LD = @LD@
LDFLAGS = @LDFLAGS@
LEX = @LEX@
LEXLIB = @LEXLIB@
LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
LIBGCRYPT = @LIBGCRYPT@
LIBGCRYPT_PREFIX = @LIBGCRYPT_PREFIX@
LIBICONV = @LIBICONV@
LIBINTL = @LIBINTL@
LIBMULTITHREAD = @LIBMULTITHREAD@
LIBOBJS = @LIBOBJS@
LIBPTH = @LIBPTH@
LIBPTH_PREFIX = @LIBPTH_PREFIX@
LIBREADLINE = @LIBREADLINE@
LIBS = @LIBS@
LIBTHREAD = @LIBTHREAD@
LIBTOOL = @LIBTOOL@
LIB_ACL = @LIB_ACL@
LIB_CLOCK_GETTIME = @LIB_CLOCK_GETTIME@
LIB_EACCESS = @LIB_EACCESS@
LIB_POSIX_SPAWN = @LIB_POSIX_SPAWN@
LIB_SELINUX = @LIB_SELINUX@
LIMITS_H = @LIMITS_H@
LIPO = @LIPO@
LN_S = @LN_S@
LOCALCHARSET_TESTS_ENVIRONMENT = @LOCALCHARSET_TESTS_ENVIRONMENT@
LOCALE_FR = @LOCALE_FR@
LOCALE_FR_UTF8 = @LOCALE_FR_UTF8@
LOCALE_JA = @LOCALE_JA@
LOCALE_ZH_CN = @LOCALE_ZH_CN@
LTALLOCA = @LTALLOCA@
LTLIBGCRYPT = @LTLIBGCRYPT@
LTLIBICONV = @LTLIBICONV@
LTLIBINTL = @LTLIBINTL@
LTLIBMULTITHREAD = @LTLIBMULTITHREAD@
LTLIBOBJS = @LTLIBOBJS@
LTLIBPTH = @LTLIBPTH@
LTLIBREADLINE = @LTLIBREADLINE@
LTLIBTHREAD = @LTLIBTHREAD@
LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MDBLIBS = @MDBLIBS@
MDB_DATETIME = @MDB_DATETIME@
MKDIR_P = @MKDIR_P@
MSGFMT = @MSGFMT@
MSGFMT_015 = @MSGFMT_015@
MSGMERGE = @MSGMERGE@
NEXT_AS_FIRST_DIRECTIVE_ERRNO_H = @NEXT_AS_FIRST_DIRECTIVE_ERRNO_H@
NEXT_AS_FIRST_DIRECTIVE_FCNTL_H = @NEXT_AS_FIRST_DIRECTIVE_FCNTL_H@
NEXT_AS_FIRST_DIRECTIVE_FLOAT_H = @NEXT_AS_FIRST_DIRECTIVE_FLOAT_H@
NEXT_AS_FIRST_DIRECTIVE_GETOPT_H = @NEXT_AS_FIRST_DIRECTIVE_GETOPT_H@
NEXT_AS_FIRST_DIRECTIVE_INTTYPES_H = @NEXT_AS_FIRST_DIRECTIVE_INTTYPES_H@
NEXT_AS_FIRST_DIRECTIVE_LANGINFO_H = @NEXT_AS_FIRST_DIRECTIVE_LANGINFO_H@
NEXT_AS_FIRST_DIRECTIVE_LIMITS_H = @NEXT_AS_FIRST_DIRECTIVE_LIMITS_H@
NEXT_AS_FIRST_DIRECTIVE_LOCALE_H = @NEXT_AS_FIRST_DIRECTIVE_LOCALE_H@
NEXT_AS_FIRST_DIRECTIVE_MATH_H = @NEXT_AS_FIRST_DIRECTIVE_MATH_H@
NEXT_AS_FIRST_DIRECTIVE_SCHED_H = @NEXT_AS_FIRST_DIRECTIVE_SCHED_H@
NEXT_AS_FIRST_DIRECTIVE_SELINUX_SELINUX_H = @NEXT_AS_FIRST_DIRECTIVE_SELINUX_SELINUX_H@
NEXT_AS_FIRST_DIRECTIVE_SIGNAL_H = @NEXT_AS_FIRST_DIRECTIVE_SIGNAL_H@
NEXT_AS_FIRST_DIRECTIVE_SPAWN_H = @NEXT_AS_FIRST_DIRECTIVE_SPAWN_H@
NEXT_AS_FIRST_DIRECTIVE_STDARG_H = @NEXT_AS_FIRST_DIRECTIVE_STDARG_H@
NEXT_AS_FIRST_DIRECTIVE_STDDEF_H = @NEXT_AS_FIRST_DIRECTIVE_STDDEF_H@
NEXT_AS_FIRST_DIRECTIVE_STDINT_H = @NEXT_AS_FIRST_DIRECTIVE_STDINT_H@
NEXT_AS_FIRST_DIRECTIVE_STDIO_H = @NEXT_AS_FIRST_DIRECTIVE_STDIO_H@
NEXT_AS_FIRST_DIRECTIVE_STDLIB_H = @NEXT_AS_FIRST_DIRECTIVE_STDLIB_H@
NEXT_AS_FIRST_DIRECTIVE_STRINGS_H = @NEXT_AS_FIRST_DIRECTIVE_STRINGS_H@
NEXT_AS_FIRST_DIRECTIVE_STRING_H = @NEXT_AS_FIRST_DIRECTIVE_STRING_H@
NEXT_AS_FIRST_DIRECTIVE_SYS_FILE_H = @NEXT_AS_FIRST_DIRECTIVE_SYS_FILE_H@
NEXT_AS_FIRST_DIRECTIVE_SYS_STAT_H = @NEXT_AS_FIRST_DIRECTIVE_SYS_STAT_H@
NEXT_AS_FIRST_DIRECTIVE_SYS_TIME_H = @NEXT_AS_FIRST_DIRECTIVE_SYS_TIME_H@
NEXT_AS_FIRST_DIRECTIVE_SYS_TYPES_H = @NEXT_AS_FIRST_DIRECTIVE_SYS_TYPES_H@
NEXT_AS_FIRST_DIRECTIVE_SYS_WAIT_H = @NEXT_AS_FIRST_DIRECTIVE_SYS_WAIT_H@
NEXT_AS_FIRST_DIRECTIVE_TIME_H = @NEXT_AS_FIRST_DIRECTIVE_TIME_H@
NEXT_AS_FIRST_DIRECTIVE_UNISTD_H = @NEXT_AS_FIRST_DIRECTIVE_UNISTD_H@
NEXT_AS_FIRST_DIRECTIVE_WCHAR_H = @NEXT_AS_FIRST_DIRECTIVE_WCHAR_H@
NEXT_AS_FIRST_DIRECTIVE_WCTYPE_H = @NEXT_AS_FIRST_DIRECTIVE_WCTYPE_H@
NEXT_ERRNO_H = @NEXT_ERRNO_H@
NEXT_FCNTL_H = @NEXT_FCNTL_H@
NEXT_FLOAT_H = @NEXT_FLOAT_H@
NEXT_GETOPT_H = @NEXT_GETOPT_H@
NEXT_INTTYPES_H = @NEXT_INTTYPES_H@
NEXT_LANGINFO_H = @NEXT_LANGINFO_H@
NEXT_LIMITS_H = @NEXT_LIMITS_H@
NEXT_LOCALE_H = @NEXT_LOCALE_H@
NEXT_MATH_H = @NEXT_MATH_H@
NEXT_SCHED_H = @NEXT_SCHED_H@
NEXT_SELINUX_SELINUX_H = @NEXT_SELINUX_SELINUX_H@
NEXT_SIGNAL_H = @NEXT_SIGNAL_H@
NEXT_SPAWN_H = @NEXT_SPAWN_H@
NEXT_STDARG_H = @NEXT_STDARG_H@
NEXT_STDDEF_H = @NEXT_STDDEF_H@
NEXT_STDINT_H = @NEXT_STDINT_H@
NEXT_STDIO_H = @NEXT_STDIO_H@
NEXT_STDLIB_H = @NEXT_STDLIB_H@
NEXT_STRINGS_H = @NEXT_STRINGS_H@
NEXT_STRING_H = @NEXT_STRING_H@
NEXT_SYS_FILE_H = @NEXT_SYS_FILE_H@
NEXT_SYS_STAT_H = @NEXT_SYS_STAT_H@
NEXT_SYS_TIME_H = @NEXT_SYS_TIME_H@
NEXT_SYS_TYPES_H = @NEXT_SYS_TYPES_H@
NEXT_SYS_WAIT_H = @NEXT_SYS_WAIT_H@
NEXT_TIME_H = @NEXT_TIME_H@
NEXT_UNISTD_H = @NEXT_UNISTD_H@
NEXT_WCHAR_H = @NEXT_WCHAR_H@
NEXT_WCTYPE_H = @NEXT_WCTYPE_H@
NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PKG_CONFIG = @PKG_CONFIG@
PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
POSUB = @POSUB@
PRAGMA_COLUMNS = @PRAGMA_COLUMNS@
PRAGMA_SYSTEM_HEADER = @PRAGMA_SYSTEM_HEADER@
PRIPTR_PREFIX = @PRIPTR_PREFIX@
PRI_MACROS_BROKEN = @PRI_MACROS_BROKEN@
PTHREAD_H_DEFINES_STRUCT_TIMESPEC = @PTHREAD_H_DEFINES_STRUCT_TIMESPEC@
PTRDIFF_T_SUFFIX = @PTRDIFF_T_SUFFIX@
RANLIB = @RANLIB@
REPLACE_ACOSF = @REPLACE_ACOSF@
REPLACE_ASINF = @REPLACE_ASINF@
REPLACE_ATAN2F = @REPLACE_ATAN2F@
REPLACE_ATANF = @REPLACE_ATANF@
REPLACE_BTOWC = @REPLACE_BTOWC@
REPLACE_CALLOC = @REPLACE_CALLOC@
REPLACE_CANONICALIZE_FILE_NAME = @REPLACE_CANONICALIZE_FILE_NAME@
REPLACE_CBRTF = @REPLACE_CBRTF@
REPLACE_CBRTL = @REPLACE_CBRTL@
REPLACE_CEIL = @REPLACE_CEIL@
REPLACE_CEILF = @REPLACE_CEILF@
REPLACE_CEILL = @REPLACE_CEILL@
REPLACE_CHOWN = @REPLACE_CHOWN@
REPLACE_CLOSE = @REPLACE_CLOSE@
REPLACE_COSF = @REPLACE_COSF@
REPLACE_COSHF = @REPLACE_COSHF@
REPLACE_CTIME = @REPLACE_CTIME@
REPLACE_DPRINTF = @REPLACE_DPRINTF@
REPLACE_DUP = @REPLACE_DUP@
REPLACE_DUP2 = @REPLACE_DUP2@
REPLACE_DUPLOCALE = @REPLACE_DUPLOCALE@
REPLACE_EXP2 = @REPLACE_EXP2@
REPLACE_EXP2L = @REPLACE_EXP2L@
REPLACE_EXPF = @REPLACE_EXPF@
REPLACE_EXPM1 = @REPLACE_EXPM1@
REPLACE_EXPM1F = @REPLACE_EXPM1F@
REPLACE_FABSL = @REPLACE_FABSL@
REPLACE_FACCESSAT = @REPLACE_FACCESSAT@
REPLACE_FCHOWNAT = @REPLACE_FCHOWNAT@
REPLACE_FCLOSE = @REPLACE_FCLOSE@
REPLACE_FCNTL = @REPLACE_FCNTL@
REPLACE_FDOPEN = @REPLACE_FDOPEN@
REPLACE_FFLUSH = @REPLACE_FFLUSH@
REPLACE_FLOOR = @REPLACE_FLOOR@
REPLACE_FLOORF = @REPLACE_FLOORF@
REPLACE_FLOORL = @REPLACE_FLOORL@
REPLACE_FMA = @REPLACE_FMA@
REPLACE_FMAF = @REPLACE_FMAF@
REPLACE_FMAL = @REPLACE_FMAL@
REPLACE_FMOD = @REPLACE_FMOD@
REPLACE_FMODF = @REPLACE_FMODF@
REPLACE_FMODL = @REPLACE_FMODL@
REPLACE_FOPEN = @REPLACE_FOPEN@
REPLACE_FPRINTF = @REPLACE_FPRINTF@
REPLACE_FPURGE = @REPLACE_FPURGE@
REPLACE_FREELOCALE = @REPLACE_FREELOCALE@
REPLACE_FREOPEN = @REPLACE_FREOPEN@
REPLACE_FREXP = @REPLACE_FREXP@
REPLACE_FREXPF = @REPLACE_FREXPF@
REPLACE_FREXPL = @REPLACE_FREXPL@
REPLACE_FSEEK = @REPLACE_FSEEK@
REPLACE_FSEEKO = @REPLACE_FSEEKO@
REPLACE_FSTAT = @REPLACE_FSTAT@
REPLACE_FSTATAT = @REPLACE_FSTATAT@
REPLACE_FTELL = @REPLACE_FTELL@
REPLACE_FTELLO = @REPLACE_FTELLO@
REPLACE_FTRUNCATE = @REPLACE_FTRUNCATE@
REPLACE_FUTIMENS = @REPLACE_FUTIMENS@
REPLACE_GETCWD = @REPLACE_GETCWD@
REPLACE_GETDELIM = @REPLACE_GETDELIM@
REPLACE_GETDOMAINNAME = @REPLACE_GETDOMAINNAME@
REPLACE_GETDTABLESIZE = @REPLACE_GETDTABLESIZE@
REPLACE_GETGROUPS = @REPLACE_GETGROUPS@
REPLACE_GETLINE = @REPLACE_GETLINE@
REPLACE_GETLOGIN_R = @REPLACE_GETLOGIN_R@
REPLACE_GETPAGESIZE = @REPLACE_GETPAGESIZE@
REPLACE_GETPASS = @REPLACE_GETPASS@
REPLACE_GETTIMEOFDAY = @REPLACE_GETTIMEOFDAY@
REPLACE_GMTIME = @REPLACE_GMTIME@
REPLACE_HUGE_VAL = @REPLACE_HUGE_VAL@
REPLACE_HYPOT = @REPLACE_HYPOT@
REPLACE_HYPOTF = @REPLACE_HYPOTF@
REPLACE_HYPOTL = @REPLACE_HYPOTL@
REPLACE_ILOGB = @REPLACE_ILOGB@
REPLACE_ILOGBF = @REPLACE_ILOGBF@
REPLACE_ILOGBL = @REPLACE_ILOGBL@
REPLACE_ISATTY = @REPLACE_ISATTY@
REPLACE_ISFINITE = @REPLACE_ISFINITE@
REPLACE_ISINF = @REPLACE_ISINF@
REPLACE_ISNAN = @REPLACE_ISNAN@
REPLACE_ISWBLANK = @REPLACE_ISWBLANK@
REPLACE_ISWCNTRL = @REPLACE_ISWCNTRL@
REPLACE_ITOLD = @REPLACE_ITOLD@
REPLACE_LCHOWN = @REPLACE_LCHOWN@
REPLACE_LDEXPL = @REPLACE_LDEXPL@
REPLACE_LINK = @REPLACE_LINK@
REPLACE_LINKAT = @REPLACE_LINKAT@
REPLACE_LOCALECONV = @REPLACE_LOCALECONV@
REPLACE_LOCALTIME = @REPLACE_LOCALTIME@
REPLACE_LOCALTIME_R = @REPLACE_LOCALTIME_R@
REPLACE_LOG = @REPLACE_LOG@
REPLACE_LOG10 = @REPLACE_LOG10@
REPLACE_LOG10F = @REPLACE_LOG10F@
REPLACE_LOG10L = @REPLACE_LOG10L@
REPLACE_LOG1P = @REPLACE_LOG1P@
REPLACE_LOG1PF = @REPLACE_LOG1PF@
REPLACE_LOG1PL = @REPLACE_LOG1PL@
REPLACE_LOG2 = @REPLACE_LOG2@
REPLACE_LOG2F = @REPLACE_LOG2F@
REPLACE_LOG2L = @REPLACE_LOG2L@
REPLACE_LOGB = @REPLACE_LOGB@
REPLACE_LOGBF = @REPLACE_LOGBF@
REPLACE_LOGBL = @REPLACE_LOGBL@
REPLACE_LOGF = @REPLACE_LOGF@
REPLACE_LOGL = @REPLACE_LOGL@
REPLACE_LSEEK = @REPLACE_LSEEK@
REPLACE_LSTAT = @REPLACE_LSTAT@
REPLACE_MALLOC = @REPLACE_MALLOC@
REPLACE_MBRLEN = @REPLACE_MBRLEN@
REPLACE_MBRTOWC = @REPLACE_MBRTOWC@
REPLACE_MBSINIT = @REPLACE_MBSINIT@
REPLACE_MBSNRTOWCS = @REPLACE_MBSNRTOWCS@
REPLACE_MBSRTOWCS = @REPLACE_MBSRTOWCS@
REPLACE_MBSTATE_T = @REPLACE_MBSTATE_T@
REPLACE_MBTOWC = @REPLACE_MBTOWC@
REPLACE_MEMCHR = @REPLACE_MEMCHR@
REPLACE_MEMMEM = @REPLACE_MEMMEM@
REPLACE_MKDIR = @REPLACE_MKDIR@
REPLACE_MKFIFO = @REPLACE_MKFIFO@
REPLACE_MKNOD = @REPLACE_MKNOD@
REPLACE_MKSTEMP = @REPLACE_MKSTEMP@
REPLACE_MKTIME = @REPLACE_MKTIME@
REPLACE_MODF = @REPLACE_MODF@
REPLACE_MODFF = @REPLACE_MODFF@
REPLACE_MODFL = @REPLACE_MODFL@
REPLACE_NAN = @REPLACE_NAN@
REPLACE_NANOSLEEP = @REPLACE_NANOSLEEP@
REPLACE_NEWLOCALE = @REPLACE_NEWLOCALE@
REPLACE_NL_LANGINFO = @REPLACE_NL_LANGINFO@
REPLACE_NULL = @REPLACE_NULL@
REPLACE_OBSTACK_PRINTF = @REPLACE_OBSTACK_PRINTF@
REPLACE_OPEN = @REPLACE_OPEN@
REPLACE_OPENAT = @REPLACE_OPENAT@
REPLACE_PERROR = @REPLACE_PERROR@
REPLACE_POPEN = @REPLACE_POPEN@
REPLACE_POSIX_SPAWN = @REPLACE_POSIX_SPAWN@
REPLACE_POSIX_SPAWN_FILE_ACTIONS_ADDCHDIR = @REPLACE_POSIX_SPAWN_FILE_ACTIONS_ADDCHDIR@
REPLACE_POSIX_SPAWN_FILE_ACTIONS_ADDCLOSE = @REPLACE_POSIX_SPAWN_FILE_ACTIONS_ADDCLOSE@
REPLACE_POSIX_SPAWN_FILE_ACTIONS_ADDDUP2 = @REPLACE_POSIX_SPAWN_FILE_ACTIONS_ADDDUP2@
REPLACE_POSIX_SPAWN_FILE_ACTIONS_ADDOPEN = @REPLACE_POSIX_SPAWN_FILE_ACTIONS_ADDOPEN@
REPLACE_PREAD = @REPLACE_PREAD@
REPLACE_PRINTF = @REPLACE_PRINTF@
REPLACE_PTHREAD_SIGMASK = @REPLACE_PTHREAD_SIGMASK@
REPLACE_PTSNAME = @REPLACE_PTSNAME@
REPLACE_PTSNAME_R = @REPLACE_PTSNAME_R@
REPLACE_PUTENV = @REPLACE_PUTENV@
REPLACE_PWRITE = @REPLACE_PWRITE@
REPLACE_QSORT_R = @REPLACE_QSORT_R@
REPLACE_RAISE = @REPLACE_RAISE@
REPLACE_RANDOM_R = @REPLACE_RANDOM_R@
REPLACE_READ = @REPLACE_READ@
REPLACE_READLINK = @REPLACE_READLINK@
REPLACE_READLINKAT = @REPLACE_READLINKAT@
REPLACE_REALLOC = @REPLACE_REALLOC@
REPLACE_REALPATH = @REPLACE_REALPATH@
REPLACE_REMAINDER = @REPLACE_REMAINDER@
REPLACE_REMAINDERF = @REPLACE_REMAINDERF@
REPLACE_REMAINDERL = @REPLACE_REMAINDERL@
REPLACE_REMOVE = @REPLACE_REMOVE@
REPLACE_RENAME = @REPLACE_RENAME@
REPLACE_RENAMEAT = @REPLACE_RENAMEAT@
REPLACE_RMDIR = @REPLACE_RMDIR@
REPLACE_ROUND = @REPLACE_ROUND@
REPLACE_ROUNDF = @REPLACE_ROUNDF@
REPLACE_ROUNDL = @REPLACE_ROUNDL@
REPLACE_SETENV = @REPLACE_SETENV@
REPLACE_SETLOCALE = @REPLACE_SETLOCALE@
REPLACE_SIGNBIT = @REPLACE_SIGNBIT@
REPLACE_SIGNBIT_USING_GCC = @REPLACE_SIGNBIT_USING_GCC@
REPLACE_SINF = @REPLACE_SINF@
REPLACE_SINHF = @REPLACE_SINHF@
REPLACE_SLEEP = @REPLACE_SLEEP@
REPLACE_SNPRINTF = @REPLACE_SNPRINTF@
REPLACE_SPRINTF = @REPLACE_SPRINTF@
REPLACE_SQRTF = @REPLACE_SQRTF@
REPLACE_SQRTL = @REPLACE_SQRTL@
REPLACE_STAT = @REPLACE_STAT@
REPLACE_STDIO_READ_FUNCS = @REPLACE_STDIO_READ_FUNCS@
REPLACE_STDIO_WRITE_FUNCS = @REPLACE_STDIO_WRITE_FUNCS@
REPLACE_STPNCPY = @REPLACE_STPNCPY@
REPLACE_STRCASESTR = @REPLACE_STRCASESTR@
REPLACE_STRCHRNUL = @REPLACE_STRCHRNUL@
REPLACE_STRDUP = @REPLACE_STRDUP@
REPLACE_STRERROR = @REPLACE_STRERROR@
REPLACE_STRERROR_R = @REPLACE_STRERROR_R@
REPLACE_STRFTIME = @REPLACE_STRFTIME@
REPLACE_STRNCAT = @REPLACE_STRNCAT@
REPLACE_STRNDUP = @REPLACE_STRNDUP@
REPLACE_STRNLEN = @REPLACE_STRNLEN@
REPLACE_STRSIGNAL = @REPLACE_STRSIGNAL@
REPLACE_STRSTR = @REPLACE_STRSTR@
REPLACE_STRTOD = @REPLACE_STRTOD@
REPLACE_STRTOIMAX = @REPLACE_STRTOIMAX@
REPLACE_STRTOK_R = @REPLACE_STRTOK_R@
REPLACE_STRTOUMAX = @REPLACE_STRTOUMAX@
REPLACE_STRUCT_LCONV = @REPLACE_STRUCT_LCONV@
REPLACE_STRUCT_TIMEVAL = @REPLACE_STRUCT_TIMEVAL@
REPLACE_SYMLINK = @REPLACE_SYMLINK@
REPLACE_SYMLINKAT = @REPLACE_SYMLINKAT@
REPLACE_TANF = @REPLACE_TANF@
REPLACE_TANHF = @REPLACE_TANHF@
REPLACE_TIMEGM = @REPLACE_TIMEGM@
REPLACE_TMPFILE = @REPLACE_TMPFILE@
REPLACE_TOWLOWER = @REPLACE_TOWLOWER@
REPLACE_TRUNC = @REPLACE_TRUNC@
REPLACE_TRUNCATE = @REPLACE_TRUNCATE@
REPLACE_TRUNCF = @REPLACE_TRUNCF@
REPLACE_TRUNCL = @REPLACE_TRUNCL@
REPLACE_TTYNAME_R = @REPLACE_TTYNAME_R@
REPLACE_TZSET = @REPLACE_TZSET@
REPLACE_UNLINK = @REPLACE_UNLINK@
REPLACE_UNLINKAT = @REPLACE_UNLINKAT@
REPLACE_UNSETENV = @REPLACE_UNSETENV@
REPLACE_USLEEP = @REPLACE_USLEEP@
REPLACE_UTIMENSAT = @REPLACE_UTIMENSAT@
REPLACE_VASPRINTF = @REPLACE_VASPRINTF@
REPLACE_VDPRINTF = @REPLACE_VDPRINTF@
REPLACE_VFPRINTF = @REPLACE_VFPRINTF@
REPLACE_VPRINTF = @REPLACE_VPRINTF@
REPLACE_VSNPRINTF = @REPLACE_VSNPRINTF@
REPLACE_VSPRINTF = @REPLACE_VSPRINTF@
REPLACE_WCRTOMB = @REPLACE_WCRTOMB@
REPLACE_WCSFTIME = @REPLACE_WCSFTIME@
REPLACE_WCSNRTOMBS = @REPLACE_WCSNRTOMBS@
REPLACE_WCSRTOMBS = @REPLACE_WCSRTOMBS@
REPLACE_WCSWIDTH = @REPLACE_WCSWIDTH@
REPLACE_WCTOB = @REPLACE_WCTOB@
REPLACE_WCTOMB = @REPLACE_WCTOMB@
REPLACE_WCWIDTH = @REPLACE_WCWIDTH@
REPLACE_WRITE = @REPLACE_WRITE@
SCHED_H = @SCHED_H@
SED = @SED@
SELINUX_CONTEXT_H = @SELINUX_CONTEXT_H@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
SIG_ATOMIC_T_SUFFIX = @SIG_ATOMIC_T_SUFFIX@
SIZE_T_SUFFIX = @SIZE_T_SUFFIX@
STDARG_H = @STDARG_H@
STDBOOL_H = @STDBOOL_H@
STDDEF_H = @STDDEF_H@
STDINT_H = @STDINT_H@
STRIP = @STRIP@
SYS_TIME_H_DEFINES_STRUCT_TIMESPEC = @SYS_TIME_H_DEFINES_STRUCT_TIMESPEC@
TIME_H_DEFINES_STRUCT_TIMESPEC = @TIME_H_DEFINES_STRUCT_TIMESPEC@
UINT32_MAX_LT_UINTMAX_MAX = @UINT32_MAX_LT_UINTMAX_MAX@
UINT64_MAX_EQ_ULONG_MAX = @UINT64_MAX_EQ_ULONG_MAX@
UNDEFINE_STRTOK_R = @UNDEFINE_STRTOK_R@
UNISTD_H_DEFINES_STRUCT_TIMESPEC = @UNISTD_H_DEFINES_STRUCT_TIMESPEC@
UNISTD_H_HAVE_WINSOCK2_H = @UNISTD_H_HAVE_WINSOCK2_H@
UNISTD_H_HAVE_WINSOCK2_H_AND_USE_SOCKETS = @UNISTD_H_HAVE_WINSOCK2_H_AND_USE_SOCKETS@
USE_ACL = @USE_ACL@
USE_NLS = @USE_NLS@
UUIDLIBS = @UUIDLIBS@
VERSION = @VERSION@
WCHAR_T_SUFFIX = @WCHAR_T_SUFFIX@
WINDOWS_64_BIT_OFF_T = @WINDOWS_64_BIT_OFF_T@
WINDOWS_64_BIT_ST_SIZE = @WINDOWS_64_BIT_ST_SIZE@
WINDOWS_STAT_INODES = @WINDOWS_STAT_INODES@
WINDOWS_STAT_TIMESPEC = @WINDOWS_STAT_TIMESPEC@
WINT_T_SUFFIX = @WINT_T_SUFFIX@
XGETTEXT = @XGETTEXT@
XGETTEXT_015 = @XGETTEXT_015@
XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@
YACC = @YACC@
YFLAGS = @YFLAGS@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
crypt_support = @crypt_support@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
gl_LIBOBJS = @gl_LIBOBJS@
gl_LTLIBOBJS = @gl_LTLIBOBJS@
gltests_LIBOBJS = @gltests_LIBOBJS@
gltests_LTLIBOBJS = @gltests_LTLIBOBJS@
gltests_WITNESS = @gltests_WITNESS@
have_uuid = @have_uuid@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
runstatedir = @runstatedir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
info_TEXINFOS = recutils.texi rec-mode.texi
recutils_TEXINFOS = parse-datetime.texi fdl.texi
all: all-am
.SUFFIXES:
.SUFFIXES: .dvi .html .info .pdf .ps .texi
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --gnu doc/Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
.texi.info:
$(AM_V_MAKEINFO)restore=: && backupdir="$(am__leading_dot)am$$$$" && \
am__cwd=`pwd` && $(am__cd) $(srcdir) && \
rm -rf $$backupdir && mkdir $$backupdir && \
if ($(MAKEINFO) --version) >/dev/null 2>&1; then \
for f in $@ $@-[0-9] $@-[0-9][0-9] $(@:.info=).i[0-9] $(@:.info=).i[0-9][0-9]; do \
if test -f $$f; then mv $$f $$backupdir; restore=mv; else :; fi; \
done; \
else :; fi && \
cd "$$am__cwd"; \
if $(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I $(srcdir) \
-o $@ $<; \
then \
rc=0; \
$(am__cd) $(srcdir); \
else \
rc=$$?; \
$(am__cd) $(srcdir) && \
$$restore $$backupdir/* `echo "./$@" | sed 's|[^/]*$$||'`; \
fi; \
rm -rf $$backupdir; exit $$rc
.texi.dvi:
$(AM_V_TEXI2DVI)TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \
MAKEINFO='$(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I $(srcdir)' \
$(TEXI2DVI) $(AM_V_texinfo) --build-dir=$(@:.dvi=.t2d) -o $@ $(AM_V_texidevnull) \
$<
.texi.pdf:
$(AM_V_TEXI2PDF)TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \
MAKEINFO='$(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I $(srcdir)' \
$(TEXI2PDF) $(AM_V_texinfo) --build-dir=$(@:.pdf=.t2p) -o $@ $(AM_V_texidevnull) \
$<
.texi.html:
$(AM_V_MAKEINFO)rm -rf $(@:.html=.htp)
$(AM_V_at)if $(MAKEINFOHTML) $(AM_MAKEINFOHTMLFLAGS) $(MAKEINFOFLAGS) -I $(srcdir) \
-o $(@:.html=.htp) $<; \
then \
rm -rf $@ && mv $(@:.html=.htp) $@; \
else \
rm -rf $(@:.html=.htp); exit 1; \
fi
$(srcdir)/recutils.info: recutils.texi $(srcdir)/version.texi $(recutils_TEXINFOS)
recutils.dvi: recutils.texi $(srcdir)/version.texi $(recutils_TEXINFOS)
recutils.pdf: recutils.texi $(srcdir)/version.texi $(recutils_TEXINFOS)
recutils.html: recutils.texi $(srcdir)/version.texi $(recutils_TEXINFOS)
$(srcdir)/version.texi: $(srcdir)/stamp-vti
$(srcdir)/stamp-vti: recutils.texi $(top_srcdir)/configure
@(dir=.; test -f ./recutils.texi || dir=$(srcdir); \
set `$(SHELL) $(top_srcdir)/build-aux/mdate-sh $$dir/recutils.texi`; \
echo "@set UPDATED $$1 $$2 $$3"; \
echo "@set UPDATED-MONTH $$2 $$3"; \
echo "@set EDITION $(VERSION)"; \
echo "@set VERSION $(VERSION)") > vti.tmp$$$$ && \
(cmp -s vti.tmp$$$$ $(srcdir)/version.texi \
|| (echo "Updating $(srcdir)/version.texi" && \
cp vti.tmp$$$$ $(srcdir)/version.texi.tmp$$$$ && \
mv $(srcdir)/version.texi.tmp$$$$ $(srcdir)/version.texi)) && \
rm -f vti.tmp$$$$ $(srcdir)/version.texi.$$$$
@cp $(srcdir)/version.texi $@
mostlyclean-vti:
-rm -f vti.tmp* $(srcdir)/version.texi.tmp*
maintainer-clean-vti:
-rm -f $(srcdir)/stamp-vti $(srcdir)/version.texi
$(srcdir)/rec-mode.info: rec-mode.texi $(srcdir)/version-rec-mode.texi
rec-mode.dvi: rec-mode.texi $(srcdir)/version-rec-mode.texi
rec-mode.pdf: rec-mode.texi $(srcdir)/version-rec-mode.texi
rec-mode.html: rec-mode.texi $(srcdir)/version-rec-mode.texi
$(srcdir)/version-rec-mode.texi: $(srcdir)/stamp-1
$(srcdir)/stamp-1: rec-mode.texi $(top_srcdir)/configure
@(dir=.; test -f ./rec-mode.texi || dir=$(srcdir); \
set `$(SHELL) $(top_srcdir)/build-aux/mdate-sh $$dir/rec-mode.texi`; \
echo "@set UPDATED $$1 $$2 $$3"; \
echo "@set UPDATED-MONTH $$2 $$3"; \
echo "@set EDITION $(VERSION)"; \
echo "@set VERSION $(VERSION)") > 1.tmp$$$$ && \
(cmp -s 1.tmp$$$$ $(srcdir)/version-rec-mode.texi \
|| (echo "Updating $(srcdir)/version-rec-mode.texi" && \
cp 1.tmp$$$$ $(srcdir)/version-rec-mode.texi.tmp$$$$ && \
mv $(srcdir)/version-rec-mode.texi.tmp$$$$ $(srcdir)/version-rec-mode.texi)) && \
rm -f 1.tmp$$$$ $(srcdir)/version-rec-mode.texi.$$$$
@cp $(srcdir)/version-rec-mode.texi $@
mostlyclean-1:
-rm -f 1.tmp* $(srcdir)/version-rec-mode.texi.tmp*
maintainer-clean-1:
-rm -f $(srcdir)/stamp-1 $(srcdir)/version-rec-mode.texi
.dvi.ps:
$(AM_V_DVIPS)TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \
$(DVIPS) $(AM_V_texinfo) -o $@ $<
uninstall-dvi-am:
@$(NORMAL_UNINSTALL)
@list='$(DVIS)'; test -n "$(dvidir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " rm -f '$(DESTDIR)$(dvidir)/$$f'"; \
rm -f "$(DESTDIR)$(dvidir)/$$f"; \
done
uninstall-html-am:
@$(NORMAL_UNINSTALL)
@list='$(HTMLS)'; test -n "$(htmldir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " rm -rf '$(DESTDIR)$(htmldir)/$$f'"; \
rm -rf "$(DESTDIR)$(htmldir)/$$f"; \
done
uninstall-info-am:
@$(PRE_UNINSTALL)
@if test -d '$(DESTDIR)$(infodir)' && $(am__can_run_installinfo); then \
list='$(INFO_DEPS)'; \
for file in $$list; do \
relfile=`echo "$$file" | sed 's|^.*/||'`; \
echo " install-info --info-dir='$(DESTDIR)$(infodir)' --remove '$(DESTDIR)$(infodir)/$$relfile'"; \
if install-info --info-dir="$(DESTDIR)$(infodir)" --remove "$(DESTDIR)$(infodir)/$$relfile"; \
then :; else test ! -f "$(DESTDIR)$(infodir)/$$relfile" || exit 1; fi; \
done; \
else :; fi
@$(NORMAL_UNINSTALL)
@list='$(INFO_DEPS)'; \
for file in $$list; do \
relfile=`echo "$$file" | sed 's|^.*/||'`; \
relfile_i=`echo "$$relfile" | sed 's|\.info$$||;s|$$|.i|'`; \
(if test -d "$(DESTDIR)$(infodir)" && cd "$(DESTDIR)$(infodir)"; then \
echo " cd '$(DESTDIR)$(infodir)' && rm -f $$relfile $$relfile-[0-9] $$relfile-[0-9][0-9] $$relfile_i[0-9] $$relfile_i[0-9][0-9]"; \
rm -f $$relfile $$relfile-[0-9] $$relfile-[0-9][0-9] $$relfile_i[0-9] $$relfile_i[0-9][0-9]; \
else :; fi); \
done
uninstall-pdf-am:
@$(NORMAL_UNINSTALL)
@list='$(PDFS)'; test -n "$(pdfdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " rm -f '$(DESTDIR)$(pdfdir)/$$f'"; \
rm -f "$(DESTDIR)$(pdfdir)/$$f"; \
done
uninstall-ps-am:
@$(NORMAL_UNINSTALL)
@list='$(PSS)'; test -n "$(psdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " rm -f '$(DESTDIR)$(psdir)/$$f'"; \
rm -f "$(DESTDIR)$(psdir)/$$f"; \
done
dist-info: $(INFO_DEPS)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
list='$(INFO_DEPS)'; \
for base in $$list; do \
case $$base in \
$(srcdir)/*) base=`echo "$$base" | sed "s|^$$srcdirstrip/||"`;; \
esac; \
if test -f $$base; then d=.; else d=$(srcdir); fi; \
base_i=`echo "$$base" | sed 's|\.info$$||;s|$$|.i|'`; \
for file in $$d/$$base $$d/$$base-[0-9] $$d/$$base-[0-9][0-9] $$d/$$base_i[0-9] $$d/$$base_i[0-9][0-9]; do \
if test -f $$file; then \
relfile=`expr "$$file" : "$$d/\(.*\)"`; \
test -f "$(distdir)/$$relfile" || \
cp -p $$file "$(distdir)/$$relfile"; \
else :; fi; \
done; \
done
mostlyclean-aminfo:
-rm -rf recutils.t2d recutils.t2p rec-mode.t2d rec-mode.t2p
clean-aminfo:
-test -z "recutils.dvi recutils.pdf recutils.ps recutils.html rec-mode.dvi \
rec-mode.pdf rec-mode.ps rec-mode.html" \
|| rm -rf recutils.dvi recutils.pdf recutils.ps recutils.html rec-mode.dvi \
rec-mode.pdf rec-mode.ps rec-mode.html
maintainer-clean-aminfo:
@list='$(INFO_DEPS)'; for i in $$list; do \
i_i=`echo "$$i" | sed 's|\.info$$||;s|$$|.i|'`; \
echo " rm -f $$i $$i-[0-9] $$i-[0-9][0-9] $$i_i[0-9] $$i_i[0-9][0-9]"; \
rm -f $$i $$i-[0-9] $$i-[0-9][0-9] $$i_i[0-9] $$i_i[0-9][0-9]; \
done
tags TAGS:
ctags CTAGS:
cscope cscopelist:
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
$(MAKE) $(AM_MAKEFLAGS) \
top_distdir="$(top_distdir)" distdir="$(distdir)" \
dist-info
check-am: all-am
check: check-am
all-am: Makefile $(INFO_DEPS)
installdirs:
for dir in "$(DESTDIR)$(infodir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-aminfo clean-generic clean-libtool mostlyclean-am
distclean: distclean-am
-rm -f Makefile
distclean-am: clean-am distclean-generic
dvi: dvi-am
dvi-am: $(DVIS)
html: html-am
html-am: $(HTMLS)
info: info-am
info-am: $(INFO_DEPS)
install-data-am: install-info-am
install-dvi: install-dvi-am
install-dvi-am: $(DVIS)
@$(NORMAL_INSTALL)
@list='$(DVIS)'; test -n "$(dvidir)" || list=; \
if test -n "$$list"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(dvidir)'"; \
$(MKDIR_P) "$(DESTDIR)$(dvidir)" || exit 1; \
fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(dvidir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(dvidir)" || exit $$?; \
done
install-exec-am:
install-html: install-html-am
install-html-am: $(HTMLS)
@$(NORMAL_INSTALL)
@list='$(HTMLS)'; list2=; test -n "$(htmldir)" || list=; \
if test -n "$$list"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(htmldir)'"; \
$(MKDIR_P) "$(DESTDIR)$(htmldir)" || exit 1; \
fi; \
for p in $$list; do \
if test -f "$$p" || test -d "$$p"; then d=; else d="$(srcdir)/"; fi; \
$(am__strip_dir) \
d2=$$d$$p; \
if test -d "$$d2"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(htmldir)/$$f'"; \
$(MKDIR_P) "$(DESTDIR)$(htmldir)/$$f" || exit 1; \
echo " $(INSTALL_DATA) '$$d2'/* '$(DESTDIR)$(htmldir)/$$f'"; \
$(INSTALL_DATA) "$$d2"/* "$(DESTDIR)$(htmldir)/$$f" || exit $$?; \
else \
list2="$$list2 $$d2"; \
fi; \
done; \
test -z "$$list2" || { echo "$$list2" | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(htmldir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(htmldir)" || exit $$?; \
done; }
install-info: install-info-am
install-info-am: $(INFO_DEPS)
@$(NORMAL_INSTALL)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
list='$(INFO_DEPS)'; test -n "$(infodir)" || list=; \
if test -n "$$list"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(infodir)'"; \
$(MKDIR_P) "$(DESTDIR)$(infodir)" || exit 1; \
fi; \
for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
esac; \
if test -f $$file; then d=.; else d=$(srcdir); fi; \
file_i=`echo "$$file" | sed 's|\.info$$||;s|$$|.i|'`; \
for ifile in $$d/$$file $$d/$$file-[0-9] $$d/$$file-[0-9][0-9] \
$$d/$$file_i[0-9] $$d/$$file_i[0-9][0-9] ; do \
if test -f $$ifile; then \
echo "$$ifile"; \
else : ; fi; \
done; \
done | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(infodir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(infodir)" || exit $$?; done
@$(POST_INSTALL)
@if $(am__can_run_installinfo); then \
list='$(INFO_DEPS)'; test -n "$(infodir)" || list=; \
for file in $$list; do \
relfile=`echo "$$file" | sed 's|^.*/||'`; \
echo " install-info --info-dir='$(DESTDIR)$(infodir)' '$(DESTDIR)$(infodir)/$$relfile'";\
install-info --info-dir="$(DESTDIR)$(infodir)" "$(DESTDIR)$(infodir)/$$relfile" || :;\
done; \
else : ; fi
install-man:
install-pdf: install-pdf-am
install-pdf-am: $(PDFS)
@$(NORMAL_INSTALL)
@list='$(PDFS)'; test -n "$(pdfdir)" || list=; \
if test -n "$$list"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(pdfdir)'"; \
$(MKDIR_P) "$(DESTDIR)$(pdfdir)" || exit 1; \
fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pdfdir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(pdfdir)" || exit $$?; done
install-ps: install-ps-am
install-ps-am: $(PSS)
@$(NORMAL_INSTALL)
@list='$(PSS)'; test -n "$(psdir)" || list=; \
if test -n "$$list"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(psdir)'"; \
$(MKDIR_P) "$(DESTDIR)$(psdir)" || exit 1; \
fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(psdir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(psdir)" || exit $$?; done
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-1 \
maintainer-clean-aminfo maintainer-clean-generic \
maintainer-clean-vti
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-1 mostlyclean-aminfo mostlyclean-generic \
mostlyclean-libtool mostlyclean-vti
pdf: pdf-am
pdf-am: $(PDFS)
ps: ps-am
ps-am: $(PSS)
uninstall-am: uninstall-dvi-am uninstall-html-am uninstall-info-am \
uninstall-pdf-am uninstall-ps-am
.MAKE: install-am install-strip
.PHONY: all all-am check check-am clean clean-aminfo clean-generic \
clean-libtool cscopelist-am ctags-am dist-info distclean \
distclean-generic distclean-libtool distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-dvi install-dvi-am install-exec \
install-exec-am install-html install-html-am install-info \
install-info-am install-man install-pdf install-pdf-am \
install-ps install-ps-am install-strip installcheck \
installcheck-am installdirs maintainer-clean \
maintainer-clean-1 maintainer-clean-aminfo \
maintainer-clean-generic maintainer-clean-vti mostlyclean \
mostlyclean-1 mostlyclean-aminfo mostlyclean-generic \
mostlyclean-libtool mostlyclean-vti pdf pdf-am ps ps-am \
tags-am uninstall uninstall-am uninstall-dvi-am \
uninstall-html-am uninstall-info-am uninstall-pdf-am \
uninstall-ps-am
.PRECIOUS: Makefile
# End of Makefile.am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
recutils-1.8/doc/version.texi 0000644 0000000 0000000 00000000136 13413353273 013234 0000000 0000000 @set UPDATED 3 January 2019
@set UPDATED-MONTH January 2019
@set EDITION 1.8
@set VERSION 1.8
recutils-1.8/doc/stamp-vti 0000644 0000000 0000000 00000000136 13413353273 012523 0000000 0000000 @set UPDATED 3 January 2019
@set UPDATED-MONTH January 2019
@set EDITION 1.8
@set VERSION 1.8
recutils-1.8/doc/rec-mode.info 0000644 0000000 0000000 00000124457 13413353275 013243 0000000 0000000 This is rec-mode.info, produced by makeinfo version 6.3 from
rec-mode.texi.
This manual is for rec-mode for Emacs, which is part of the GNU recutils
suite (version 1.8, 3 January 2019).
Copyright (C) 2012-2019 Jose E. Marchesi
Permission is granted to copy, distribute and/or modify this
document under the terms of the GNU Free Documentation License,
Version 1.3 or any later version published by the Free Software
Foundation; with no Invariant Sections, no Front-Cover Texts, and
no Back-Cover Texts. A copy of the license is included in the
section entitled "GNU Free Documentation License".
INFO-DIR-SECTION Database
START-INFO-DIR-ENTRY
* rec-mode: (rec-mode). Emacs mode for editing recfiles.
END-INFO-DIR-ENTRY
File: rec-mode.info, Node: Top, Next: Introduction, Up: (dir)
rec-mode: an Emacs mode for editing recfiles
********************************************
This manual documents version 1.8 of rec-mode.
This manual is for rec-mode for Emacs, which is part of the GNU
recutils suite (version 1.8, 3 January 2019).
Copyright (C) 2012-2019 Jose E. Marchesi
Permission is granted to copy, distribute and/or modify this
document under the terms of the GNU Free Documentation License,
Version 1.3 or any later version published by the Free Software
Foundation; with no Invariant Sections, no Front-Cover Texts, and
no Back-Cover Texts. A copy of the license is included in the
section entitled "GNU Free Documentation License".
* Menu:
* Introduction:: Getting started
* Navigation mode:: User-friendly interface for browsing recfiles
* Edition modes:: Edit recfiles in rec-format
* Configuration:: Adapting rec-mode to your needs
* GNU Free Documentation License:: Distribution terms for this document
-- The Detailed Node Listing --
Introduction
* Installation:: How to install rec-mode
* Activation:: How to activate rec-mode for certain buffers
* Feedback:: Bug reports, ideas, patches etc.
Navigation mode
* Record navigation:: Moving through records.
* Field navigation:: Moving through fields in a record.
* Field folding:: Hiding and showing the values of fields.
* Field edition:: Changing the values of fields.
* Searches:: Finding records fulfilling some criteria.
* Statistics:: Counting records.
* Data integrity:: Verifying the integrity of the recfile.
Edition modes
* Edition modes:: Edit recfiles in rec-format
Configuration
* Finding the recutils:: Specifying the location of the recutils.
* Records appearance:: Setting the way records are displayed.
File: rec-mode.info, Node: Introduction, Next: Navigation mode, Prev: Top, Up: Top
1 Introduction
**************
rec-mode is a mode for browsing and editing recfiles, which are text
files containing data structured in fields and records. It is part of
the GNU recutils(1) suite.
Recfiles are text-based databases which are easy to read and write
manually using a text editor. At the same time they feature enough
structure so they can be read, edited and processed automatically by
programs.
* Menu:
* Installation:: How to install rec-mode
* Activation:: How to activate rec-mode for certain buffers
* Feedback:: Bug reports, ideas, patches etc.
---------- Footnotes ----------
(1)
File: rec-mode.info, Node: Installation, Next: Activation, Up: Introduction
1.1 Installation
================
rec-mode is implemented in a self-contained elisp file called
'rec-mode.el'. It can be obtained in several ways:
- As part of a released tarball of recutils. 'rec-mode.el' can be
found in the 'etc/' directory in the tarball contents.
- As part of the source tree cloned from the development git repo.
'rec-mode.el' can be found in the 'etc/' directory in the recutils
sources tree.
- As a single file downloaded from some other location on the internet.
- It may be already installed as part of a binary package in some
distribution.
In the first three cases you need to tell Emacs where to locate the
'rec-mode.el' file and to load it. Add the following to your '.emacs'
file.
(add-to-list 'load-path "~/path/to/recmode/")
(require 'rec-mode)
If 'rec-mode.el' was installed as part of a binary package in a
distribution then you usually don't have to touch the 'load-path'
variable. Depending on the specific case you may have to 'require' the
package.
File: rec-mode.info, Node: Activation, Next: Feedback, Prev: Installation, Up: Introduction
1.2 Activation
==============
To make sure files with extension '.rec' use rec-mode, add the following
line to your '.emacs' file.
(add-to-list 'auto-mode-alist '("\\.rec\\'" . rec-mode))
rec-mode buffers need font-lock to be turned on - this is the default in
Emacs(1).
With this setup, all files with extension '.rec' will be put into rec
mode. As an alternative, make the first line of a recfile look like
this:
# -*- mode: rec -*-
which will select rec-mode for this buffer no matter what the file's
name is.
---------- Footnotes ----------
(1) If you don't use font-lock globally, turn it on in the rec buffer
with '(add-hook 'rec-mode-hook 'turn-on-font-lock)'
File: rec-mode.info, Node: Feedback, Prev: Activation, Up: Introduction
1.3 Feedback
============
If you find problems with rec-mode, or if you have questions, remarks,
or ideas about it, please send mail to the recutils mailing list.
If you are not a member of the mailing list,
your mail will be passed to the list after a moderator has approved
it(1).
---------- Footnotes ----------
(1) Please consider subscribing to the mailing list, in order to
minimize the work the mailing list moderators have to do. The
subscription can be done online.
File: rec-mode.info, Node: Navigation mode, Next: Edition modes, Prev: Introduction, Up: Top
2 Navigation mode
*****************
When a recfile is visited in Emacs and rec-mode is activated, the
contents of the file are examined and parsed in order to determine if it
is a valid recfile and, in that case, to extract information like the
kind of records stored in the file.
If the file does not contain valid rec data then the buffer is put in
'fundamental-mode' and a description of the syntax error, along with
its location, is shown in the echo area.
If the file contains valid rec data, the mode sets itself in what is
known as "navigation mode". In this mode the buffer is made read-only
and it is narrowed to the first record present in the file. Also, the
presentation of the record contents is slightly changed in order to
improve the visualization of the data: continuation line marks are
replaced by indentation, big fields are folded, etc. The modeline is
changed in order to reflect the type of the records being navigated.
At this point the user can navigate through the records and fields
contained in the file, and edit the contents of the fields and the
structure of the records, by using the commands described in the
following subsections.
* Menu:
* Record navigation:: Moving through records
* Field navigation:: Moving through fields in a record
* Field folding:: Hiding and showing the values of fields
* Field edition:: Changing the values of fields
* Searches:: Finding records fulfilling some criteria
* Statistics:: Counting records
* Data integrity:: Verifying the integrity of the recfile
File: rec-mode.info, Node: Record navigation, Next: Field navigation, Up: Navigation mode
2.1 Record navigation
=====================
The following commands jump to other records in the buffer.
'n' ('rec-cmd-goto-next-rec')
Display the next record of the same type in the buffer.
'C-u N n' will move next N times.
'p' ('rec-cmd-goto-previous-rec')
Display the previous record of the same type in the buffer.
'C-u N p' will move backwards N times.
'd' ('rec-cmd-show-descriptor')
Display the record descriptor applicable to the current record. If
the current record is anonymous, i.e. there is no record
descriptor, then this command does nothing.
'b' ('rec-cmd-jump-back')
Display the record previously displayed in the buffer.
'C-c t' ('rec-find-type')
Prompt the user for one of the record types present in the recfile
and display the first record of the selected type.
File: rec-mode.info, Node: Field navigation, Next: Field folding, Prev: Record navigation, Up: Navigation mode
2.2 Field navigation
====================
The following commands iterate through the fields in a record and
show information about some of the properties of the fields.
'TAB' ('rec-cmd-goto-next-field')
Move the cursor to the beginning of the name of the next field in
the current record. If the cursor is currently located at the last
field of the record then move it to the beginning of the first
field.
't' ('rec-cmd-show-type')
Show information about the type of the field under the cursor, if
it is defined.
File: rec-mode.info, Node: Field folding, Next: Field edition, Prev: Field navigation, Up: Navigation mode
2.3 Field folding
=================
Fields in recfiles can contain data of any size, and sometimes it is
difficult to have an overview of the contents of the record. The
following commands fold and unfold the value of the field under the
cursor.
'SPC' ('rec-cmd-toggle-field-visibility')
Toggle the visibility of the field under the cursor. When a field
is folded then three dots are displayed in the buffer instead of
the value of the field.
It is possible to automatically fold any field whose value exceeds
a certain limit which can be configured by the user. *Note Records
appearance::.
File: rec-mode.info, Node: Field edition, Next: Searches, Prev: Field folding, Up: Navigation mode
2.4 Field edition
=================
The following commands change the value of the field under the cursor.
'e' ('rec-cmd-edit-field')
Edit the value of the field under the cursor. The specific action
depends on the type of the field in the corresponding record
descriptor:
- For date fields a calendar buffer is opened in another window
and the focus is moved there. The user can then select a date
by moving the cursor there and pressing 'RET' in order to set
that date as the value for the field. Alternatively the user
can press 't' in order to set the field to "now", or 'q' to
cancel the operation. In the latter case the value of the
field is left untouched.
- For enumerated and bool fields a fast-select buffer is opened
in another window, showing a list of labeled options. The
labels are single digits and letters. The user can then
select one of the options by pressing the corresponding label,
or cancel the operation by pressing 'RET'. In the latter case
the value of the field is left untouched.
- For any other kind of field an edition buffer is opened in
another window, showing the current contents of the field.
The user can then edit the buffer as desired. When done, the
user can press 'C-c C-c' in order to set the
new value of the field, or just kill the buffer to cancel the
operation.
'm' ('rec-cmd-trim-field-value')
Trim the value of the field under the cursor, removing any sequence
of leading and trailing blank characters.
File: rec-mode.info, Node: Searches, Next: Statistics, Prev: Field edition, Up: Navigation mode
2.5 Searches
============
The following commands jump to the first record in the buffer satisfying
some criteria.
's q' ('rec-cmd-select-fast')
Display the first record having a field whose value matches a given
fixed pattern. This is equivalent to using the command line option
'-q' of 'recsel'. If a prefix argument is specified then the
search is case-insensitive.
's s' ('rec-cmd-select-sex')
Display the first record in the buffer satisfying a given selection
expression. This is equivalent to using the command line option
'-e' of 'recsel'. If a prefix argument is specified then the
search is case-insensitive.
File: rec-mode.info, Node: Statistics, Next: Data integrity, Prev: Searches, Up: Navigation mode
2.6 Statistics
==============
The following commands count records in the current buffer
based on some provided criteria.
'I' ('rec-cmd-show-info')
Show the number of records in the buffer categorized by type.
'#' ('rec-cmd-count')
Count the number of records in the buffer having the same type as
the current record. With a numeric prefix N, ask for a selection
expression and count the number of records in the buffer satisfying
the expression.
Note that rec-mode tries to guess a reasonable default for the
selection expression, depending on the type of the field and its
value. If the user presses 'RET' then the provided default selection
expression is used.
'%' ('rec-cmd-statistics')
If the field under the cursor contains an enumerated value, show
the percentages of records in the current record set having fields
with each of the possible values of the enumerated type.
File: rec-mode.info, Node: Data integrity, Prev: Statistics, Up: Navigation mode
2.7 Data integrity
==================
The integrity of the rec data stored in the file can be checked using
the following commands.
'c' ('rec-cmd-compile')
Compile the buffer with 'recfix' and open a compilation window
showing the result of the command. In case some error or warning
is reported, the user can jump to the location triggering the error
by pressing 'RET' in the compilation window.
File: rec-mode.info, Node: Edition modes, Next: Configuration, Prev: Navigation mode, Up: Top
3 Edition modes
***************
The navigation mode described in a previous chapter is mainly intended
for browsing rec data and making changes at the record level: editing the
contents of a field, adding or removing fields, etc. In order to
perform broader changes, such as adding/deleting record descriptors,
records or comment blocks, the user must enter into one of the "edition
modes".
There are three edition modes, covering different areas of the
recfile: record, record type and buffer. When an edition mode is
entered the buffer is set in read/write mode, it is narrowed to the
desired area and any embellishment used in navigation mode is
removed(1). As a general rule, the commands available in navigation
mode are also available in the edition mode prefixed with 'C-c'. Thus,
'C-c n' would make the cursor jump to the beginning of the next
record.
The following commands are used to enter into one of the available
edition modes from the navigation mode.
'R' ('rec-edit-record')
Edit the record being navigated.
'T' ('rec-edit-type')
Edit the record set being navigated.
'B' ('rec-edit-buffer')
Edit the buffer.
After doing modifications in the buffer, the user can go back to
navigation mode by using the following command.
'C-c C-c' ('rec-finish-editing')
Finish the current edition and return to navigation mode. If a
syntactic error was introduced in the edition activity then the
error is reported in the echo area and navigation mode is not
entered.
---------- Footnotes ----------
(1) Except for font-lock
File: rec-mode.info, Node: Configuration, Next: GNU Free Documentation License, Prev: Edition modes, Up: Top
4 Configuration
***************
TBC
* Menu:
* Finding the recutils:: Specifying the location of the recutils.
* Records appearance:: Setting the way records are displayed
File: rec-mode.info, Node: Finding the recutils, Next: Records appearance, Up: Configuration
4.1 Finding the recutils
========================
'rec-mode' makes use of several utilities which are part of the
recutils. The following variables tell the mode where to find these
utilities. The default values of these variables should work if the
recutils are installed system-wide.
'rec-recsel'
Name of the 'recsel' utility from the GNU recutils.
'rec-recinf'
Name of the 'recinf' utility from the GNU recutils.
'rec-recfix'
Name of the 'recfix' utility from the GNU recutils.
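For example, if the recutils are installed under a non-standard prefix,
these variables can be pointed at the right binaries from the '.emacs'
file. This is a minimal sketch; the paths shown are hypothetical and
should be adjusted to match your installation:
(setq rec-recsel "/opt/recutils/bin/recsel")   ; recsel binary used by rec-mode
(setq rec-recinf "/opt/recutils/bin/recinf")   ; recinf binary used by rec-mode
(setq rec-recfix "/opt/recutils/bin/recfix")   ; recfix binary used by rec-mode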
File: rec-mode.info, Node: Records appearance, Prev: Finding the recutils, Up: Configuration
4.2 Records appearance
======================
The appearance of the records in navigation mode can be customised by
tweaking the value of the following variables.
'rec-max-lines-in-fields'
Values in fields having more than the specified number of lines will
be hidden by default in navigation mode. When hidden, an ellipsis
is shown instead of the value of the field. Default is '15'.
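For example, to fold any field whose value spans more than five lines
instead of the default fifteen, the variable can be set from the
'.emacs' file (a minimal sketch):
(setq rec-max-lines-in-fields 5)   ; fold field values longer than 5 lines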
File: rec-mode.info, Node: GNU Free Documentation License, Prev: Configuration, Up: Top
Appendix A GNU Free Documentation License
*****************************************
Version 1.3, 3 November 2008
Copyright (C) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
0. PREAMBLE
The purpose of this License is to make a manual, textbook, or other
functional and useful document "free" in the sense of freedom: to
assure everyone the effective freedom to copy and redistribute it,
with or without modifying it, either commercially or
noncommercially. Secondarily, this License preserves for the
author and publisher a way to get credit for their work, while not
being considered responsible for modifications made by others.
This License is a kind of "copyleft", which means that derivative
works of the document must themselves be free in the same sense.
It complements the GNU General Public License, which is a copyleft
license designed for free software.
We have designed this License in order to use it for manuals for
free software, because free software needs free documentation: a
free program should come with manuals providing the same freedoms
that the software does. But this License is not limited to
software manuals; it can be used for any textual work, regardless
of subject matter or whether it is published as a printed book. We
recommend this License principally for works whose purpose is
instruction or reference.
1. APPLICABILITY AND DEFINITIONS
This License applies to any manual or other work, in any medium,
that contains a notice placed by the copyright holder saying it can
be distributed under the terms of this License. Such a notice
grants a world-wide, royalty-free license, unlimited in duration,
to use that work under the conditions stated herein. The
"Document", below, refers to any such manual or work. Any member
of the public is a licensee, and is addressed as "you". You accept
the license if you copy, modify or distribute the work in a way
requiring permission under copyright law.
A "Modified Version" of the Document means any work containing the
Document or a portion of it, either copied verbatim, or with
modifications and/or translated into another language.
A "Secondary Section" is a named appendix or a front-matter section
of the Document that deals exclusively with the relationship of the
publishers or authors of the Document to the Document's overall
subject (or to related matters) and contains nothing that could
fall directly within that overall subject. (Thus, if the Document
is in part a textbook of mathematics, a Secondary Section may not
explain any mathematics.) The relationship could be a matter of
historical connection with the subject or with related matters, or
of legal, commercial, philosophical, ethical or political position
regarding them.
The "Invariant Sections" are certain Secondary Sections whose
titles are designated, as being those of Invariant Sections, in the
notice that says that the Document is released under this License.
If a section does not fit the above definition of Secondary then it
is not allowed to be designated as Invariant. The Document may
contain zero Invariant Sections. If the Document does not identify
any Invariant Sections then there are none.
The "Cover Texts" are certain short passages of text that are
listed, as Front-Cover Texts or Back-Cover Texts, in the notice
that says that the Document is released under this License. A
Front-Cover Text may be at most 5 words, and a Back-Cover Text may
be at most 25 words.
A "Transparent" copy of the Document means a machine-readable copy,
represented in a format whose specification is available to the
general public, that is suitable for revising the document
straightforwardly with generic text editors or (for images composed
of pixels) generic paint programs or (for drawings) some widely
available drawing editor, and that is suitable for input to text
formatters or for automatic translation to a variety of formats
suitable for input to text formatters. A copy made in an otherwise
Transparent file format whose markup, or absence of markup, has
been arranged to thwart or discourage subsequent modification by
readers is not Transparent. An image format is not Transparent if
used for any substantial amount of text. A copy that is not
"Transparent" is called "Opaque".
Examples of suitable formats for Transparent copies include plain
ASCII without markup, Texinfo input format, LaTeX input format,
SGML or XML using a publicly available DTD, and standard-conforming
simple HTML, PostScript or PDF designed for human modification.
Examples of transparent image formats include PNG, XCF and JPG.
Opaque formats include proprietary formats that can be read and
edited only by proprietary word processors, SGML or XML for which
the DTD and/or processing tools are not generally available, and
the machine-generated HTML, PostScript or PDF produced by some word
processors for output purposes only.
The "Title Page" means, for a printed book, the title page itself,
plus such following pages as are needed to hold, legibly, the
material this License requires to appear in the title page. For
works in formats which do not have any title page as such, "Title
Page" means the text near the most prominent appearance of the
work's title, preceding the beginning of the body of the text.
The "publisher" means any person or entity that distributes copies
of the Document to the public.
A section "Entitled XYZ" means a named subunit of the Document
whose title either is precisely XYZ or contains XYZ in parentheses
following text that translates XYZ in another language. (Here XYZ
stands for a specific section name mentioned below, such as
"Acknowledgements", "Dedications", "Endorsements", or "History".)
To "Preserve the Title" of such a section when you modify the
Document means that it remains a section "Entitled XYZ" according
to this definition.
The Document may include Warranty Disclaimers next to the notice
which states that this License applies to the Document. These
Warranty Disclaimers are considered to be included by reference in
this License, but only as regards disclaiming warranties: any other
implication that these Warranty Disclaimers may have is void and
has no effect on the meaning of this License.
2. VERBATIM COPYING
You may copy and distribute the Document in any medium, either
commercially or noncommercially, provided that this License, the
copyright notices, and the license notice saying this License
applies to the Document are reproduced in all copies, and that you
add no other conditions whatsoever to those of this License. You
may not use technical measures to obstruct or control the reading
or further copying of the copies you make or distribute. However,
you may accept compensation in exchange for copies. If you
distribute a large enough number of copies you must also follow the
conditions in section 3.
You may also lend copies, under the same conditions stated above,
and you may publicly display copies.
3. COPYING IN QUANTITY
If you publish printed copies (or copies in media that commonly
have printed covers) of the Document, numbering more than 100, and
the Document's license notice requires Cover Texts, you must
enclose the copies in covers that carry, clearly and legibly, all
these Cover Texts: Front-Cover Texts on the front cover, and
Back-Cover Texts on the back cover. Both covers must also clearly
and legibly identify you as the publisher of these copies. The
front cover must present the full title with all words of the title
equally prominent and visible. You may add other material on the
covers in addition. Copying with changes limited to the covers, as
long as they preserve the title of the Document and satisfy these
conditions, can be treated as verbatim copying in other respects.
If the required texts for either cover are too voluminous to fit
legibly, you should put the first ones listed (as many as fit
reasonably) on the actual cover, and continue the rest onto
adjacent pages.
If you publish or distribute Opaque copies of the Document
numbering more than 100, you must either include a machine-readable
Transparent copy along with each Opaque copy, or state in or with
each Opaque copy a computer-network location from which the general
network-using public has access to download using public-standard
network protocols a complete Transparent copy of the Document, free
of added material. If you use the latter option, you must take
reasonably prudent steps, when you begin distribution of Opaque
copies in quantity, to ensure that this Transparent copy will
remain thus accessible at the stated location until at least one
year after the last time you distribute an Opaque copy (directly or
through your agents or retailers) of that edition to the public.
It is requested, but not required, that you contact the authors of
the Document well before redistributing any large number of copies,
to give them a chance to provide you with an updated version of the
Document.
4. MODIFICATIONS
You may copy and distribute a Modified Version of the Document
under the conditions of sections 2 and 3 above, provided that you
release the Modified Version under precisely this License, with the
Modified Version filling the role of the Document, thus licensing
distribution and modification of the Modified Version to whoever
possesses a copy of it. In addition, you must do these things in
the Modified Version:
A. Use in the Title Page (and on the covers, if any) a title
distinct from that of the Document, and from those of previous
versions (which should, if there were any, be listed in the
History section of the Document). You may use the same title
as a previous version if the original publisher of that
version gives permission.
B. List on the Title Page, as authors, one or more persons or
entities responsible for authorship of the modifications in
the Modified Version, together with at least five of the
principal authors of the Document (all of its principal
authors, if it has fewer than five), unless they release you
from this requirement.
C. State on the Title page the name of the publisher of the
Modified Version, as the publisher.
D. Preserve all the copyright notices of the Document.
E. Add an appropriate copyright notice for your modifications
adjacent to the other copyright notices.
F. Include, immediately after the copyright notices, a license
notice giving the public permission to use the Modified
Version under the terms of this License, in the form shown in
the Addendum below.
G. Preserve in that license notice the full lists of Invariant
Sections and required Cover Texts given in the Document's
license notice.
H. Include an unaltered copy of this License.
I. Preserve the section Entitled "History", Preserve its Title,
and add to it an item stating at least the title, year, new
authors, and publisher of the Modified Version as given on the
Title Page. If there is no section Entitled "History" in the
Document, create one stating the title, year, authors, and
publisher of the Document as given on its Title Page, then add
an item describing the Modified Version as stated in the
previous sentence.
J. Preserve the network location, if any, given in the Document
for public access to a Transparent copy of the Document, and
likewise the network locations given in the Document for
previous versions it was based on. These may be placed in the
"History" section. You may omit a network location for a work
that was published at least four years before the Document
itself, or if the original publisher of the version it refers
to gives permission.
K. For any section Entitled "Acknowledgements" or "Dedications",
Preserve the Title of the section, and preserve in the section
all the substance and tone of each of the contributor
acknowledgements and/or dedications given therein.
L. Preserve all the Invariant Sections of the Document, unaltered
in their text and in their titles. Section numbers or the
equivalent are not considered part of the section titles.
M. Delete any section Entitled "Endorsements". Such a section
may not be included in the Modified Version.
N. Do not retitle any existing section to be Entitled
"Endorsements" or to conflict in title with any Invariant
Section.
O. Preserve any Warranty Disclaimers.
If the Modified Version includes new front-matter sections or
appendices that qualify as Secondary Sections and contain no
material copied from the Document, you may at your option designate
some or all of these sections as invariant. To do this, add their
titles to the list of Invariant Sections in the Modified Version's
license notice. These titles must be distinct from any other
section titles.
You may add a section Entitled "Endorsements", provided it contains
nothing but endorsements of your Modified Version by various
parties--for example, statements of peer review or that the text
has been approved by an organization as the authoritative
definition of a standard.
You may add a passage of up to five words as a Front-Cover Text,
and a passage of up to 25 words as a Back-Cover Text, to the end of
the list of Cover Texts in the Modified Version. Only one passage
of Front-Cover Text and one of Back-Cover Text may be added by (or
through arrangements made by) any one entity. If the Document
already includes a cover text for the same cover, previously added
by you or by arrangement made by the same entity you are acting on
behalf of, you may not add another; but you may replace the old
one, on explicit permission from the previous publisher that added
the old one.
The author(s) and publisher(s) of the Document do not by this
License give permission to use their names for publicity for or to
assert or imply endorsement of any Modified Version.
5. COMBINING DOCUMENTS
You may combine the Document with other documents released under
this License, under the terms defined in section 4 above for
modified versions, provided that you include in the combination all
of the Invariant Sections of all of the original documents,
unmodified, and list them all as Invariant Sections of your
combined work in its license notice, and that you preserve all
their Warranty Disclaimers.
The combined work need only contain one copy of this License, and
multiple identical Invariant Sections may be replaced with a single
copy. If there are multiple Invariant Sections with the same name
but different contents, make the title of each such section unique
by adding at the end of it, in parentheses, the name of the
original author or publisher of that section if known, or else a
unique number. Make the same adjustment to the section titles in
the list of Invariant Sections in the license notice of the
combined work.
In the combination, you must combine any sections Entitled
"History" in the various original documents, forming one section
Entitled "History"; likewise combine any sections Entitled
"Acknowledgements", and any sections Entitled "Dedications". You
must delete all sections Entitled "Endorsements."
6. COLLECTIONS OF DOCUMENTS
You may make a collection consisting of the Document and other
documents released under this License, and replace the individual
copies of this License in the various documents with a single copy
that is included in the collection, provided that you follow the
rules of this License for verbatim copying of each of the documents
in all other respects.
You may extract a single document from such a collection, and
distribute it individually under this License, provided you insert
a copy of this License into the extracted document, and follow this
License in all other respects regarding verbatim copying of that
document.
7. AGGREGATION WITH INDEPENDENT WORKS
A compilation of the Document or its derivatives with other
separate and independent documents or works, in or on a volume of a
storage or distribution medium, is called an "aggregate" if the
copyright resulting from the compilation is not used to limit the
legal rights of the compilation's users beyond what the individual
works permit. When the Document is included in an aggregate, this
License does not apply to the other works in the aggregate which
are not themselves derivative works of the Document.
If the Cover Text requirement of section 3 is applicable to these
copies of the Document, then if the Document is less than one half
of the entire aggregate, the Document's Cover Texts may be placed
on covers that bracket the Document within the aggregate, or the
electronic equivalent of covers if the Document is in electronic
form. Otherwise they must appear on printed covers that bracket
the whole aggregate.
8. TRANSLATION
Translation is considered a kind of modification, so you may
distribute translations of the Document under the terms of section
4. Replacing Invariant Sections with translations requires special
permission from their copyright holders, but you may include
translations of some or all Invariant Sections in addition to the
original versions of these Invariant Sections. You may include a
translation of this License, and all the license notices in the
Document, and any Warranty Disclaimers, provided that you also
include the original English version of this License and the
original versions of those notices and disclaimers. In case of a
disagreement between the translation and the original version of
this License or a notice or disclaimer, the original version will
prevail.
If a section in the Document is Entitled "Acknowledgements",
"Dedications", or "History", the requirement (section 4) to
Preserve its Title (section 1) will typically require changing the
actual title.
9. TERMINATION
You may not copy, modify, sublicense, or distribute the Document
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense, or distribute it is void,
and will automatically terminate your rights under this License.
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the
copyright holder fails to notify you of the violation by some
reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from
that copyright holder, and you cure the violation prior to 30 days
after your receipt of the notice.
Termination of your rights under this section does not terminate
the licenses of parties who have received copies or rights from you
under this License. If your rights have been terminated and not
permanently reinstated, receipt of a copy of some or all of the
same material does not give you any rights to use it.
10. FUTURE REVISIONS OF THIS LICENSE
The Free Software Foundation may publish new, revised versions of
the GNU Free Documentation License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns. See
http://www.gnu.org/copyleft/.
Each version of the License is given a distinguishing version
number. If the Document specifies that a particular numbered
version of this License "or any later version" applies to it, you
have the option of following the terms and conditions either of
that specified version or of any later version that has been
published (not as a draft) by the Free Software Foundation. If the
Document does not specify a version number of this License, you may
choose any version ever published (not as a draft) by the Free
Software Foundation. If the Document specifies that a proxy can
decide which future versions of this License can be used, that
proxy's public statement of acceptance of a version permanently
authorizes you to choose that version for the Document.
11. RELICENSING
"Massive Multiauthor Collaboration Site" (or "MMC Site") means any
World Wide Web server that publishes copyrightable works and also
provides prominent facilities for anybody to edit those works. A
public wiki that anybody can edit is an example of such a server.
A "Massive Multiauthor Collaboration" (or "MMC") contained in the
site means any set of copyrightable works thus published on the MMC
site.
"CC-BY-SA" means the Creative Commons Attribution-Share Alike 3.0
license published by Creative Commons Corporation, a not-for-profit
corporation with a principal place of business in San Francisco,
California, as well as future copyleft versions of that license
published by that same organization.
"Incorporate" means to publish or republish a Document, in whole or
in part, as part of another Document.
An MMC is "eligible for relicensing" if it is licensed under this
License, and if all works that were first published under this
License somewhere other than this MMC, and subsequently
incorporated in whole or in part into the MMC, (1) had no cover
texts or invariant sections, and (2) were thus incorporated prior
to November 1, 2008.
The operator of an MMC Site may republish an MMC contained in the
site under CC-BY-SA on the same site at any time before August 1,
2009, provided the MMC is eligible for relicensing.
ADDENDUM: How to use this License for your documents
====================================================
To use this License in a document you have written, include a copy of
the License in the document and put the following copyright and license
notices just after the title page:
Copyright (C) YEAR YOUR NAME.
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3
or any later version published by the Free Software Foundation;
with no Invariant Sections, no Front-Cover Texts, and no Back-Cover
Texts. A copy of the license is included in the section entitled ``GNU
Free Documentation License''.
If you have Invariant Sections, Front-Cover Texts and Back-Cover
Texts, replace the "with...Texts." line with this:
with the Invariant Sections being LIST THEIR TITLES, with
the Front-Cover Texts being LIST, and with the Back-Cover Texts
being LIST.
If you have Invariant Sections without Cover Texts, or some other
combination of the three, merge those two alternatives to suit the
situation.
If your document contains nontrivial examples of program code, we
recommend releasing these examples in parallel under your choice of free
software license, such as the GNU General Public License, to permit
their use in free software.
recutils-1.8/doc/recutils.texi 0000644 0000000 0000000 00000476674 13413345572 013434 0000000 0000000 \input texinfo
@comment %**start of header
@setfilename recutils.info
@include version.texi
@settitle GNU Recutils
@afourpaper
@comment %**end of header
@comment Latin: videre licet,
@macro viz
@i{viz:@:}
@end macro
@comment Latin: id est
@macro ie
@i{i.e.@:}
@end macro
@comment Latin: exempli gratia
@macro eg
@i{e.g.@:}
@end macro
@comment Latin: et cetera
@macro etc
@i{etc.@:}
@end macro
@copying
This manual is for GNU recutils (version @value{VERSION},
@value{UPDATED}).
Copyright @copyright{} 2009-2019 Jose E. Marchesi
Copyright @copyright{} 1994-2014 Free Software Foundation, Inc.
@quotation
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3 or
any later version published by the Free Software Foundation; with no
Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A
copy of the license is included in the section entitled ``GNU Free
Documentation License''.
@end quotation
@end copying
@dircategory Database
@direntry
* recutils: (recutils). The GNU Recutils manual.
@end direntry
@dircategory Individual utilities
@direntry
* recinf: (recutils)Invoking recinf. Get info about recfiles.
* recsel: (recutils)Invoking recsel. Read records.
* recins: (recutils)Invoking recins. Insert records.
* recdel: (recutils)Invoking recdel. Delete records.
* recset: (recutils)Invoking recset. Manage fields.
* recfix: (recutils)Invoking recfix. Fix recfiles.
* csv2rec: (recutils)Invoking csv2rec. CSV to recfiles.
* rec2csv: (recutils)Invoking rec2csv. Recfiles to CSV.
* mdb2rec: (recutils)Invoking mdb2rec. MDB to recfiles.
@end direntry
@titlepage
@title GNU recutils
@subtitle for version @value{VERSION}, @value{UPDATED}
@author by Jose E. Marchesi and John Darrington
@page
@vskip 0pt plus 1filll
@insertcopying
@end titlepage
@contents
@ifnottex
@node Top
@top GNU Recutils
This manual documents version @value{VERSION} of the GNU recutils.
@insertcopying
@end ifnottex
@menu
The Basics
* Introduction:: Introducing recutils.
* The Rec Format:: Writing recfiles.
Using the Recutils
* Querying Recfiles:: Extracting data from recfiles.
* Editing Records:: Inserting and deleting records.
* Editing Fields:: Inserting, modifying and deleting fields.
Data Integrity
* Field Types:: Restrictions on the values of fields.
* Constraints on Record Sets:: Requiring or forbidding specific fields.
* Checking Recfiles:: Making sure the data is ok.
Advanced Topics
* Remote Descriptors:: Implementing distributed databases.
* Grouping and Aggregates:: Statistics.
* Queries which Join Records:: Crossing records of different types.
* Auto-Generated Fields:: Counters and time-stamps.
* Encryption:: Storing sensitive information.
* Generating Reports:: Formatted output with templates.
* Interoperability:: Importing and exporting to other formats.
* Bash Builtins:: Boosting the recutils in the shell.
Reference Material
* Invoking the Utilities:: Exhaustive list of command line arguments.
* Regular Expressions:: Flavor of regexps supported in recutils.
* Date input formats:: Specifying dates and times.
* GNU Free Documentation License:: Distribution terms for this document.
Indexes
* Concept Index::
@detailmenu
--- The Detailed Node Listing ---
---------------------------------
Here are some other nodes which are really subnodes of the ones
already listed, mentioned here so you can get to them in one step:
Introduction
* Purpose:: Why recutils.
* A Little Example:: Recutils in action.
The Rec Format
* Fields:: The key--value pairs which comprise the data.
* Records:: The main entities of a recfile.
* Comments:: Information for humans' benefit only.
* Record Descriptors:: Describing different types of records.
Querying Recfiles
* Simple Selections:: Introducing @command{recsel}.
* Selecting by Type:: Get the records of some given type.
* Selecting by Position:: Get the record occupying some position.
* Random Records:: Get a set of random records.
* Selection Expressions:: Get the records satisfying some expression.
* Field Expressions:: Selecting a subset of fields.
* Sorted Output:: Get the records in a given order.
Editing Records
* Inserting Records:: Inserting data into recfiles.
* Deleting Records:: Removing entries.
* Sorting Records:: Physical reordering of records.
Editing Fields
* Setting Fields:: Editing field values.
* Adding Fields:: Adding new fields to records.
* Deleting Fields:: Removing or commenting-out fields.
Field Types
* Declaring Types:: Declaration of types in record descriptors.
* Types and Fields:: Associating fields with types.
* Scalar Field Types:: Numbers and ranges.
* String Field Types:: Lines, limited strings and regular expressions.
* Enumerated Field Types:: Enumerations and boolean values.
* Date and Time Types:: Dates and times.
* Other Field Types:: Emails, fields, UUIDs, @dots{}
Constraints on Record Sets
* Mandatory Fields:: Requiring the presence of fields.
* Prohibited Fields:: Forbidding the presence of fields.
* Allowed Fields:: Restricting the presence of fields.
* Keys and Unique Fields:: Fields characterizing records.
* Size Constraints:: Limiting the size of a record set.
* Arbitrary Constraints:: Constraints records must comply with.
Checking Recfiles
* Syntactical Errors:: Fixing structure errors in recfiles.
* Semantic Errors:: Fixing semantic errors in recfiles.
Remote Descriptors
Grouping and Aggregates
* Grouping Records:: Combining records by fields.
* Aggregate Functions:: Statistics and more.
Joins
* Foreign Keys:: Referring to records from other records.
* Joining Records:: Performing cross-joins.
Auto-Generated Fields
* Counters:: Generating incremental Ids.
* Unique Identifiers:: Generating universally unique Ids.
* Time-Stamps:: Tracking the creation of records.
Encryption
* Confidential Fields:: Declaring fields as sensitive data.
* Encrypting Files:: Encrypt confidential fields.
* Decrypting Data:: Reading encrypted fields.
Generating Reports
* Templates:: Formatted output.
Interoperability
* CSV Files:: Converting recfiles to/from csv files.
* Importing MDB Files:: Importing MS Access Databases.
Bash Builtins
* readrec:: Exporting the contents of records to the shell.
Invoking the Utilities
* Invoking recinf:: Printing information about rec files.
* Invoking recsel:: Selecting records.
* Invoking recins:: Inserting records.
* Invoking recdel:: Deleting records.
* Invoking recset:: Managing fields.
* Invoking recfix:: Fixing broken rec files, and diagnostics.
* Invoking recfmt:: Formatting records using templates.
* Invoking csv2rec:: Converting csv data into rec data.
* Invoking rec2csv:: Converting rec data into csv data.
* Invoking mdb2rec:: Converting mdb files into rec files.
@end detailmenu
@end menu
@node Introduction
@chapter Introduction
@menu
* Purpose:: Why recutils.
* A Little Example:: Recutils in action.
@end menu
@node Purpose
@section Purpose
GNU recutils is a set of tools and libraries to access human-editable,
text-based databases called @emph{recfiles}. The data is stored as a
sequence of records, each record containing an arbitrary number of
named fields. Advanced capabilities usually found in other data
storage systems are supported: data types, data integrity (keys,
mandatory fields, @etc{}) as well as the ability of records to refer to
other records (a sort of foreign key). Despite their simplicity,
recfiles can be used to store medium-sized databases.
So, yet another data storage system? The mere existence of this
package deserves an explanation. There is a rich set of already
available free data storage systems, covering a broad range of
requirements. Big systems having complex data storage requirements
will probably make use of some full-fledged relational system such as
MySQL or PostgreSQL@. Less demanding applications, or applications
with special deployment requirements, may find it more convenient to
use a simpler system such as SQLite, where the data is stored in a
single binary file. XML files are often used to store configuration
settings for programs, and to encode data for transmission through
networks.
So it looks like all the needs are covered by the existing
solutions @dots{} but consider the following characteristics of the
data storage systems mentioned in the previous paragraph:
@itemize @minus
@item The stored data is not directly human readable.
@item The stored data is definitely not directly writable by humans.
@item They are program dependent.
@item They are not easily managed by version control systems.
@end itemize
@cindex readability
Regarding the first point (human readability), while it is clearly
true for the binary files, some may argue XML files are indeed human
readable@dots{} well@dots{} @code{try to r&amp;ead this}. YAML
@footnote{Yet Another Markup Language} is an
example of a hierarchical data storage format which is much more
readable than XML@. The problem with YAML is that it was designed as a
``data serialization language'' and thus to map the data constructs
usually found in programming languages. That makes it too complex for
the simple task of storing plain lists of items.
Recfiles are human-readable, human-writable and still easy to
parse and to manipulate automatically. Obviously they are not
suitable for any task (for example, it can be difficult to manage
hierarchies in recfiles) and performance is somewhat sacrificed in
favor of readability. But they are quite handy to store small to
medium simple databases.
The GNU recutils suite comprises:
@itemize @minus
@item This Texinfo manual, describing the Rec format and the accompanying software.
@item A C library (librec) that provides a rich set of functions to manipulate rec data.
@item A set of utilities that can be used in shell scripts and on the command line to operate on rec files.
@item An emacs mode, @code{rec-mode}.
@end itemize
@node A Little Example
@section A Little Example
@cindex books
Everyone loves to grow a nice book collection at home. Unfortunately,
in most cases the management of our private books gets out of hand:
some books get lost, some of them may be loaned to a friend, there
are duplicated (or even triplicated!) titles because we forgot
about the existence of the previous copy, and so on.
In order to improve the management of our little book collection we
could make use of a complex data storage system such as a relational
database. The problem with that approach, as explained in the
previous section, is that the tool is too complicated for the simple
task: we do not need the full power of a relational database system to
maintain a simple collection of books.
With GNU recutils it is possible to maintain such a little database in
a text file. Let's call it @file{books.rec}. The following list
summarizes the information items that we want to store for each title,
along with some common-sense restrictions.
@itemize @minus
@item
Every book has a title, even if it is ``No Title''.
@item
A book can have several titles.
@item
A book can have more than one author.
@item
For some books the author is not known.
@item
Sometimes we don't care about who the author of a book is.
@item
We usually store our books at home.
@item
Sometimes we loan books to friends.
@item
On occasion we lose track of the physical location of a book. Did
we loan it to anyone? Was it lost in the last move? Is it in some
hidden place at home?
@end itemize
@noindent
The contents of the rec file follows:
@example
# -*- mode: rec -*-
%rec: Book
%mandatory: Title
%type: Location enum loaned home unknown
%doc:
+ A book in my personal collection.
Title: GNU Emacs Manual
Author: Richard M. Stallman
Publisher: FSF
Location: home
Title: The Colour of Magic
Author: Terry Pratchett
Location: loaned
Title: Mio Cid
Author: Anonymous
Location: home
Title: chapters.gnu.org administration guide
Author: Nacho Gonzalez
Author: Jose E. Marchesi
Location: unknown
Title: Yeelong User Manual
Location: home
# End of books.rec
@end example
Simple. The file contains a set of records separated by blank lines.
Each record comprises a set of fields with a name and a value.
The GNU recutils can then be used to access the contents of the file.
For example, we could get a list of the names of loaned books by invoking
@command{recsel} in the following way:
@example
$ recsel -e "Location = 'loaned'" -P Title books.rec
The Colour of Magic
@end example
@node The Rec Format
@chapter The Rec Format
A recfile is nothing but a text file which conforms to a few simple
rules. This chapter shows you how, by observing these rules, recfiles
of arbitrary complexity can be written.
@menu
* Fields:: The key--value pairs which comprise the data.
* Records:: The main entities of a recfile.
* Comments:: Information for humans' benefit only.
* Record Descriptors:: Describing different types of records.
@end menu
@node Fields
@section Fields
@cindex field
A @dfn{field} is the written form of an association between a label
and a value. For example, if we wanted to associate the label
@code{Name} with the value @code{Ada Lovelace} we would write:
@example
Name: Ada Lovelace
@end example
The separator between the field name and the field value is a colon
followed by a blank character (a space or a tab, but not a newline). The
name of the field shall begin in the first column of the line.
@cindex field name
A @dfn{field name} is a sequence of alphanumeric characters plus
underscores (@code{_}), starting with a letter or the character
@code{%}. The regular expression denoting a field name is:
@example
[a-zA-Z%][a-zA-Z0-9_]*
@end example
@cindex case, in field names
Field names are case-sensitive. @code{Foo} and @code{foo} are
different field names.
The following list contains valid field names (the final colon is not
part of the names):
@example
Foo:
foo:
A23:
ab1:
A_Field:
@end example
@cindex field values
The @dfn{value of a field} is a sequence of characters terminated by a
single newline character (@code{\n}).
@cindex multiline field values
Sometimes a value is too long to fit in the usual width of terminals
and screens. In that case, depending on the specific tool used to
access the file, the readability of the data may suffer.
It is therefore possible to physically split a logical line by
escaping a newline with a backslash character, as in:
@example
LongLine: This is a quite long value \
comprising a single unique logical line \
split in several physical lines.
@end example
A newline followed by a @code{+} (PLUS) character and an optional
space is interpreted as a newline when found in a field
value. For example, the C string @code{"bar1\nbar2\n bar3"} would be
encoded in the following way in a field value:
@example
Foo: bar1
+ bar2
+ bar3
@end example
@node Records
@section Records
@cindex record
A @dfn{record} is a group of one or more fields written one after the
other:
@example
Name1: Value1
Name2: Value2
Name2: Value3
@end example
It is possible for several fields in a record to share the same name
and/or the same value. The following is a valid record containing
three fields:
@example
Name: John Smith
Email: john.smith@@foomail.com
Email: john@@smith.name
@end example
@cindex record size
@cindex size, record size
The @dfn{size of a record} is defined as the number of fields that it
contains. A record cannot be empty, so the minimum size
for a record is 1. The maximum number of fields for a record is only
limited by the available physical resources. The size of the previous
record is 3.
Records are separated by one or more blank lines. For instance, the
following example shows a file named @file{personalities.rec}
featuring three records:
@example
Name: Ada Lovelace
Age: 36
Name: Peter the Great
Age: 53
Name: Matusalem
Age: 969
@end example
@node Comments
@section Comments
@cindex comments
Any line having a @code{#} (ASCII 0x23) character in the first column
is a comment line.
Comments may be used to insert information that
is not part of the database but useful in other ways.
They are completely ignored by processing tools and can only be seen by
looking at the recfile itself.
It is also quite convenient to comment-out information from the
recfile without having to remove it permanently: you may want
to recover the data into the database later! Comment lines can be
used to comment-out both whole records and single fields:
@example
Name: Jose E. Marchesi
# Occupation: Software Engineer
# Severe lack of brain capacity
# Fired on 02/01/2009 (without compensation)
Occupation: Unoccupied
@end example
Comments are also useful for headers, footers, comment blocks and all
kinds of markers:
@example
# -*- mode: rec -*-
#
# TODO
#
# This file contains the Bugs database of GNU recutils.
#
# Blah blah@dots{}
@dots{}
# End of TODO
@end example
Unlike some file formats, comments in recfiles must be complete lines.
You cannot start a comment in the middle of a line.
For example, in the following record, the @code{#} does @emph{not} start a comment:
@example
Name: Peter the Great # Russian Tsar
Age: 53
@end example
@node Record Descriptors
@section Record Descriptors
@cindex descriptor
Certain properties of a set of records can be specified by preceding
them with a @dfn{record descriptor}. A record descriptor is itself a
record, and uses fields with some predefined names to store
properties.
@menu
* Record Sets:: Defining different types of records.
* Naming Record Types:: Some conventions on naming record sets.
* Documenting Records:: Documenting your record sets.
* Record Sets Properties:: Introducing the special fields.
@end menu
@node Record Sets
@subsection Record Sets
@cindex record sets
The most basic property that can be specified for a set of records is
their @dfn{type}. The special field name @code{%rec} is used for that
purpose:
@cindex @code{%rec}
@example
%rec: Entry
Id: 1
Name: Entry 1
Id: 2
Name: Entry 2
@end example
The records following the descriptor are then identified as being of
its type. So in the example above we would say there are two records
of type ``Entry''. Or, more colloquially, we would say there are
two ``Entries'' in the database.
The effect of a record descriptor ends when another descriptor is
found in the stream of records. This allows you to store different kinds
of records in the same database. For example, suppose you are
maintaining a depot. You will need to keep track of both what items
are available and when they are sold or restocked.
The following example shows the usage of two record descriptors to
store both kinds of records: articles and stock.
@example
%rec: Article
Id: 1
Title: Article 1
Id: 2
Title: Article 2
%rec: Stock
Id: 1
Type: sell
Date: 20 April 2011
Id: 2
Type: stock
Date: 21 April 2011
@end example
A collection of records having the same type in a recfile is known as
a @dfn{record set} in recutils jargon. In the example above two
record sets are defined: one containing articles and the other
containing stock movements.
Nothing prevents having empty record sets in databases. This is in fact
usually the case when a new recfile is written but no data exists yet.
In our depot example we could write a first version of the database
containing just the record descriptors:
@example
%rec: Article
%rec: Stock
@end example
@cindex default record types
Record descriptors are not required, and many recfiles do not have them.
This is because
all the records contained in the file are of the same type, and their
nature can usually be inferred from both the file name and their
contents. For example, @file{contacts.rec} could simply contain
records representing contacts without an explicit @code{%rec: Contact}
record descriptor. In this case we say that the type of the anonymous
records stored in the file is the @dfn{default record type}.
Another possible situation, although not usual, is to have a recfile
containing both non-typed (default) and typed records:
@example
Id: 1
Title: Blah
Id: 2
Title: Bleh
%rec: Movement
Date: 13-Aug-2012
Concept: 20
Date: 24-Sept-2012
Concept: 12
@end example
@noindent
In this case the records preceding the movements are of the
``default'' type, whereas the records following the record descriptor
are of type @code{Movement}. Even though it is supported by the format
and the utilities, it is generally not recommended to mix non-typed
and typed records in a recfile.
@node Naming Record Types
@subsection Naming Record Types
It is up to you how to name your record sets. Any string comprising
only alphanumeric characters or underscores, and starting with a
letter, is a legal name. However, it is recommended to use the
singular form of a noun in order to describe the ``type'' of the
records in the records set. Examples are @code{Article},
@code{Contributor}, @code{Employee} and @code{Movement}.
The chosen noun should be specific enough to characterize the
relevant property of the records. For example, in a
database of contributors it would be better to have a record set named
@code{Contributor} than @code{Person}.
The reason for using singular nouns instead of their plural forms is
that they work better with the utilities: it is more natural to read
@command{recsel -t Contributor} (@command{-t} is for ``type'') than
@command{recsel -t Contributors}.
@node Documenting Records
@subsection Documenting Records
@cindex @code{%doc}
@cindex documentation fields
@cindex description of record sets
As well as a name, it is a good idea to provide a description of the record set.
This is sometimes called the record set's @dfn{documentation} and is specified
using the @code{%doc} field.
Whereas the name is usually short and can contain only alphanumeric
characters and underscores, no such restriction applies to the
documentation. The documentation is typically more verbose than the
name provided by the @code{%rec} field and may contain arbitrary
characters such as punctuation and parentheses. It is somewhat
similar to a comment (@pxref{Comments}), but it can be managed more easily
in a programmatic way. Unlike a comment, the @code{%doc} field is
recognized by tools such as @command{recinf} (@pxref{Invoking recinf})
which processes record descriptors. For example, you might have two
record sets with @code{%rec} and @code{%doc} fields as follows:
@example
%rec: Contact
%doc: Family, friends and acquaintances (other than business).
Name: Granny
Phone: +12 23456677
Name: Edwina
Phone: +55 0923 8765
%rec: Associate
%doc: Colleagues and other business contacts
Name: Karl Schmidt
Phone: +49 88234566
Name: Genevieve Curie
Phone: +33 34 87 65
@end example
@node Record Sets Properties
@subsection Record Sets Properties
@cindex field, special fields
@cindex special fields
Besides determining the type of record that follows in the
stream, record descriptors can be used to describe other properties of
those records. This can be done by using @dfn{special
fields}, which have special names from a predefined set.
Consider for example the following database, where record descriptors
are used to specify an (optional) numeric `Id' and a mandatory `Title' field:
@cindex @code{%mandatory}
@cindex mandatory fields
@example
%rec: Item
%type: Id int
%mandatory: Title
Id: 10
Title: Notebook (big)
Id: 11
Title: Fountain Pen
@end example
Note that the names of special fields always start with the character
@code{%}. Note also that it is possible to use non-special
fields in a record descriptor, but such fields will have no effect on
the described record set.
Every record descriptor must contain one, and only one, field named
@code{%rec}. That field is not required to occupy the first
position in the record. However, it is considered good style to
place it as the first field of the descriptor, so that the casual
reader can easily identify the type of the records.
The following list briefly describes the special fields defined in the
recutils format, along with references to the sections of this manual
describing their usage in depth.
@cindex special fields, list of
@table @code
@item %rec
Naming record types. Also, it allows using external and remote
descriptors. @xref{Remote Descriptors}.
@item %mandatory, %allowed and %prohibit
Requiring or forbidding specific fields. @xref{Mandatory Fields}.
@xref{Prohibited Fields}. @xref{Allowed Fields}.
@item %unique and %key
Working with keys. @xref{Keys and Unique Fields}.
@item %doc
Documenting your database. @xref{Documenting Records}.
@item %typedef and %type
Field types. @xref{Field Types}.
@item %auto
Auto-counters and time-stamps. @xref{Auto-Generated Fields}.
@item %sort
Keeping your record sets sorted. @xref{Sorted Output}.
@item %size
Restricting the size of your database. @xref{Size Constraints}.
@item %constraint
Enforcing arbitrary constraints. @xref{Arbitrary Constraints}.
@item %confidential
Storing confidential information. @xref{Encryption}.
@end table
@node Querying Recfiles
@chapter Querying Recfiles
Since recfiles are always human readable, you could look up data simply
by opening an editor and searching for the desired information. Or
you could use a standard tool such as @command{grep} to extract
strings matching a pattern. However, recutils provides a more powerful
and flexible way to look up data. The following sections explore how
the recutils can be used in order to extract data from recfiles, from
very basic and simple queries to quite complex examples.
@menu
* Simple Selections:: Introducing @command{recsel}.
* Selecting by Type:: Get the records of some given type.
* Selecting by Position:: Get the record occupying some position.
* Random Records:: Get a set of random records.
* Selection Expressions:: Get the records satisfying some expression.
* Field Expressions:: Selecting a subset of fields.
* Sorted Output:: Get the records in a given order.
@end menu
@node Simple Selections
@section Simple Selections
@command{recsel} is a utility whose primary purpose is to select
records from a recfile and print them on standard output.
Consider the following example record set, which we shall assume is
saved in a recfile called @file{acquaintances.rec}:
@example
# This database contains a list of both real and fictional people
# along with their age.
Name: Ada Lovelace
Age: 36
Name: Peter the Great
Age: 53
# Name: Matusalem
# Age: 969
Name: Bart Simpson
Age: 10
Name: Adrian Mole
Age: 13.75
@end example
@noindent
If we invoke @command{recsel acquaintances.rec} we will get a list of
all the records stored in the file in the terminal:
@example
$ recsel acquaintances.rec
Name: Ada Lovelace
Age: 36
Name: Peter the Great
Age: 53
Name: Bart Simpson
Age: 10
Name: Adrian Mole
Age: 13.75
@end example
@noindent
Note that the commented out parts of the file, in this case the
explanatory header and the record corresponding to Matusalem, are not
part of the output produced by @command{recsel}. This is because
@command{recsel} is concerned only with the data.
@command{recsel} will also ``pack'' the records so any extra empty
lines that may be between records are not echoed in the output:
@multitable @columnfractions .5 .5
@item
@example
@strong{acquaintances.rec:}
Name: Peter the Great
Age: 53
# Note the extra empty lines.
Name: Bart Simpson
Age: 10
@end example
@tab
@example
$ recsel acquaintances.rec
Name: Peter the Great
Age: 53
Name: Bart Simpson
Age: 10
@end example
@end multitable
@noindent
It is common to have data spread over several recfiles.
For example, we could have a @file{contacts.rec} file containing
general contact records, and also a @file{work-contacts.rec} file
containing business contacts:
@multitable @columnfractions .5 .5
@item
@example
@strong{contacts.rec:}
Name: Granny
Phone: +12 23456677
Name: Doctor
Phone: +12 58999222
@end example
@tab
@example
@strong{work-contacts.rec:}
Name: Yoyodyne Corp.
Email: sales@@yoyod.com
Phone: +98 43434433
Name: Robert Harris
Email: robert.harris@@yoyod.com
Note: Sales Department.
@end example
@end multitable
Both files can be passed to @command{recsel} in the command line. In
that case @command{recsel} will simply process them and output their
records in the same order they were specified:
@example
$ recsel contacts.rec work-contacts.rec
Name: Granny
Phone: +12 23456677
Name: Doctor
Phone: +12 58999222
Name: Yoyodyne Corp.
Email: sales@@yoyod.com
Phone: +98 43434433
Name: Robert Harris
Email: robert.harris@@yoyod.com
Note: Sales Department.
@end example
@noindent
As mentioned above, the output follows the ordering on the command
line, so @command{recsel work-contacts.rec
contacts.rec} would output the records of @file{work-contacts.rec} first
and then the ones from @file{contacts.rec}.
@noindent
Note however that @command{recsel} will merge records from several
files specified in the command line only if they are anonymous.
If the contacts in our files were typed:
@multitable @columnfractions .5 .5
@item
@example
@strong{contacts.rec:}
%rec: Contact
Name: Granny
Phone: +12 23456677
Name: Doctor
Phone: +12 58999222
@end example
@tab
@example
@strong{work-contacts.rec:}
%rec: Contact
Name: Yoyodyne Corp.
Email: sales@@yoyod.com
Phone: +98 43434433
Name: Robert Harris
Email: robert.harris@@yoyod.com
Note: Sales Department.
@end example
@end multitable
@noindent
Then we would get the following error message:
@example
$ recsel contacts.rec work-contacts.rec
recsel: error: duplicated record set 'Contact' from work-contacts.rec.
@end example
@node Selecting by Type
@section Selecting by Type
As we saw in the section discussing record descriptors, it is possible
to have several different types of records in a single recfile.
Consider for example a @file{gnu.rec} file containing information
about maintainers and packages in the GNU Project:
@example
%rec: Maintainer
Name: Jose E. Marchesi
Email: jemarch@@gnu.org
Name: Luca Saiu
Email: positron@@gnu.org
%rec: Package
Name: GNU recutils
LastRelease: 12 February 2014
Name: GNU epsilon
LastRelease: 10 March 2013
@end example
@noindent If @command{recsel} is invoked on that file it will complain:
@example
$ recsel gnu.rec
recsel: error: several record types found. Please use -t to specify one.
@end example
@noindent
This is because @command{recsel} does not know which records to
output: the maintainers or the packages. This can be resolved by
using the @code{-t} command line option:
@example
$ recsel -t Package gnu.rec
Name: GNU recutils
LastRelease: 12 February 2014
Name: GNU epsilon
LastRelease: 10 March 2013
@end example
@noindent
By default @command{recsel} never outputs record descriptors. This is
because most of the time the user is only interested in the data.
However, with the @code{-d} command line option, the record descriptor
of the selected type is printed preceding the data records:
@example
$ recsel -d -t Maintainer gnu.rec
%rec: Maintainer
Name: Jose E. Marchesi
Email: jemarch@@gnu.org
Name: Luca Saiu
Email: positron@@gnu.org
@end example
@noindent
Note that at the moment it is not possible to select non-typed
(default) records when other record sets are stored in the same file.
This is one of the reasons why mixing non-typed records and typed
records in a single recfile is not recommended.
@noindent
Note also that if a nonexistent record type is specified in @code{-t}
then @command{recsel} does nothing.
@node Selecting by Position
@section Selecting by Position
As was explained in the previous sections, @command{recsel} outputs
all the records of some record set. The records are echoed in the
same order they are written in the recfile. However, often it is
desirable to select a subset of the records, determined by the position
they occupy in their record set.
The @code{-n} command line option to @command{recsel} supports doing
this in a natural way. This is how we would retrieve the first
contact listed in a contacts database using @command{recsel}:
@example
$ recsel -n 0 contacts.rec
Name: Granny
Phone: +12 23456677
@end example
@noindent
Note that the index is zero-based. If we want to retrieve more
records we can specify several indexes to @code{-n} separated by
commas. If a given index is too big, it is simply ignored:
@example
$ recsel -n 0,1,999 contacts.rec
Name: Granny
Phone: +12 23456677
Name: Doctor
Phone: +12 58999222
@end example
@noindent With @code{-n}, the order in which the records are echoed does not
depend on the order of the indexes passed to @code{-n}.
For example, the output of @command{recsel -n 0,1} will be
identical to the output of @command{recsel -n 1,0}.
Ranges of indexes can also be used to select a subset of the records.
For example, the following call would also select the first three
contacts of the database:
@example
$ recsel -n 0-2 contacts.rec
Name: Granny
Phone: +12 23456677
Name: Doctor
Phone: +12 58999222
Name: Dad
Phone: +12 88229900
@end example
@noindent It is possible to mix single indexes and index
ranges in the same call. For example, @command{recsel -n 0,5-6} would
select the first, sixth and seventh records.
@node Random Records
@section Random Records
Consider a database in which each record is a cooking recipe. It is
always difficult to decide what to cook each day, so it would be nice
if we could ask @command{recsel} to pick a random recipe. This can
be achieved using the @code{-m} (@code{--random}) command line option
of @command{recsel}:
@example
$ recsel -m 1 recipes.rec
Title: Curry chicken
Ingredient: A whole chicken
Ingredient: Curry
Preparation: ...
@end example
@noindent If we need two recipes, because we will be cooking at
both lunch and dinner, we can pass a different number to @code{-m}:
@example
$ recsel -m 2 recipes.rec
Title: Fabada Asturiana
Ingredient: 300 gr of fabes.
Ingredient: Chorizo
Ingredient: Morcilla
Preparation: ...
Title: Pasta with ragu
Ingredient: 500 gr of spaghetti.
Ingredient: 2 tomatoes.
Ingredient: Minced meat.
Preparation: ...
@end example
@noindent
The algorithm used to implement @code{-m} guarantees that
you will never get multiple instances of the same record. This means
that if a record set has @var{n} records and you ask for @var{n}
random records, you will get all the records in a random order.
@node Selection Expressions
@section Selection Expressions
@cindex selection expressions
@dfn{Selection expressions}, also known as ``sexes'' in recutils
jargon, are infix expressions that can be applied to a record.
A ``sex'' is a predicate which selects a subset of records within a recfile.
They can be simple expressions involving just one operator and a pair of
operands, or complex compound expressions with parenthetical sub-expressions
and many operators and operands.
One of their most common uses is to examine records matching a particular
set of conditions.
@menu
* Selecting by predicate:: Selecting records which satisfy conditions.
* SEX Operands:: Literal values, fields and sub-expressions.
* SEX Operators:: Arithmetic, logical and other operators.
* SEX Evaluation:: Selection expressions are like generators.
@end menu
@node Selecting by predicate
@subsection Selecting by predicate
@cindex selecting records
@cindex looking up data
@cindex retrieving data
Consider the example recfile @file{acquaintances.rec} introduced earlier.
It contains names of people along with their respective ages.
Suppose we want to get a list of the names of all the children.
It would not be easy to do this using @command{grep}.
Neither would it, for any reasonably large recfile, be feasible to search
manually for the children.
Fortunately the @command{recsel} command provides an easy way to do
such a lookup:
@cindex @command{recsel}
@example
$ recsel -e "Age < 18" -P Name acquaintances.rec
Bart Simpson
Adrian Mole
@end example
@noindent Let us look at each of the arguments to @command{recsel} in turn.
Firstly we have @code{-e} which tells @command{recsel} to lookup records
matching the expression @code{Age < 18} --- in other words all those people
whose ages are less than 18.
@cindex selection expressions
This is an example of a @dfn{selection expression}.
In this case it is a simple test, but it can be as complex as needed.
Next, there is @code{-P} which tells @command{recsel} to print out the value of
the @code{Name} field --- because we want just the name, not the entire record.
The final argument is the name of the file from which the records are
to come: @file{acquaintances.rec}.
Rather than explicitly storing ages in the recfile, a more realistic example
might have the date of birth instead
(otherwise it would be necessary to update the people's ages in the
recfile on every birthday).
@example
# Date of Birth
%type: Dob date
Name: Alfred Nebel
Dob: 20 April 2010
Email: alf@@example.com
Name: Bertram Worcester
Dob: 3 January 1966
Email: bert@@example.com
Name: Charles Spencer
Dob: 4 July 1997
Email: charlie@@example.com
Name: Dirk Hogart
Dob: 29 June 1945
Email: dirk@@example.com
Name: Ernest Wright
Dob: 26 April 1978
Email: ernie@@example.com
@end example
@noindent Now we can achieve a similar result as before, by looking up
the names of all those people who were born after a particular date:
@example
$ recfix acquaintances.rec
$ recsel -e "Dob >> '31 July 1994'" -p Name acquaintances.rec
Name: Alfred Nebel
Name: Charles Spencer
@end example
@cindex date comparison
@noindent The @code{>>} operator means ``later than'', and is used
here to select a date of birth after 31st July 1994.
Note also that this example uses a lower case @code{-p} whereas the preceding example
used the upper case @code{-P}. The difference is that @code{-p} prints the field name
and field value, whereas @code{-P} prints just the value.
@command{recsel} accepts more than one @code{-e} argument,
each introducing a selection expression,
in which case the records which satisfy all expressions are selected.
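For instance, a sketch of this usage with the @file{acquaintances.rec}
file shown above, combining a date condition and a name condition
(both expressions must be satisfied):
@example
$ recfix acquaintances.rec
$ recsel -e "Dob >> '31 July 1994'" -e "Name ~ 'Spencer'" \
         -p Name acquaintances.rec
Name: Charles Spencer
@end example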
You can provide more than one field label to @code{-P} or @code{-p} in order to select
additional fields to be displayed.
For example, if you wanted to send an email to all children 14 to 18
years of age,
and today's date were @w{1st August} 2012, then you could use the following command to get
the name and email address of all such children:
@example
$ recfix acquaintances.rec
$ recsel -e "Dob >> '31 July 1994' && Dob << '01 August 1998'" \
-p Name,Email acquaintances.rec
Name: Charles Spencer
Email: charlie@@example.com
@end example
@noindent As you can see, there is only one such child in our record set.
@cindex quotation marks
Note that the example command shown above contains both double quotes @code{"} and
single quotes @code{'}.
@cindex date comparison
The double quotes are interpreted by the shell (@eg{} @command{bash}) and
the single quotes are interpreted by @command{recsel}, defining a
string. (The trailing backslash is interpreted by the shell as the
usual line-continuation character, so that this manual doesn't have a
too-long line.)
@node SEX Operands
@subsection SEX Operands
@cindex operands, SEX operands
The supported operands are: numbers, strings, field names and
parenthesized expressions.
@subsubsection Numeric Literals
@cindex literals, numeric literals
The supported numeric literals are integer numbers and real numbers.
The usual sign character @samp{-} is used to denote negative values.
Integer values can be denoted in base 10, base 16 using the @code{0x}
prefix, and base 8 using the @code{0} prefix. Examples are:
@example
10000
0
0xFF
-0xa
012
-07
-1342
.12
-3.14
@end example
@subsubsection String Literals
@cindex literals, string literals
String values are delimited by either the @code{'} character or the
@code{"} character. Whichever delimiter is used, the delimiter closing
the literal must be the same as the delimiter used to open it.
Newlines and tabs can be part of a string literal.
Examples are:
@example
'Hello.'
'The following example is the empty string.'
''
@end example
@cindex quotation marks
The @code{'} and @code{"} characters can be part of a string if they
are escaped with a backslash, as in:
@example
'This string contains an apostrophe: \'.'
"This one a double quote: \"."
@end example
@subsubsection Field Values
@cindex field values, in selection expressions
The value of a field can be included in a selection expression
by writing the name of the field. The field name is replaced by a string
containing the field value, to handle the possibility of records with
more than one field by that name. Examples:
@example
Name
Email
long_field_name
@end example
It is possible to use the role part of a field if it is not empty.
So, for example, if we are searching for the issues opened by
@samp{John Smith} in a database of issues we could write:
@example
$ recsel -e "OpenedBy = 'John Smith'"
@end example
@noindent
instead of using a full field name:
@example
$ recsel -e "Hacker:Name:OpenedBy = 'John Smith'"
@end example
When the name of a field appears in an expression, the expression is
applied to all the fields in the record featuring that name. So, for
example, the expression:
@example
Email ~ "\\.org"
@end example
@noindent
matches any record in which there is a field named @samp{Email}
whose value terminates in (the literal string) @samp{.org}.
If we are interested in the value of some specific email, we can specify
its relative position in the containing record by using @dfn{subscripts}.
@cindex subscripts, in selection expressions
Consider, for example:
@example
Email[0] ~ "\\.org"
@end example
@noindent
It will match for:
@example
Name: Mr. Foo
Email: foo@@foo.org
Email: mr.foo@@foo.com
@end example
@noindent
But not for:
@example
Name: Mr. Foo
Email: mr.foo@@foo.com
Email: foo@@foo.org
@end example
The regexp syntax supported in selection expressions is POSIX
EREs, with several GNU extensions. @xref{Regular Expressions}.
@subsubsection Parenthesized Expressions
@cindex parentheses, in selection expressions.
Parenthesis characters (@code{(} and @code{)}) can be used to group
sub-expressions in the usual way.
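For instance, a sketch of a selection expression where parentheses
make the grouping of the boolean operators (described in the next
section) explicit:
@example
(Age < 18 || Age > 65) && #Email
@end example
@noindent
Assuming C-like precedence, where @code{&&} binds tighter than
@code{||}, the parentheses above change how the sub-expressions are
grouped.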
@node SEX Operators
@subsection Operators
@cindex operators, in selection expressions
The supported operators are arithmetic operators (addition,
subtraction, multiplication, division and modulus), logical operators,
string operators and field operators.
@subsubsection Arithmetic Operators
@cindex arithmetic operators
@cindex operators, arithmetic operators
Arithmetic operators for addition (@code{+}), subtraction (@code{-}),
multiplication (@code{*}), integer division (@code{/}) and modulus
(@code{%}) are supported with their usual meanings.
These operators require either numeric operands or string operands
whose value can be interpreted as numbers (integer or real).
@subsubsection Boolean Operators
@cindex boolean operators
@cindex operators, boolean operators
The boolean operators @strong{and} (@code{&&}), @strong{or}
(@code{||}) and @strong{not} (@code{!})@: are supported with the same
semantics as their C counterparts.
A compound boolean operator @code{=>} is also supported in order to
ease the elaboration of constraints in records: @code{A => B}, which
can be read as ``A implies B'', translates into @code{!A || (A && B)}.
The boolean operators expect integer operands, and will try to convert
any string operand to an integer value.
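As a sketch of how @code{=>} is typically used, consider an arbitrary
constraint (@pxref{Arbitrary Constraints}) added to the books database
of the introduction; the @code{Borrower} field is purely illustrative:
@example
%rec: Book
%type: Location enum loaned home unknown
%constraint: Location = 'loaned' => #Borrower
@end example
@noindent
Every book whose @code{Location} is @code{loaned} is then required to
contain at least one @code{Borrower} field.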
@subsubsection Comparison Operators
@cindex operators, comparison operators
@cindex comparison
The compare operators @strong{less than} (@code{<}), @strong{greater
than} (@code{>}), @strong{less than or equal} (@code{<=}),
@strong{greater than or equal} (@code{>=}), @strong{equal} (@code{=})
and @strong{unequal} (@code{!=}) are supported with their usual
meaning.
Strings can be compared with the equality operator (@code{=}).
The match operator (@code{~}) can be used to match a string with a
given regular expression (@pxref{Regular Expressions}).
@subsubsection Date Comparison Operators
@cindex date comparison
The compare operators @strong{before} (@code{<<}), @strong{after}
(@code{>>}) and @strong{same time} (@code{==}) can be used with fields
and strings containing parseable dates.
@xref{Date input formats}.
@subsubsection Field Operators
@cindex field operators
@cindex counting occurrences of a field
A field counter, written as the character @code{#} followed by a field
name, is replaced by the number of fields with that name in the
record. For example:
@example
#Email
@end example
The previous expression is replaced with the number of fields named
@code{Email} in the record. It can be zero if the record does not
have a field with that name.
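Following the description above, a field counter can be used directly
in a selection expression. For example, with the @file{books.rec}
database of the introduction we could select the books having more
than one author:
@example
$ recsel -e "#Author > 1" books.rec
Title: chapters.gnu.org administration guide
Author: Nacho Gonzalez
Author: Jose E. Marchesi
Location: unknown
@end example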
@subsubsection String Operators
@cindex string operators
@cindex operators, string operators
The string concatenation operator (@code{&}) can be used to
concatenate any number of strings and field values.
@example
'foo' & Name & 'bar'
@end example
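As a sketch (using the @file{work-contacts.rec} file shown earlier),
the concatenation can be combined with the match operator in order to
test a value built from several fields:
@example
$ recsel -e "(Name & ' <' & Email & '>') ~ 'Robert.*yoyod'" \
         work-contacts.rec
Name: Robert Harris
Email: robert.harris@@yoyod.com
Note: Sales Department.
@end example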
@subsubsection Conditional Operator
@cindex conditional operator
@cindex operators, conditional operator
The ternary conditional operator can be used to select alternatives
based on the value of some expression:
@example
expr1 ? expr2 : expr3
@end example
If @code{expr1} evaluates to true (@ie{} it is an integer or the string
representation of an integer and its value is not zero) then the
operator yields @code{expr2}. Otherwise it yields @code{expr3}.
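Since a selection expression can also be used to compute a value
(@pxref{SEX Evaluation}), a sketch of the conditional operator inside
a @command{recfmt} template slot (@pxref{Templates}) could be the
following, with field names taken from the @file{acquaintances.rec}
example:
@example
@{@{Name@}@} is @{@{Age >= 18 ? "an adult" : "a minor"@}@}
@end example
@noindent
When the template is applied to a record, the slot expands to either
@samp{an adult} or @samp{a minor}, depending on the value of the
@code{Age} field.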
@node SEX Evaluation
@subsection Evaluation of Selection Expressions
@cindex evaluation, of selection expressions
Given that:
@itemize @minus
@item It is possible to refer to fields by name in selection expressions.
@item Records can have several fields with the same name.
@end itemize
@noindent
It is clear that some backtracking mechanism is needed in the
evaluation of the selection expressions. For example, consider the
following expression that is deciding whether a ``registration'' in a
webpage should be rejected:
@example
((Email ~ "foomail\.com") || (Age <= 18)) && !#Fixed
@end example
The previous expression will be evaluated for every possible
permutation of the fields ``Email'', ``Age'' and ``Fixed'' present in
the record, until one of the combinations succeeds. At that point the
computation is interrupted.
When used to decide whether a record matches some criteria, the goal
of a selection expression is to act as a boolean expression. In that
case the final value of the expression depends on both the type and
the value of the result yielded by the top-most subexpression:
@itemize @minus
@item If the result is an @b{integer}, the expression is true if its
value is not zero.
@item If the result is a @b{real}, or a @b{string}, the expression
evaluates to false.
@end itemize
Sometimes a selection expression is used to compute a result instead
of a boolean. In that case the returned value is converted to a
string. This is used when replacing the slots in templates
(@pxref{Templates}).
@node Field Expressions
@section Field Expressions
@cindex field expressions
@cindex FEX
@dfn{Field expressions} (also known as ``fexes'') are a way to select
fields of a record. They also allow you to do certain transformations
on the selected fields, such as changing their names.
A FEX comprises a sequence of @dfn{elements} separated by commas:
@example
ELEM_1,ELEM_2,@dots{},ELEM_N
@end example
Each element makes a reference to one or more fields in a record
identified by a given name and an optional subscript:
@example
@var{Field_Name}[@var{min}-@var{max}]
@end example
@noindent
@var{min} and @var{max} are zero-based indexes. It is possible to
refer to a field occupying a given position. For example, consider
the following record:
@example
Name: Mr. Foo
Email: foo@@foo.com
Email: foo@@foo.org
Email: mr.foo@@foo.org
@end example
@noindent
We would select all the emails of the record with:
@example
Email
@end example
@noindent
The first email with:
@example
Email[0]
@end example
@noindent
The third email with:
@example
Email[2]
@end example
@noindent
The second and the third email with:
@example
Email[1-2]
@end example
And so on. It is possible to select the same field (or
range of fields) more than once just by repeating them in a field
expression. Thus, the field expression:
@example
Email[0],Name,Email
@end example
@noindent
will print the first email, the name, and then all the email fields
including the first one.
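Field expressions are typically passed to the utilities; for instance,
@command{recsel} accepts a fex as the value of its @code{-p} option.
A sketch, assuming a hypothetical @file{contacts.rec}:
@example
$ recsel -p 'Email[0],Name,Email' contacts.rec
@end example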
@cindex aliasing, field name aliasing
It is possible to include a @dfn{rewrite rule} in an element of a
field expression, which specifies an alias for the selected fields:
@example
@var{Field_Name}[@var{min}-@var{max}]:@var{Alias}
@end example
@noindent
For example, the following field expression specifies an alias for the
fields named @code{Email} in a record:
@example
Name,Email:ElectronicMail
@end example
Since the rewrite rules only affect the fields selected in a single
element of the field expression, it is possible to define different
aliases for several fields having the same name but occupying different
positions:
@example
Name,Email[0]:PrimaryEmail,Email[1]:SecondaryEmail
@end example
@noindent
When that field expression is applied to the following record:
@example
Name: Mr. Foo
Email: primary@@email.com
Email: secondary@@email.com
Email: other@@email.com
@end example
@noindent
the result will be:
@example
Name: Mr. Foo
PrimaryEmail: primary@@email.com
SecondaryEmail: secondary@@email.com
Email: other@@email.com
@end example
It is possible to use dot notation in order to refer to fields and
sub-fields. This is mainly used in the context of joins, where new
fields are created having compound names such as @code{Foo_Bar}. A
reference to such a field can be made in the fex using dot notation
as follows:
@example
Foo.Bar
@end example
@node Sorted Output
@section Sorted Output
@cindex @code{%sort}
@cindex sorting
The @code{%sort} special field sets the sorting criteria for the records
contained in a record set. Its usage is:
@example
%sort: @var{field1} @var{field2} ...
@end example
@noindent
This means that the desired order for the records will be determined by
the contents of the fields named in the @code{%sort} value. The
sorting is always done in ascending order, and there may be records
that lack the involved fields, @ie{} the sorting
fields need not be mandatory.
It is an error to have more than one @code{%sort} field in the same
record descriptor, as only one field list can be used as sorting
criteria.
Consider for example that we want to keep the records in our inventory
system ordered by entry date. We could achieve that by using the
following record descriptor in the database:
@example
%rec: Item
%type: Date date
%sort: Date
Id: 1
Title: Staplers
Date: 10 February 2011
Id: 2
Title: Ruler Pack 20
Date: 2 March 2009
@dots{}
@end example
@noindent
As you can see in the example above, the fact that we use @code{%sort} in a
database does not mean that the database will always be physically
ordered. Unsorted record sets are not a data integrity
problem, and thus the diagnostic tools must not declare a recfile as
invalid because of this. The utility @command{recfix} provides a way
to physically order the records in the file (@pxref{Invoking recfix}).
On the other hand any program listing, presenting or processing data
extracted from the recfile must honor the @code{%sort} entry. For
example, when using the following @command{recsel} program in the
database above we would get the output sorted by date:
@example
$ recsel inventory.rec
Id: 2
Title: Ruler Pack 20
Date: 2 March 2009
Id: 1
Title: Staplers
Date: 10 February 2011
@end example
@cindex order of fields
@noindent
The sorting of the selected field depends on its type:
@itemize @minus
@item Numeric fields (integers, ranges, reals) are numerically ordered.
@item Boolean fields are ordered considering that ``false'' values come first.
@item Dates are ordered chronologically.
@item Any other kind of field is ordered using a lexicographic order.
@end itemize
It is possible to specify several fields as the sorting criteria. In
that case the records are sorted using a lexicographic order. Consider
for example the following unsorted database containing marks for
several students:
@example
%rec: Marks
%type: Class enum A B C
%type: Score real
Name: Mr. One
Class: C
Score: 6.8
Name: Mr. Two
Class: A
Score: 6.8
Name: Mr. Three
Class: B
Score: 9.2
Name: Mr. Four
Class: A
Score: 2.1
Name: Mr. Five
Class: C
Score: 4
@end example
@noindent
If we wanted to sort it by @code{Class} and by @code{Score} we would
insert a @code{%sort} special field in the descriptor, having:
@example
%rec: Marks
%type: Class enum A B C
%type: Score real
%sort: Class Score
Name: Mr. Four
Class: A
Score: 2.1
Name: Mr. Two
Class: A
Score: 6.8
Name: Mr. Three
Class: B
Score: 9.2
Name: Mr. Five
Class: C
Score: 4
Name: Mr. One
Class: C
Score: 6.8
@end example
@noindent
The order of the fields in the @code{%sort} field is
significant. If we reverse the order in the example above then we get
a different sorted set:
@example
%rec: Marks
%type: Class enum A B C
%type: Score real
%sort: Score Class
Name: Mr. Four
Class: A
Score: 2.1
Name: Mr. Five
Class: C
Score: 4
Name: Mr. Two
Class: A
Score: 6.8
Name: Mr. One
Class: C
Score: 6.8
Name: Mr. Three
Class: B
Score: 9.2
@end example
@noindent
In this last case, @code{Mr. One} comes after @code{Mr. Two} because the
class @code{A} comes before the class @code{C}, even though the score
is the same (@code{6.8}).
@node Editing Records
@chapter Editing Records
The simplest way of editing a recfile is to start your favourite
text editor and hack the contents of the file as desired. However,
the rec format is structured enough that recfiles can be updated
automatically by programs. This is useful for writing shell scripts
or when there are complex data integrity rules stored in the file that
we want to be sure to preserve.
The following sections discuss the usage of the recutils for altering
recfiles at the level of records: adding new records, deleting or
commenting them out, sorting them, @etc{}
@menu
* Inserting Records:: Inserting data into recfiles.
* Deleting Records:: Removing data.
* Sorting Records:: Physical reordering of records.
@end menu
@node Inserting Records
@section Inserting Records
Adding new records to a recfile is pretty trivial: open it with your
text editor and just write down the fields comprising the records.
This is really the best way to add contents to a recfile containing
simple data. However, complex databases may introduce some
difficulties:
@table @emph
@item Multi-line values.
It can be tedious to manually encode values spanning several lines.
@item Data integrity.
It is difficult to manually maintain the integrity of data stored
in the database.
@item Counters and timestamps.
Some record sets feature auto-generated fields, which are commonly
used to implement counters and time-stamps. @xref{Auto-Generated
Fields}.
@end table
Thus, to facilitate the insertion of new data a command line utility called
@command{recins} is included in the recutils. The usage of @command{recins} is
very simple, and it can be used both from the command line and called from
another program. The following subsections discuss several aspects of
using this utility.
@menu
* Adding Records With recins:: Basics of the @command{recins} utility.
* Replacing Records With recins:: Substituting records in a file.
* Adding Anonymous Records:: Inserting or replacing records with no
type.
@end menu
@node Adding Records With recins
@subsection Adding Records With recins
Each invocation of @command{recins} adds one record to the targeted
database. The fields comprising the records are specified using pairs
of @code{-f} and @code{-v} command line arguments. For example, this
is how we would add the first entry to a previously empty contacts
database:
@example
$ recins -f Name -v "Mr Foo" -f Email -v foo@@bar.baz contacts.rec
$ cat contacts.rec
Name: Mr. Foo
Email: foo@@bar.baz
@end example
@noindent
If we invoke @command{recins} again on the same database we will be adding a
second record:
@example
$ recins -f Name -v "Mr Bar" -f Email -v bar@@gnu.org contacts.rec
$ cat contacts.rec
Name: Mr. Foo
Email: foo@@bar.baz
Name: Mr. Bar
Email: bar@@gnu.org
@end example
There is no limit on the number of @code{-f} @code{-v} pairs that can
be specified to @command{recins}, other than any limit on command line arguments
which may be imposed by the shell.
The field values provided using @code{-v} are encoded to follow the
rec format conventions, including multi-line field values.
Consider the following example:
@example
$ recins -f Name -v "Mr. Foo" -f Address -v '
Foostrs. 19
Frankfurt am Oder
Germany' contacts.rec
$ cat contacts.rec
Name: Mr. Foo
Address:
+ Foostrs. 19
+ Frankfurt am Oder
+ Germany
@end example
It is also possible to provide fields already encoded as rec data for
their addition, using the @code{-r} command line argument. This
argument can be intermixed with @code{-f} @code{-v}.
@example
$ recins -f Name -v "Mr. Foo" -r "Email: foo@@bar.baz" contacts.rec
$ cat contacts.rec
Name: Mr. Foo
Email: foo@@bar.baz
@end example
If the string passed to @code{-r} is not valid rec data then
@command{recins} will complain with an error and the operation will be
aborted.
At this time, it is not possible to add new records
containing comments.
@node Replacing Records With recins
@subsection Replacing Records With recins
@command{recins} can also be used to replace existing records in a
database with a provided record. This is done by specifying some
criteria selecting the record (or records) to be replaced.
Consider for example the following command applied to our contacts
database:
@example
$ recins -e "Email = 'foo@@bar.baz'" -f Name -v "Mr. Foo" \
-f Email -v "new@@bar.baz" contacts.rec
@end example
@noindent
The contact featuring an email @code{foo@@bar.baz} gets replaced with
the following record:
@example
Name: Mr. Foo
Email: new@@bar.baz
@end example
The records to be replaced can also be specified by index, or a
range of indexes. For example, the following command replaces the
first, second and third records in a database with dummy records:
@example
$ recins -n 0,1-2 -f Dummy -v XXX foo.rec
$ cat foo.rec
Dummy: XXX
Dummy: XXX
Dummy: XXX
... Other records ...
@end example
@node Adding Anonymous Records
@subsection Adding Anonymous Records
In a previous chapter we noted that @command{recsel} interprets the
absence of a @command{-t} argument depending on the actual contents of
the file. If the recfile contains records of just one type the
command assumes that the user is referring to these records.
@command{recins} does not follow this convention, and the absence of
an explicit type always means to insert (or replace) an anonymous
record. Consider for example the following database:
@example
%rec: Marks
%type: Class enum A B C
Name: Alfred
Class: A
Name: Bertram
Class: B
@end example
@noindent
If we want to insert a new mark we have to specify the type explicitly
using @command{-t}:
@example
$ cat marks.rec | recins -t Marks -f Name -v Xavier -f Class -v C
%rec: Marks
%type: Class enum A B C
Name: Alfred
Class: A
Name: Bertram
Class: B
Name: Xavier
Class: C
@end example
@noindent
If we forget to specify the type then an anonymous record is created
instead:
@example
$ cat marks.rec | recins -f Name -v Xavier -f Class -v C
Name: Xavier
Class: C
%rec: Marks
%type: Class enum A B C
Name: Alfred
Class: A
Name: Bertram
Class: B
@end example
@node Deleting Records
@section Deleting Records
@cindex deleting records
Just as @code{recins} inserts records, the utility @code{recdel} deletes them.
Consider the following recfile @file{stock.rec}:
@example
%rec: Item
%type: Expiry date
%sort: Title
Title: First Aid Kit
Expiry: 2 May 2009
Title: Emergency Rations
Expiry: 10 August 2009
Title: Life raft
Expiry: 2 March 2009
@end example
Suppose we wanted to delete all items
with an @code{Expiry} value before a certain date. We could do this with the following command:
@example
$ recdel -t Item -e 'Expiry << "5/12/2009"' stock.rec
@end example
@noindent
After running this command, only one record will remain in the file
(@viz{} the one titled `Emergency Rations') because all the others have expiry dates
prior to 12 May 2009.
@footnote{`5/12/2009' means the 12th day of May 2009, @emph{not} the fifth day of December,
even if your @env{LC_TIME} environment variable has been set to suggest otherwise.}
The @command{-t} option can be omitted if, and only if, there is no @code{%rec} field
in the recfile.
@command{recdel} tries to warn you if you attempt to perform a delete operation
which it deems to be too pervasive. In such cases, it will refuse to run,
unless you give the @command{--force} flag.
However, you should not rely upon @command{recdel} to protect you, because it cannot
always correctly guess that you might be deleting more records than intended.
For this reason, it may be wise to use the @command{-c} flag, which causes
the relevant records to be commented out, rather than deleted. (And
of course backups are always wise.)
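For example, the deletion shown above could be performed
non-destructively as follows (a sketch); the matching records would
remain in @file{stock.rec}, but commented out with a leading @code{#}:
@example
$ recdel -c -t Item -e 'Expiry << "5/12/2009"' stock.rec
@end example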
The complete options available to the @command{recdel} command are explained later.
@xref{Invoking recdel}.
@node Sorting Records
@section Sorting Records
@cindex sorting
@cindex sorting, physically
In the example above, note the existence of the @code{%sort: Title} line.
This field was discussed previously (@pxref{Sorted Output}) and, as mentioned, does not
imply that the records need to be stored in the recfile in any particular order.
However, if desired, you can automatically arrange the recfile in that order using
@command{recfix} with the @command{--sort} flag.
After running the command
@example
$ recfix --sort stock.rec
@end example
@noindent
the file @file{stock.rec} will have its records sorted in alphabetical order
of the @code{Title} fields, thus:
@example
%rec: Item
%type: Expiry date
%sort: Title
Title: Emergency Rations
Expiry: 10 August 2009
Title: First Aid Kit
Expiry: 2 May 2009
Title: Life raft
Expiry: 2 March 2009
@end example
@node Editing Fields
@chapter Editing Fields
Fields of a recfile can, of course, be edited manually using an editor, and this is often
the easiest way when only a few fields need to be changed or when the nature of the changes
does not follow any particular pattern.
If, however, a large number of similar changes to several records are
required, the @command{recset} command can make the job easier.
The formal description of @command{recset} is presented later
(@pxref{Invoking recset}). In this chapter some typical usage
examples are discussed. As with @command{recdel}, @command{recset}, if
used erroneously, has the potential to make very pervasive changes,
which could result in a large loss of data. It is prudent therefore
to take a copy of a recfile before running such commands.
@menu
* Adding Fields:: Adding new fields to records.
* Setting Fields:: Editing field values.
* Deleting Fields:: Removing or commenting-out fields.
* Renaming Fields:: Changing the name of a field.
@end menu
@node Adding Fields
@section Adding Fields
@cindex adding fields
As mentioned above, the command @command{recins} adds new records to a
recfile, but it cannot
add fields to an existing record.
This task can be achieved automatically using @command{recset} with its @command{-a} flag.
Suppose that (after a stock inspection) you wanted to add an `Inspected' field to all the items in
the recfile.
The following command could be used.
@example
$ recset -t Item -f Inspected -a 'Yes' stock.rec
@end example
@noindent
Here, because no record selection flag was provided, the command affected @emph{all} the
records of type `Item'.
We could limit the effect of the command using the @command{-e}, @command{-q},
@command{-n} or @command{-m} flags.
For example to add the `Inspected' field to only the first item the following command
would work:
@example
$ recset -t Item -n 0 -f Inspected -a 'Yes' stock.rec
@end example
@noindent
Similarly, a selection expression could have been used with the @command{-e} flag in order to
add the field only to records which satisfy the expression.
If you use @command{recset} with the @command{-a} flag on a field that already exists, a
new field (in addition to those already present) will be appended with the given value.
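For instance (a hedged sketch, not verbatim program output), adding the
field twice to the first item leaves that record with two
@code{Inspected} fields:
@example
$ recset -t Item -n 0 -f Inspected -a 'Yes' stock.rec
$ recset -t Item -n 0 -f Inspected -a 'Yes' stock.rec
$ recsel -t Item -n 0 -p Inspected stock.rec
Inspected: Yes
Inspected: Yes
@end example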
@node Setting Fields
@section Setting Fields
@cindex mutating field values
It is also possible to update the value of a field.
This is done using @command{recset} with its @command{-s} flag.
In the previous example, an `Inspected' flag was added to certain records,
with the value `Yes'.
After reflection, one might want to record the date of inspection, rather than
a simple yes/no flag.
Records which have no such field will remain unchanged.
@example
$ recset -t Item -f Inspected -s '30 October 2006' stock.rec
@end example
Although the above command does not have any selection criteria, it will
only affect those records for which an `Inspected' field exists.
This is because the @command{-s} flag only sets values of existing fields.
It will not create any fields.
If instead the @command{-S} flag is used, this will create the field
(if it does not already exist) @emph{and} set its value.
@example
$ recset -t Item -f Inspected -S '30 October 2006' stock.rec
@end example
@node Deleting Fields
@section Deleting Fields
@cindex deleting fields
You can delete fields using @command{recset}'s @command{-d} flag.
For example, if we wanted to delete the @code{Inspected} field which we introduced above,
we could do so as follows:
@example
$ recset -t Item -f Inspected -d stock.rec
@end example
@noindent
This would delete @emph{all} fields named @code{Inspected} from @emph{all} records of type
@code{Item}.
It may be that we only wanted to delete the @code{Inspected} fields from records which satisfy
a certain condition.
The following would delete the fields only from items whose @code{Expiry} date was before
2 January 2010:
@example
$ recset -t Item -e 'Expiry << "2 January 2010"' -f Inspected -d stock.rec
@end example
@node Renaming Fields
@section Renaming Fields
@cindex renaming fields
Another use of @command{recset} is to rename existing fields. This is achieved using the
@command{-r} flag.
To rename all instances of the @code{Expiry} field occurring in any
record of type @code{Item} to @code{UseBy},
the following command suffices:
@example
$ recset -t Item -f Expiry -r 'UseBy' stock.rec
@end example
@noindent
As with most operations, this could be done selectively, using the @command{-e} flag and a
selection expression.
@node Field Types
@chapter Field Types
Field values are, by default, unrestricted text strings. However, it
is often useful to impose some restrictions on the values of certain
fields. For example, consider the following record:
@example
Id: 111
Name: Jose E. Marchesi
Age: 30
MaritalStatus: single
Phone: +49 666 666 66
@end example
The values of the fields must clearly follow some structure in order
to make sense. @code{Id} is a numeric identifier for a
person. @code{Name} will never use several lines. @code{Age} will
typically be in the range @code{0..120}, and there are only a few
valid values for @code{MaritalStatus}: single, married, divorced, and
widow(er).
Phone numbers may also need to follow some standard format in order to be valid.
All these restrictions (and many others) can be enforced by using
@dfn{field types}.
There are two kinds of field types: @dfn{anonymous} and @dfn{named}. These are
described in the following sections.
@menu
* Declaring Types:: Declaration of types in record descriptors.
* Types and Fields:: Associating fields with types.
* Scalar Field Types:: Numbers and ranges.
* String Field Types:: Lines, limited strings and regular expressions.
* Enumerated Field Types:: Enumerations and boolean values.
* Date and Time Types:: Dates and times.
* Other Field Types:: Emails, fields, UUIDs, @dots{}
@end menu
@node Declaring Types
@section Declaring Types
A type can be declared in a record descriptor by using the
@code{%typedef} special field. The syntax is:
@example
%typedef: @var{type_name} @var{type_description}
@end example
@noindent
Where @var{type_name} is the name of the new type, and
@var{type_description} a description which varies depending of the
kind of type.
@cindex @code{range}, type description
For example, this is how a type @code{Age_t} could
be defined as numbers in the range @code{0..120}:
@example
%typedef: Age_t range 0 120
@end example
@noindent
Type names are identifiers having the following syntax:
@example
[a-zA-Z][a-zA-Z0-9_]*
@end example
@noindent
Even though any identifier with that syntax could be used for types,
it is a good idea to consistently follow some convention to help
distinguish type names from field names. For example, the
@code{_t} suffix could be used for types.
A type can be declared to be an alias for another type. The syntax
is:
@example
%typedef: @var{type_name} @var{other_type_name}
@end example
@noindent
Where @var{type_name} is declared to be a synonym of
@var{other_type_name}. This is useful to avoid duplicated type
descriptions. For example, consider the following example:
@example
%typedef: Id_t int
%typedef: Item_t Id_t
%typedef: Transaction_t Id_t
@end example
@noindent
Both @code{Item_t} and @code{Transaction_t} are aliases for the type
@code{Id_t}, which is in turn an alias for the type @code{int}.
So, they are both numeric identifiers.
The order of the @code{%typedef} fields is not relevant. In
particular, a type definition can forward-reference another type that is defined
subsequently. The previous example could have been written as:
@example
%typedef: Item_t Id_t
%typedef: Transaction_t Id_t
%typedef: Id_t int
@end example
@noindent
@cindex integrity problems
The integrity checker will complain if undefined types are referenced,
as well as when aliases end up referencing back (looping back
directly or indirectly) in type declarations. For
example, the following set of declarations contains a loop and is
therefore invalid:
@example
%typedef: A_t B_t
%typedef: B_t C_t
%typedef: C_t A_t
@end example
@noindent
The scope of a type is the record descriptor where it is defined.
@node Types and Fields
@section Types and Fields
@cindex @code{%type}
@cindex @code{%typedef}
@cindex types
@cindex field types
Fields can be declared to have a given type by using the @code{%type}
special field in a record descriptor. The synopsis is:
@example
%type: @var{field_list} @var{type_name_or_description}
@end example
@noindent
Where @var{field_list} is a list of field names separated by commas.
@var{type_name_or_description} can be either a type name which has
been previously declared using @code{%typedef}, or a type description.
Type names are useful when several fields are declared to be of the
same type:
@example
%typedef: Id_t int
%type: Id Id_t
%type: Product Id_t
@end example
@cindex anonymous types
@noindent
Anonymous types can be specified by writing a type description instead
of a type name. They help to avoid superfluous type declarations in
the common case where a type is used by just one field. A record
containing a single @code{Id} field, for example, can be defined
without having to use a @code{%typedef} in the following way:
@example
%rec: Task
%type: Id int
@end example
@node Scalar Field Types
@section Scalar Field Types
The rec format supports the declaration of fields of the following
scalar types: integer numbers, ranges and real numbers.
@cindex integers
Signed @dfn{integers} are supported by using the @code{int}
declaration:
@example
%typedef: Id_t int
@end example
@cindex hexadecimal
@cindex octal
@noindent
Given the declaration above, fields of type @code{Id_t} must
contain integers, and they may be negative. Hexadecimal values can be written
using the @code{0x} prefix, and octal values using an extra
@code{0}. Valid examples are:
@example
%type: Id Id_t
Id: 100
Id: -23
Id: -0xFF
Id: 020
@end example
@cindex ranges
@noindent
Sometimes it is desirable to reduce the @dfn{range} of integers allowed in a
field. This can be achieved by using a range type declaration:
@example
%typedef: Interrupt_t range 0 15
@end example
@noindent
Note that it is possible to omit the minimum index in ranges. In that
case it is implicitly zero:
@example
%typedef: Interrupt_t range 15
@end example
@noindent
It is possible to use the keywords @code{MIN} and @code{MAX} instead
of a numeric literal in one or both of the limits defining the
range. They mean the minimum and the maximum integer value supported
by the implementation respectively. See the following examples:
@example
%typedef: Negative range MIN -1
%typedef: Positive range 0 MAX
%typedef: AnyInt range MIN MAX
%typedef: Impossible range MAX MIN
@end example
@noindent
Hexadecimal and octal numbers can be used to specify the limits in a
range. This helps to define scalar types whose natural base is not
ten, like for example:
@example
%typedef: Address_t range 0x0000 0xFFFF
%typedef: Perms_t range 755
@end example
@cindex reals
@cindex fractions
@cindex floating point numbers
@noindent
@dfn{Real} number fields can be declared with the @code{real} type
specifier.
A wide range of real numbers can be represented this way, only limited
by the underlying floating point representation.
@cindex decimal separator
@cindex locale
The decimal separator is always the dot (@code{.}) character regardless
of the locale setting.
For example:
@example
%typedef: Longitude_t real
@end example
@noindent
Examples of fields of type real:
@example
%rec: Rectangle
%typedef: Longitude_t real
%type: Width Longitude_t
%type: Height Longitude_t
Width: 25.01
Height: 10
@end example
@node String Field Types
@section String Field Types
@cindex strings
The @code{line} field type specifier can be used to restrict the value
of a field to a single line, @ie{} no newline characters are allowed.
For example, a type for proper names could be declared as:
@example
%typedef: Name_t line
@end example
@noindent
Examples of fields of type line:
@cindex multiline field values
@example
Name: Mr. Foo Bar
Name: Mrs. Bar Baz
Name: This is
+ invalid
@end example
@cindex field size
@cindex size, field size
@cindex @code{size}, type description
@noindent
Sometimes it is the maximum size of the field value that shall be
restricted. The @code{size} field type specifier can be used to
define the maximum number of characters a field value can have. For
example, if we were collecting input that will be written into a
paper-based forms system allowing entries of up to 25 characters,
we could declare the entries as:
@example
%typedef: Address_t size 25
@end example
@noindent
Note that hexadecimal and octal integer constants can also be used to
specify field sizes:
@example
%typedef: Address_t size 0x18
@end example
@cindex restricting values of fields
@noindent
Arbitrary restrictions can be defined by using regular expressions.
@cindex @code{regexp}, type description
The @dfn{regexp} field type specifier introduces an ERE (extended
regular expression) that will be matched against fields having that
name. The synopsis is:
@example
%typedef: @var{type_name} regexp /@var{re}/
@end example
@noindent
where @var{re} is the regular expression to match.
For example, consider the @code{Id_t} type designed to represent
the encoding of the identifier of ID cards in some country:
@example
%typedef: Id_t regexp /[0-9]@{9@}[a-zA-Z]/
@end example
@noindent
Examples of fields of type @code{Id_t} are:
@example
IDCard: 123456789Z
IDCard: invalid id card
@end example
@noindent
Note that the slashes delimiting the @var{re} can be replaced with
any other character that is not itself used as part of the regexp.
That is useful in some cases such as:
@example
%typedef: Path_t regexp |(/[^/]/?)+|
@end example
@noindent
The regexp flavor supported in recfiles is POSIX ERE plus
several GNU extensions. @xref{Regular Expressions}.
@node Enumerated Field Types
@section Enumerated Field Types
@cindex enumerated types
Fields of this type contain symbols taken from an enumeration.
The type is described by writing the sequence of symbols comprising
the enumeration. Enumeration symbols are strings described by the
following regexp:
@example
[a-zA-Z0-9][a-zA-Z0-9_-]*
@end example
@noindent
The symbols are separated by blank characters (including newlines).
For example:
@cindex day of week
@example
%typedef: Status_t enum NEW STARTED DONE CLOSED
%typedef: Day_t enum Monday Tuesday Wednesday Thursday Friday
+ Saturday Sunday
@end example
@noindent
@cindex comments, in enumerated types
It is possible to insert comments when describing an enum type. The
comments are delimited by parenthesis pairs. The contents of the
comments can be any character but parentheses. For example:
@example
%typedef: TaskStatus_t enum
+ NEW (The task was just created)
+ IN_PROGRESS (Task started)
+ CLOSED (Task closed)
@end example
@noindent
@cindex boolean types
@dfn{Boolean} fields, declared with the type specifier @code{bool},
can be seen as special enumerations holding the
binary values true and false.
@example
%typedef: Yesno_t bool
@end example
@noindent
The literals allowed in boolean fields are @code{yes/no}, @code{0/1}
and @code{true/false}. Examples are:
@example
SwitchedOn: 1
SwitchedOn: yes
SwitchedOn: false
@end example
@node Date and Time Types
@section Date and Time Types
@cindex date, fields containing dates
@cindex time, fields containing time values
The @dfn{date} field type specifier can be used to declare dates and
times. The synopsis is:
@example
%typedef: @var{type_name} date
@end example
@cindex locale
@cindex time zone correction
@noindent
There are many permitted date formats, described in detail later in this manual (@pxref{Date input formats}).
Of particular note are the following:
@itemize @minus
@item Dates and times read from recfiles are not affected by the
locale or the timezone. This means that the @env{LC_TIME} and the
@env{TZ} environment variables are ignored.
If you wish, for example, to specify a time which must be interpreted as UTC, you
must explicitly append the time zone correction: @eg{} @samp{2001-1-10 12:09Z}.
@item The field value `1/10/2001' means January 10, 2001, @strong{not} October 1, 2001.
@item Relative times and dates (such as `1 day ago') are permitted but are not
particularly useful.
@end itemize
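Putting it together, a record set using dates might look like the
following (a sketch with hypothetical field names):
@example
%rec: Event
%typedef: Timestamp_t date
%type: Start,End Timestamp_t

Start: 2001-1-10 12:09Z
End: 2001-1-10 14:00Z
@end example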
@node Other Field Types
@section Other Field Types
@cindex email
The @dfn{Email} field type specifier is used to declare electronic
addresses. The synopsis is:
@example
%typedef: Email_t email
@end example
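A field can then be declared to be of that type in the usual way; for
example (a sketch):
@example
%type: Email Email_t

Email: foo@@bar.baz
@end example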
@noindent
Sometimes it is useful to have fields that store field names. For that
purpose the @dfn{Field} field type specifier is supported. The
synopsis is:
@example
%typedef: Field_t field
@end example
@noindent
@cindex UUID
Universally Unique Identifiers (also known as UUIDs) are a way to
assign a globally unique label to some object. The @dfn{uuid} field
type specifier serves that purpose. The synopsis is:
@example
%typedef: Id_t uuid
@end example
@noindent
The format of the uuids is specified as 32 hexadecimal digits,
displayed in five groups separated by hyphens. For example:
@example
550e8400-e29b-41d4-a716-446655440000
@end example
@noindent
@cindex foreign key
There is one other possible field type, @viz{} a foreign key.
The following example
defines the type @code{Maintainer_t} to be of type ``record @code{Hacker}'';
in other words, a foreign key referring to a record in the @code{Hacker} record set.
@example
%typedef: Maintainer_t rec Hacker
@end example
@noindent This essentially means that the values
to be stored in fields of type @code{Maintainer_t} are of whatever
type is defined for the primary key of the @code{Hacker} record set.
Why this is useful is discussed later. @xref{Queries which Join Records}.
@node Constraints on Record Sets
@chapter Constraints on Record Sets
The records in a recfile are by default not restricted to any particular
structure
except that they must contain one or more fields and optional comments.
This provides the format with huge expressive power;
but in many cases, it is also desirable to impose some restrictions in
order to reflect some of the properties of the data stored in the
database. It is also useful in order to preserve data integrity and
thus avoid data corruption.
The following sections describe the usage of some predefined special
fields whose purpose is to impose this kind of restriction on the
structure of the records.
@menu
* Mandatory Fields:: Requiring the presence of fields.
* Prohibited Fields:: Forbidding the presence of fields.
* Allowed Fields:: Restricting the presence of fields.
* Keys and Unique Fields:: Fields characterizing records.
* Size Constraints:: Constraints on the number of records in a set.
* Arbitrary Constraints:: Constraints records must comply with.
@end menu
@node Mandatory Fields
@section Mandatory Fields
@cindex @code{%mandatory}
@cindex mandatory fields
@cindex requiring certain fields in records
@cindex compulsory fields
Sometimes, you want to make sure that @emph{every} record of a particular type
contains certain fields.
To do this, use the special field @code{%mandatory}.
The usage is:
@example
%mandatory: @var{field1} @var{field2} @dots{} @var{fieldN}
@end example
@noindent
The field names are separated by one or more
blank characters.
@cindex field, compulsory fields
@cindex field, mandatory fields
The fields listed in a @code{%mandatory} entry are
non-optional; @ie{} at least one field with this name shall be present
in any record of this kind.
@cindex integrity problems
Records violating this restriction are
invalid and a checking tool will report the situation as
a data integrity failure.
Consider for example an ``address book'' database where each record
stores the information associated with a contact. The records will be
heterogeneous, in the sense they won't all contain exactly the same
fields: the contact of an Internet shop will probably have a
@code{URL} field, while the entry for our grandmother probably won't.
We still want to make sure that every entry has a field with the name
of the contact. In this case, we could use @code{%mandatory} as
follows:
@example
%rec: Contact
%mandatory: Name
Name: Granny
Phone: +12 23456677
Name: Yoyodyne Corp.
Email: sales@@yoyod.com
Phone: +98 43434433
@end example
A word of caution, however: In many situations, especially in day to day social
interaction, it is common to find that certain information is simply unavailable.
For example, although every person has a date of birth, some people will refuse
to provide that information.
It is probably wise therefore to avoid stipulating a field as mandatory, unless it is
essential to the enterprise.
Otherwise,
a data entry clerk faced with this situation will have to make the choice between
dropping the entry entirely or entering some fake data to keep the system happy.
@node Prohibited Fields
@section Prohibited Fields
@cindex @code{%prohibit}
@cindex restricting fields from records
@cindex field, forbidden fields
@cindex prohibited fields
The inverse of @code{%mandatory} is @code{%prohibit}.
Prohibited fields may not occur in @emph{any} record of the given type.
The usage is:
@example
%prohibit: @var{field1} @var{field2} @dots{} @var{fieldN}
@end example
@noindent The field names are separated by one or more blank characters.
@noindent
Fields listed in a @code{%prohibit} entry are
forbidden; @ie{} no field with this name should be present
in any record of this kind.
Again, records violating this restriction
are invalid.
@noindent
Several @code{%prohibit} fields can appear in
the same record descriptor.
The set of prohibited fields
is the union of all the entries.
For example, in the following
database both @code{Id} and @code{id} are prohibited:
@example
%rec: Entry
%prohibit: Id
%prohibit: id
@end example
One possible use case for prohibited fields arises
when some field name is reserved for some future
use.
For example, if we were organizing a sports competition, we would want
competitors to register before the event.
However a competitor's @code{result} should not and cannot be entered
before the competition takes place.
Initially then, we would change the record
descriptor as follows:
@example
%rec: Contact
%mandatory: Name
%prohibit: result
@end example
@noindent
At the start of the event, the @code{%prohibit} line can be deleted, to
allow results to be entered.
@node Allowed Fields
@section Allowed Fields
@cindex @code{%allowed}
@cindex restricting fields from records
@cindex field, allowed fields
@cindex allowed fields
In some cases we know the set of fields that may appear in the records
of a given type, even if they are not mandatory. The @code{%allowed}
special field is used to specify this restriction. The usage is:
@example
%allowed: @var{field1} @var{field2} @dots{} @var{fieldN}
@end example
@noindent The field names are separated by one or more blank
characters.
@noindent
If there are one or more @code{%allowed} fields in a record
descriptor, all fields of all the records in the record set must be in
the union of @code{%allowed}, @code{%mandatory} and @code{%key}.
Otherwise an integrity error is raised.
@noindent
Several @code{%allowed} fields can appear in the same record
descriptor. The set of allowed fields is the union of all the
entries.
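For example (a sketch), the following record descriptor permits only
@code{Email} and @code{Phone} fields to appear in @code{Contact}
records, in addition to the mandatory @code{Name}:
@example
%rec: Contact
%mandatory: Name
%allowed: Email Phone
@end example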
@node Keys and Unique Fields
@section Keys and Unique Fields
@cindex @code{%unique}
@cindex @code{%key}
The @code{%unique} and @code{%key} special fields are
used to avoid several instances of the
same field in a record, and to implement keys in record sets.
Their usage is:
@example
%unique: @var{field1} @var{field2} @dots{} @var{fieldN}
%key: @var{field}
@end example
@noindent
The field names are separated by one or more blank characters.
@cindex unique fields
Normally it is permitted for a record to contain two or more fields of
the same name.
The @code{%unique} special field revokes this permissiveness.
A field declared ``unique'' cannot appear more than once in a single record.
For example, an entry in an address book database could contain an
@code{Age} field. It does not make sense for a single person to be of
several ages. So, a field could be declared as ``unique'' in the
corresponding record descriptor as follows:
@example
%rec: Contact
%mandatory: Name
%unique: Age
@end example
@noindent
Several @code{%unique} fields can appear in the same record
descriptor. The set of unique fields is the union of all the entries.
@cindex primary key
@code{%key} makes the referenced field the primary key of the record
set.
The primary key behaves as if both @code{%unique} and
@code{%mandatory} had been specified for that field.
Additionally, there is a further restriction, @viz{}
a given value of a primary key field may appear no more than once within a
record set.
Consider for example a database of items in stock. Each item is
identified by a numerical @code{Id} field. No item may have more than
one @code{Id}, and no items may exist without an associated
@code{Id}. Additionally, no two items may share the same @code{Id}.
This common situation can be implemented by declaring @code{Id} as
the key in the record descriptor:
@example
%rec: Item
%key: Id
%mandatory: Title
Id: 1
Title: Box
Id: 2
Title: Sticker big
@end example
@noindent
It would not make sense to have several primary keys in a record set.
Thus, it is not allowed to have several @code{%key} fields in the
same record descriptor.
It is also forbidden for two items to share the same `Id' value.
@cindex integrity problems
Both of these situations would be data integrity
violations, and will be reported by a checking tool.
Elsewhere, we discuss how primary keys can be used to link one record set to
another using primary keys together with foreign keys. @xref{Queries which Join Records}.
@node Size Constraints
@section Size Constraints
@cindex @code{%size}
@cindex size, record size
@cindex record size
Sometimes it is desirable to place constraints on entire records.
This can be done with the @code{%size} special field which is used to limit the
number of records in a record set. Its usage is:
@example
%size: [@var{relational_operator}] @var{number}
@end example
@noindent
If no operator is specified then @var{number} is interpreted as the
exact number of records of this type. The number can be any integer
literal, including hexadecimal and octal constants. For example:
@example
%rec: Day
%size: 7
%type: Name enum
+ Monday Tuesday Wednesday Thursday Friday
+ Saturday Sunday
%doc: There should be exactly 7 days.
@end example
@cindex operators
The optional @var{relational_operator} shall be one of @code{<},
@code{<=}, @code{>} and @code{>=}@. For example:
@example
%rec: Item
%key: Id
%size: <= 100
%doc: We have at most 100 different articles.
@end example
It is valid to specify a size of @code{0}, meaning that no records of
this type shall exist in the file.
Only one @code{%size} field shall appear in a record descriptor.
@node Arbitrary Constraints
@section Arbitrary Constraints
@cindex @code{%constraint}
@cindex restricting values of fields
Occasionally, @code{%mandatory}, @code{%prohibit} and @code{%size} are just not flexible enough.
We might, for instance, want to ensure that @emph{if} a field is present,
then it must have a certain relationship to other fields.
Or we might want to stipulate that under certain conditions only, a record contains
a particular field.
To this end, recutils provides a way for arbitrary field constraints to be defined.
These permit restrictions on the presence and/or value of fields, based upon the value or
presence of other fields within that record.
This is done using the @code{%constraint} special field.
Its usage is:
@example
%constraint: @var{expr}
@end example
@noindent
where @var{expr} is a selection expression (@pxref{Selection Expressions}).
When a constraint is
present in a record set it means that all the records of that type
must satisfy the selection expression, @ie{} the evaluation of the
expression with the record returns 1. Otherwise an integrity error is
raised.
@cindex integrity problems
Consider for example a record type @code{Task} featuring two fields of
type date called @code{Start} and @code{End}. We can use a constraint
in the record set to specify that the task cannot start after it
finishes:
@example
%rec: Task
%type: Start,End date
%constraint: Start << End
@end example
@cindex implies, logical implication
@cindex constraints
The ``implies'' operator @code{=>} is especially useful when defining
constraints, since it can be used to specify conditional constraints,
@ie{} constraints applying only in certain records. For example, we
could specify that if a task is closed then it must have an @code{End}
date in the following way:
@example
%rec: Task
%type: Start,End date
%constraint: Start << End
%constraint: Status = 'CLOSED' => #End
@end example
It is acceptable to declare several constraints in the same record
set.
@node Checking Recfiles
@chapter Checking Recfiles
@cindex integrity, checking
Sometimes, when creating a recfile by hand, typographical errors or other
mistakes will occur.
If a recfile contains such mistakes, then one cannot rely upon the results
of queries or other operations.
Fortunately
there is a tool called @command{recfix} which can find these errors.
It is a good idea to get into the habit of running @command{recfix} on
a file after editing it, and before trying other commands.
@menu
* Syntactical Errors:: Fixing structure errors in recfiles.
* Semantic Errors:: Fixing semantic errors in recfiles.
@end menu
@node Syntactical Errors
@section Syntactical Errors
One easy mistake is to forget the colon separating the field name from
its value.
@example
%rec: Article
%key Id
Name: Thing
Id: 0
@end example
@cindex @command{recfix}
@noindent
Running @command{recfix} on this file will immediately tell us that
there is a problem:
@example
$ recfix --check inventory.rec
inventory.rec: 2: error: expected a record
@end example
@noindent
Here, @command{recfix} has diagnosed a problem in the file @file{inventory.rec}
and the problem lies at line 2.
If, as in this case, @command{recfix} shows there is a problem with
the recfile, you should attend to that problem before trying to use
any other recutils program on that file, otherwise strange things
could happen.
The @code{--check} flag is optional; it is not normally needed
because checking is the default operation.
@node Semantic Errors
@section Semantic Errors
@cindex special fields
However, @command{recfix} checks more than the syntactical integrity of the recfile.
It also checks certain semantics and that the data is self-consistent.
To do this, it uses the special fields of the record, some of which were introduced
above (@pxref{Constraints on Record Sets}).
It is a good idea to use the special fields to stipulate the ``enterprise rules''
of the data.
Errors will be reported if any of the following special keywords are present and
the data does not match the stipulated conditions (an example follows the table):
@table @code
@item %mandatory
The mandated fields are missing from a record.
@item %prohibit
The prohibited fields are present in a record.
@item %unique
There is more than one field in a single record of the given name.
@item %key
Two or more records share the same value of the field which is the key field.
@item %typedef and %type
A field has a value which does not conform to the specified type.
@item %size
The number of records does not conform to the specified restriction.
@item %constraint
A field does not conform to the specified constraint.
@item %confidential
An unencrypted value exists for a confidential field.
@end table
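For example (a sketch; the exact wording of the diagnostic may differ),
@command{recfix} would report an integrity error for the following
file, because its only record lacks the mandatory @code{Name} field:
@example
$ cat contacts.rec
%rec: Contact
%mandatory: Name

Email: nobody@@example.com
$ recfix --check contacts.rec
@end example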
@node Remote Descriptors
@chapter Remote Descriptors
@cindex @code{%rec}
The @code{%rec} special field is used for two main purposes: to
identify a record as a record descriptor, and to provide a name for
the described record set. The synopsis of the usage of the field is
the following:
@example
%rec: @var{type} [@var{url_or_file}]
@end example
@noindent
@var{type} is the name of the kind of records described by the
descriptor. It is mandatory to specify it, and it follows the same
lexical conventions used by field names. @xref{Fields}.
There is a non-enforced convention to use singular nouns, because the
name makes reference to the type of a single entity, even if it
applies to all the records contained in the record set. For example,
the following record set contains transactions, and the type specified
in the record descriptor is @code{Transaction}.
@example
%rec: Transaction
Id: 10
Title: House rent
Id: 11
Title: Loan
@end example
@noindent
Only one @code{%rec} field should be in a record descriptor. If
there are more, it is an integrity violation. It is highly
recommended (but not enforced) to place this field in the first
position of the record descriptor.
Sometimes it is convenient to store records of the same type in
different files.
@cindex integrity problems
The duplication of record descriptors in this case would surely lead to
consistency problems.
A possible solution would
be to keep the record descriptor in a separate file and then include
it in any operation by using pipes. For example:
@example
$ cat descriptor.rec data.rec | recsel @dots{}
@end example
@cindex external descriptor
@cindex descriptor, external descriptor
@noindent
For those cases it is more convenient to use an @dfn{external
descriptor}. External descriptors can be built by appending a file path
to the @code{%rec} field value, like:
@example
%rec: FSD_Entry /path/to/file.rec
@end example
The previous example indicates that a record descriptor describing the
@code{FSD_Entry} records shall be read from the file
@file{/path/to/file.rec}. A record descriptor for @code{FSD_Entry}
may or may not exist in the external file. Both relative and absolute paths
can be specified there.
@cindex URL
@cindex remote descriptors
URLs can be used as sources for external descriptors as well. In that
case we talk about @dfn{remote descriptors}. For example:
@example
%rec: Department http://www.myorg.com/Org.rec
@end example
@noindent
The URL shall point to a text file containing rec data. If there is a
record descriptor in the remote file documenting the @code{Department}
type, it will be used.
Note that the local record descriptor can provide additional fields to
``expand'' the record type. For example:
@example
%rec: FSD_Entry http://www.jemarch.net/downloads/FSD.rec
%mandatory: Rating
@end example
@noindent
The record descriptor above includes the contents of the
@code{FSD_Entry} record descriptor from the URL, and adds them to
the local record descriptor, which in this case contains just the
@code{%mandatory} field.
If you are using GNU recutils (@pxref{Invoking the Utilities}) to
process your recfiles, any URL
scheme supported by @code{libcurl} will work.
@node Grouping and Aggregates
@chapter Grouping and Aggregates
Grouping and aggregate functions are two related features which
are useful to extract statistics from a record set, or a
subset of that record set.
@menu
* Grouping Records:: Combining records by fields.
* Aggregate Functions:: Statistics and more.
@end menu
@node Grouping Records
@section Grouping Records
@cindex grouping
Consider a recfile containing a list of items in a shop
inventory. For each item we store its type, its category, its
price, the date of the last sale of an item of that type,
and the number of items currently available in stock. A sample of
such a database could be:
@example
Type: EC Car
Category: Toy
Price: 12.2
LastSell: 20-April-2012
Available: 623
Type: Terria
Category: Food
Price: 0.60
LastSell: 22-April-2012
Available: 8239
Type: Typex
Category: Office
Price: 1.20
LastSell: 22-April-2012
Available: 10878
Type: Notebook
Category: Office
Price: 1.00
LastSell: 21-April-2012
Available: 77455
Type: Sexy Puzzle
Category: Toy
Price: 6.20
LastSell: 6.20
Available: 12
@end example
@noindent
Now imagine we are interested in grouping the contents of the
@code{Items} record set into groups of items of the same category. We
can do it using the @command{-G} command line argument for
@command{recsel}. This argument accepts a list of fields separated by
commas. The argument can be read as ``group by''.
In this case we want to group by @code{Category}, so we would do:
@example
$ recsel -G Category items.rec
Type: Terria
Category: Food
Price: 0.60
LastSell: 22-April-2012
Available: 8239
Type: Typex
Category: Office
Price: 1.20
LastSell: 22-April-2012
Available: 10878
Type: Notebook
Price: 1.00
LastSell: 21-April-2012
Available: 77455
Type: EC Car
Category: Toy
Price: 12.2
LastSell: 20-April-2012
Available: 623
Type: Sexy Puzzle
Price: 6.20
LastSell: 6.20
Available: 12
@end example
@noindent
We can see that the output is three records, corresponding to the three
different categories of items present in the database.
However, we are only interested in the types of products in each category,
so we can remove unwanted information using @code{-p}:
@example
$ recsel -G Category -p Category,Type items.rec
Category: Food
Type: Terria
Category: Office
Type: Typex
Type: Notebook
Category: Toy
Type: EC Car
Type: Sexy Puzzle
@end example
@noindent
It is also possible to group by several fields. We could group by
both @code{Category} and @code{LastSell}:
@example
$ recsel -G Category,LastSell -p Category,LastSell,Type items.rec
Category: Food
LastSell: 22-April-2012
Type: Terria
Category: Office
LastSell: 21-April-2012
Type: Notebook
Category: Office
LastSell: 22-April-2012
Type: Typex
Category: Toy
LastSell: 20-April-2012
Type: EC Car
Category: Toy
LastSell: 6.20
Type: Sexy Puzzle
@end example
@node Aggregate Functions
@section Aggregate Functions
@cindex aggregate function
recutils supports @dfn{aggregate functions}. These are so called
because they accept a record set and a field name as inputs and
generate a single result. Usually this result is numerical.
The supported aggregate functions are the following:
@table @code
@item Count(FIELD)
Counts the number of occurrences of a field.
@item Avg(FIELD)
Calculates the average (mean) of the numerical values of a field.
@item Sum(FIELD)
Calculates the sum of the numerical values of a field.
@item Min(FIELD)
Calculates the minimum numerical value of a field.
@item Max(FIELD)
Calculates the maximum numerical value of a field.
@end table
The aggregate functions are invoked in the field expressions passed to
@command{recsel}. By default they are applied to the totality of the
records in a record set, much as SQL aggregate functions are applied to
the totality of the tuples in a relation. For example, using the items
database from the previous section and the @code{Count} aggregate
function, we can calculate the number of fields named @code{Category}
present in the record set as follows:
@example
$ recsel -p "Count(Category)" items.rec
Count_Category: 5
@end example
@noindent
The result is a field whose name is derived from the function name and
the field passed as its parameter, separated by an underline. This
name scheme probably suffices for most purposes, but it is always
possible to use a rewrite rule to obtain something different:
@example
$ recsel -p "Count(Category):NumCategories" items.rec
NumCategories: 5
@end example
@noindent
You can use different letter case in writing the name of the aggregate, and
this will be reflected in the field name:
@example
$ recsel -p "CoUnT(Category)" items.rec
CoUnT_Category: 5
@end example
@noindent
It is possible to use more than one aggregate function in the field
expression. Suppose we are also interested in the average price of
the items we sell. We can use the @code{Avg} aggregate:
@example
$ recsel -p "Count(Category),Avg(Price)" items.rec
Count_Category: 5
Avg_Price: 4.240000
@end example
@noindent
Now let's add a field along with an aggregate function to the field
expression and see what we get:
@example
$ recsel -p "Type,Avg(Price)" items.rec
Type: EC Car
Avg_Price: 12.200000
Type: Terria
Avg_Price: 0.600000
Type: Typex
Avg_Price: 1.200000
Type: Notebook
Avg_Price: 1
Type: Sexy Puzzle
Avg_Price: 6.200000
@end example
@noindent
We get five records! The reason is that when @emph{only} aggregate
functions are part of the field expression, they are applied to the single
record that would result from concatenating all the records in the record
set together. However, when a regular field appears in the field
expression the aggregate functions are applied to the individual
records. This is still useful in some cases, such as a database of
maintainers:
@example
Name: Jose E. Marchesi
Email: jemarch@@gnu.org
Email: jemarch@@es.gnu.org
Name: Luca Saiu
Email: positron@@gnu.org
@end example
@noindent
Let's see how many emails each maintainer has:
@example
$ recsel -p "Name,Count(Email)" maintainers.rec
Name: Jose E. Marchesi
Count_Email: 2
Name: Luca Saiu
Count_Email: 1
@end example
@noindent
Aggregate functions are most useful when we combine them with
grouping. This is when we are interested in some property of a subset
of the records in the database. For example, the average prices of
each item category stored in the database can be obtained by
executing:
@example
$ recsel -p "Category,Avg(Price)" -G Category items.rec
Category: Food
Avg_Price: 0.600000
Category: Office
Avg_Price: 1.100000
Category: Toy
Avg_Price: 9.200000
@end example
@noindent
If we were interested in the actual prices contributing to each average,
we could do:
@example
$ recsel -p "Category,Price,Avg(Price)" -G Category items.rec
Category: Food
Price: 0.60
Avg_Price: 0.600000
Category: Office
Price: 1.20
Price: 1.00
Avg_Price: 1.100000
Category: Toy
Price: 12.2
Price: 6.20
Avg_Price: 9.200000
@end example
@node Queries which Join Records
@chapter Queries which Join Records
Suppose you wanted to add the residential addresses of the people in
the @file{acquaintances.rec} file from
@ref{Simple Selections}.
One way to do this is as follows:
@example
%type: Dob date
Name: Alfred Nebel
Dob: 20 April 2010
Email: alf@@example.com
Address: 42 Abbeter Way, Inprooving, WORCS
Telephone: 01234 5676789
Name: Mandy Nebel
Dob: 21 February 1972
Email: mandy@@example.com
Address: 42 Abbeter Way, Inprooving, WORCS
Telephone: 01234 5676789
Name: Bertram Nebel
Dob: 3 January 1966
Email: bert@@example.com
Address: 42 Abbeter Way, Inprooving, WORCS
Telephone: 01234 5676789
Name: Charles Spencer
Dob: 4 July 1997
Email: charlie@@example.com
Address: 2 Serpe Rise, Little Worning, SURREY
Telephone: 09876 5432109
Name: Dirk Spencer
Dob: 29 June 1945
Email: dirk@@example.com
Address: 2 Serpe Rise, Little Worning, SURREY
Telephone: 09876 5432109
Name: Ernest Wright
Dob: 26 April 1978
Email: ernie@@example.com
Address: 1 Wanter Rise, Greater Inncombe, BUCKS
@end example
This will work fine.
However, you will notice that there are two addresses at which more than
one person lives (presumably they are members of the same family).
This duplication has a number of disadvantages:
@itemize @minus
@item You have to type (or copy) the same information several times.
@item Should a family move house, then you would have to update the addresses (and telephone number) of all the family members.
@item A typing error in one of the addresses would lead an automatic query to erroneously suggest that the people lived at different addresses.
@item It unnecessarily increases the size of the recfile.
@end itemize
@menu
* Foreign Keys:: Referring to records in another record set.
* Joining Records:: Performing cross-joins.
@end menu
@node Foreign Keys
@section Foreign Keys
@cindex record sets
A better way would be to separate the addresses and people into different record sets.
@cindex duplication, avoiding
The first record set might look like this:
@example
%rec: Person
%type: Dob date
%type: Abode rec Residence
Name: Alfred Nebel
Dob: 20 April 2010
Email: alf@@example.com
Abode: 42AbbeterWay
Name: Mandy Nebel
Dob: 21 February 1972
Email: mandy@@example.com
Mobile: 0555 342123
Abode: 42AbbeterWay
Name: Bertram Nebel
Dob: 3 January 1966
Email: bert@@example.com
Abode: 42AbbeterWay
Name: Charles Spencer
Dob: 4 July 1997
Email: charlie@@example.com
Abode: 2SerpeRise
Name: Dirk Spencer
Dob: 29 June 1945
Email: dirk@@example.com
Mobile: 0555 342123
Abode: 2SerpeRise
Name: Ernest Wright
Dob: 26 April 1978
Abode: ChezGrampa
@end example
@noindent and the second (following in the same file), like this:
@example
%rec: Residence
%key: Id
Address: 42 Abbeter Way, Inprooving, WORCS
Telephone: 01234 5676789
Id: 42AbbeterWay
Address: 2 Serpe Rise, Little Worning, SURREY
Telephone: 09876 5432109
Id: 2SerpeRise
Address: 1 Wanter Rise, Greater Inncombe, BUCKS
Id: ChezGrampa
@end example
Here you can see that there are two record sets @viz{} @code{Person}
and @code{Residence}.
There are six people, but only three residences, because some residences
accommodate more than one person.
@cindex @code{%key}
Note also that the @code{Residence} descriptor has the entry @code{%key: Id}
whilst the @code{Person} descriptor has @code{%type: Abode rec Residence}.
@cindex foreign key
@cindex key, foreign key
@cindex @code{rec}, type description
This is because @code{Abode} is the foreign key which identifies the residence
where a person lives.
@cindex readability
We could have declared the @code{Id} field as @code{%auto}. This would have had
the advantage that we need not manually update it.
However, we decided that the @code{Abode} field values in the @code{Person} records
are better as alphanumeric fields, so that they can contain
human readable values. In this way, it is self-evident by reading a @code{Person}
record where that person lives.
Yet since the @code{Id} field is declared using the @code{%key} special field
name, you can be sure that you don't accidentally reuse an existing key.
@node Joining Records
@section Joining Records
The above example has also added a new field to the @code{Person} record set
to contain that person's mobile phone number. Note that the @code{Telephone}
field belongs to the @code{Residence} record set because that contains the telephone
number of the home,
whereas @code{Mobile} belongs to @code{Person} since mobile telephones are normally
used exclusively by one individual.
If we want to look up the name and address of a person in our recfile, we can
use @command{recsel} as before.
Because we now have more than one record set in the @file{acquaintances.rec}
file, we have to tell @command{recsel} in which record set we want to
look up
records.
We do this with the @code{-t} flag as follows:
@example
$ recsel -t Person -P Name,Abode acquaintances.rec
Alfred Nebel
42AbbeterWay
Mandy Nebel
42AbbeterWay
Bertram Nebel
42AbbeterWay
Charles Spencer
2SerpeRise
Dirk Spencer
2SerpeRise
Ernest Wright
ChezGrampa
@end example
This result tells us the names of all the people in the recfile, as well as
giving a concise and hopefully effective reminder telling us where they live.
However these results would not be useful to someone unacquainted with the
individuals.
They need a list of names and full addresses.
We can use @command{recsel} to produce such a list:
@example
$ recsel -t Person -j Abode acquaintances.rec
Name: Charles Spencer
Dob: 4 July 1997
Email: charlie@@example.com
Abode_Address: 2 Serpe Rise, Little Worning, SURREY
Abode_Telephone: 09876 5432109
Abode_Id: 2SerpeRise
Name: Dirk Spencer
Dob: 29 June 1945
Email: dirk@@example.com
Mobile: 0555 342123
Abode_Address: 2 Serpe Rise, Little Worning, SURREY
Abode_Telephone: 09876 5432109
Abode_Id: 2SerpeRise
Name: Ernest Wright
Dob: 26 April 1978
Abode_Address: 1 Wanter Rise, Greater Inncombe, BUCKS
Abode_Id: ChezGrampa
@end example
The @code{-t} flag we have seen before. It tells @command{recsel} that we want
to extract records of type @code{Person}.
@cindex join
The @code{-j} flag is new. It says that we want to perform a @dfn{join}.
Specifically we want to join the @code{Person} records according to their
@code{Abode} field.
In the above example, @command{recsel} displays several field names which
do not appear anywhere in the input @eg{} @code{Abode_Address}.
This is the @code{Address} field in the record joined by the foreign key @code{Abode}.
In this example probably only the name and address are of interest.
The other information such as date of birth is incidental.
The foreign key @code{Abode_Id} is certainly not wanted in the output since it
is redundant.
As usual, you can use the @code{-P} or @code{-p} options to limit the fields
which will be displayed.
However the full joined field name, if appropriate, must be specified.
So the names and addresses without the other information can be retrieved thus:
@example
$ recsel -t Person -j Abode -p Name,Abode_Address acquaintances.rec
Name: Charles Spencer
Abode_Address: 2 Serpe Rise, Little Worning, SURREY
Name: Dirk Spencer
Abode_Address: 2 Serpe Rise, Little Worning, SURREY
Name: Ernest Wright
Abode_Address: 1 Wanter Rise, Greater Inncombe, BUCKS
@end example
@node Auto-Generated Fields
@chapter Auto-Generated Fields
@cindex @code{%auto}
@cindex automatically generated values
Consider for example a list of articles in stock in a toy store:
@example
%rec: Item
%key: Description
Description: 2cm metal soldier WWII
Amount: 2111
Description: Flying Helicopter Indoor Maxi
Amount: 8
@dots{}
@end example
It would be natural to identify the items by their descriptions, but it
is also error prone: was it ``Flying Helicopter Indoor Maxi'' or
``Flying Helicopter Maxi Indoor''? Was ``Helicopter'' in lower case or
upper case?
@cindex primary key
@cindex key, primary key
@cindex @code{%key}
@cindex ID numbers
Thus it is quite common in databases to use some kind of numeric ``Id'' to
uniquely identify items like those ones, because numbers are
easy to increment and manipulate. So we could add a new
numeric @code{Id} field and use it as the primary key:
@example
%rec: Item
%key: Id
%mandatory: Description
Id: 0
Description: 2cm metal soldier WWII
Amount: 2111
Id: 1
Description: Flying Helicopter Indoor Maxi
Amount: 8
@dots{}
@end example
A problem with this approach is that we must be careful not to assign
already-used ids when we add new articles to the
database. Other than its uniqueness, it is not important which number
is associated with which article.
To ease the management of these Ids, database systems usually provide a
facility called ``auto-counters''. Auto-counters can be implemented in
recfiles using the @code{%auto} directive in the record descriptor.
Its usage is:
@example
%auto: @var{field1} @var{field2} @dots{} @var{fieldN}
@end example
@noindent
The field names in the list are separated by one or more blank characters.
There can be several @code{%auto} fields in the same record
descriptor, the effective list of auto-generated fields being the
union of all the entries.
When @command{recins} inserts a new record in the recfile, it looks
for any declared auto field. If any of these fields are not provided
explicitly in the command line then @command{recins} generates them
along with the user-provided fields. Such auto fields are generated
at the beginning of the new records, in the same order they are found
in the @code{%auto} directives.
For example, consider a @file{items.rec} database with an empty record
set:
@example
%rec: Item
%key: Id
%auto: Id
%mandatory: Description
@end example
@noindent
If we insert a new record and we do not specify an @code{Id} then it
will be generated automatically by @command{recins}:
@example
$ recins -t Item -f Description -v 'recutils t-shirts' \
         -f Amount -v 200 \
         items.rec
$ cat items.rec
%rec: Item
%key: Id
%auto: Id
%mandatory: Description
Id: 0
Description: recutils t-shirts
Amount: 200
@end example
@noindent
The concrete effect of the @code{%auto} directive depends on the type
of the affected field. The following sections document how.
@menu
* Counters:: Generating incremental Ids.
* Unique Identifiers:: Generating universally unique Ids.
* Time-Stamps:: Tracking the creation of records.
@end menu
@node Counters
@section Counters
@cindex counters
If an auto field is of type @code{integer} or @code{range} then any
newly generated field will use the ``next biggest'' unused number in the
record set.
Consider the toy inventory database introduced above. We could
declare the @code{Id} field to be generated automatically:
@example
%rec: Item
%key: Id
%type: Id int
%mandatory: Description
%auto: Id
Id: 0
Description: 2cm metal soldier WWII
Amount: 2111
@end example
@noindent
When the next new item is introduced in the database, @command{recins}
will note the @code{%auto}, and create a new @code{Id} field for the
new record with the next-biggest unused integer, since @code{Id} is
declared to be of type @code{int}. In this example, the new record
would have an Id of @code{1}. The user can still provide an
explicit Id for the new record, in which case the field is not
generated automatically.
Note that if no explicit type is defined for an auto-generated field
then it is assumed to be an integer.
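As a sketch, assuming the record set above is stored in a file named
@file{items.rec}, inserting a second item and asking for the @code{Id}
of the record occupying position 1 shows the automatically assigned
value:
@example
$ recins -t Item -f Description -v 'Flying Helicopter Indoor Maxi' \
         -f Amount -v 8 items.rec
$ recsel -t Item -n 1 -p Id items.rec
Id: 1
@end example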
@node Unique Identifiers
@section Unique Identifiers
@cindex unique identifiers
@cindex uuid
Universally Unique Identifiers, often abbreviated as UUIDs, can also
be auto-generated using recutils. Suppose you maintain a database
with events featuring the following record descriptor:
@example
%rec: Event
%key: Id
%mandatory: Title Date
@end example
@noindent
What would be appropriate to identify each event? We could use an
integer and declare it as auto-generated. After adding two events the
database would look like this:
@example
%rec: Event
%key: Id
%mandatory: Title Date
Id: 0
Title: Team meeting
Date: 12-08-2013
Id: 1
Title: Dave's birthday
Date: 20-12-2013
@end example
@noindent
However, suppose that we want to share our events with other people,
@ie{} to send them event records and to incorporate their records into
our own database. In this case the @code{Id}s would collide. A good
solution is to use @code{uuids} and declare them as @code{auto}:
@example
%rec: Event
%key: Id
%type: Id uuid
%mandatory: Title Date
Id: f81d4fae-7dec-11d0-a765-00a0c91e6bf6
Title: Team meeting
Date: 12-08-2013
Id: f81d4fae-dc18-11d0-a765-a01328400a0c
Title: Dave's birthday
Date: 20-12-2013
@end example
@node Time-Stamps
@section Time-Stamps
@cindex timestamps
Auto generated dates can be used to implement automatic timestamps.
Consider for example a ``Transfer'' record set registering bank
transfers. We want to save a timestamp every time a transfer is done,
so we include an @code{%auto} for the date:
@example
%rec: Transfer
%key: Id
%type: Id int
%type: Date date
%auto: Id Date
@end example
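With that descriptor in place, @command{recins} will add both an
@code{Id} and a @code{Date} to every inserted transfer. A sketch
follows; the @code{From}, @code{To} and @code{Amount} fields are
illustrative, and the exact format of the generated timestamp may
differ:
@example
$ recins -t Transfer -f From -v frgonzalez -f To -v jemarch \
         -f Amount -v 350 transfers.rec
$ recsel -t Transfer transfers.rec
Id: 0
Date: Tue Apr 23 12:30:05 CEST 2013
From: frgonzalez
To: jemarch
Amount: 350
@end example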
@node Encryption
@chapter Encryption
@cindex encryption
For ethical or security reasons it is sometimes necessary that information
in a recfile should not be readable by unauthorized people.
One way to prevent a recfile from being read is to use the security features of
the operating system.
A more secure way would be to encrypt the entire recfile using a free strong encryption program
such as @uref{http://gnu.org/software/gnupg,GnuPG}.
The disadvantage of both these methods is that the entire
recfile has to be secured
when it may well be the case that only certain data need to be protected.
Recutils offers a way to encrypt specified fields in a record, whilst leaving
the rest in clear text.
@menu
* Confidential Fields:: Declaring fields as sensitive data.
* Encrypting Files:: Encrypt confidential fields.
* Decrypting Data:: Reading encrypted fields.
@end menu
@node Confidential Fields
@section Confidential Fields
@cindex @code{%confidential}
@cindex passwords
@cindex confidential data
To specify that a field should be encrypted, use the @code{%confidential}
special field.
This special field declares a set of fields as
@dfn{confidential}, meaning they contain secret data such as
passwords or personal information.
Its usage is:
@example
%confidential: @var{field1} @var{field2} @dots{} @var{fieldN}
@end example
@noindent
The field names are separated by one or more blank characters.
There can be several @code{%confidential} fields in the same record
descriptor, the effective list of confidential fields being the union
of all the entries.
@cindex encrypted fields
Declaring a field as confidential indicates that its contents must not
be stored in plain text, but encrypted with a password-based
mechanism. When the information is retrieved from the database the
confidential fields are unencrypted if the correct password is
provided. Likewise, when information is inserted in the database the
confidential fields are encrypted with some given password.
For example, consider a database of users of some service. For each
user we want to store a name, a login name, an email address and a
password. All this information is public with the obvious exception
of the password. Thus we declare the @code{Password} field as
confidential in the corresponding record descriptor:
@example
%rec: Account
%type: Name line
%type: Login line
%type: Email email
%confidential: Password
@end example
The rec format does not impose the usage of a specific encryption
algorithm, but requires that:
@itemize @minus
@item The algorithm must be password-based.
@item The value of any encrypted field shall begin with the string
@samp{encrypted-} followed by the encrypted data.
@item The encrypted data must be encoded in some ASCII encoding such
as base64.
@end itemize
The above rules assure that it is possible to determine whether a
given field is encrypted. For example, the following is an excerpt
from the account database described above. It contains an entry with
the password encrypted and another with the password unencrypted:
@example
Name: Mr. Foo
Login: foo
Email: foo@@foo.com
Password: encrypted-AAABBBCCDDDEEEFFF
Name: Mr. Bar
Login: bar
Email: bar@@bar.com
Password: secret
@end example
Unencrypted confidential fields are a data integrity error,
and utilities like @command{recfix} will report them.
@cindex integrity problems
The same utility can
be used to ``fix'' the database by encrypting any
unencrypted confidential fields in bulk.
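For example, a sketch, assuming the accounts are stored in a file
@file{accounts.rec} whose confidential fields are still in plain text:
@example
$ recfix --encrypt -s mypassword accounts.rec
@end example
@noindent
After this command every @code{Password} field in the file will be
stored encrypted.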
Nothing prevents the use of several passwords in the same database.
This allows the establishment of several levels of security, or
security profiles. For example, we may want to store different
passwords for different online services:
@example
%rec: Account
%confidential: WebPassword ShellPassword
@end example
@noindent
We could then encrypt WebPassword entries using a password shared
among all the webmasters, and the ShellPassword entries with a more
restricted password available only to the administrator of the
machine.
Note that since the utilities accept only one password at a time,
several different passwords cannot be used in the same invocation. This
means that in the example above the administrator would need to run
@command{recsel} twice in order to decrypt all the encrypted data in
the recfile.
The GNU recutils fully support encrypted fields. See the documentation
for @command{recsel}, @command{recins} and @command{recfix} for details on how
to operate on files containing confidential fields.
@node Encrypting Files
@section Encrypting Files
@command{recins} allows the insertion of encrypted fields in a
database. When the @option{-s} (@option{--password}) command line option is
specified, any field declared as confidential in
the record descriptor will get encrypted using the given passphrase.
If the command is executed interactively and @option{-s} is not used
then the user is asked to provide a password using the terminal. For
example, the invocation:
@example
$ recins -t Account -s mypassword -f Login -v foo -f Password \
         -v secret accounts.rec
@end example
@noindent
will encrypt the value of the @code{Password} field with
@code{mypassword} as long as the field is declared as confidential.
(@pxref{Confidential Fields} for details on confidential fields).
@command{recins} will issue a warning if a confidential field is
inserted in the database but no password was provided to encrypt it.
This is to avoid having unencrypted sensitive data in the recfiles.
@node Decrypting Data
@section Decrypting Data
The contents of confidential fields can be read using the
@option{-s} (@option{--password}) command line option to @command{recsel}. When
used, any selected record containing encrypted fields will try to
decrypt them with the given password. If the operation succeeds then
the output will include the unencrypted data. Otherwise the
ASCII-encoded encrypted data will be emitted.
If @command{recsel} is invoked interactively and no password is
specified with @option{-s}, the user will be asked for a password in
case one is needed. No echo of the password will appear on the screen.
The provided password will be used to decrypt all confidential fields
as if it was specified with @option{-s}.
For example, consider the following database storing information about
the user accounts of some online service. Each entry stores a login,
a full name, email and a password. The password is declared as
confidential:
@example
%rec: Account
%key: Login
%confidential: Password
Login: foo
Name: Mr. Foo
Email: foo@@foo.com
Password: encrypted-AAABBBCCCDDD
Login: bar
Name: Ms. Bar
Email: bar@@bar.org
Password: encrypted-XXXYYYZZZUUU
@end example
@noindent
If we use @command{recsel} to get a list of records of type
@code{Account} without specifying a password, or if the wrong password
was specified in interactive mode, then we would get the following
output with the encrypted values:
@example
$ cat accounts.rec | recsel -t Account -p Login,Password
Login: foo
Password: encrypted-AAABBBCCCDDD
Login: bar
Password: encrypted-XXXYYYZZZUUU
@end example
@noindent
If we specify a password and both entries were encrypted using that
password, we would get the unencrypted values:
@example
$ recsel -t Account -s secret -p Login,Password accounts.rec
Login: foo
Password: foosecret
Login: bar
Password: barsecret
@end example
As mentioned above, a confidential field may be encrypted with
different passwords in different records (@pxref{Confidential Fields}).
For example,
we may have an entry in our database with data about the account of
the administrator of the online service. In that case we might want
to store the password associated with that account using a
different password than that for users. In that case the output of
the last command
would have been:
@example
$ recsel -t Account -s secret -p Login,Password accounts.rec
Login: foo
Password: foosecret
Login: bar
Password: barsecret
Login: admin
Password: encrypted-TTTVVVBBBNNN
@end example
@noindent
We would need to invoke @command{recsel} with the password used to
encrypt the admin entry in order to read it back unencrypted.
@node Generating Reports
@chapter Generating Reports
@cindex reports
Having a list of names and addresses, one might want to use this list
to address envelopes
(say, to send annual greeting cards).
Since addresses are normally written on several lines, it would be appropriate
then to split the @code{Address} field values across multiple lines as described in
@ref{Fields}.
Suitable text can now be obtained thus:
@example
$ recsel -t Person -j Abode -P Name,Abode_Address acquaintances.rec
Charles Spencer
2 Serpe Rise,
Little Worning,
SURREY
Dirk Spencer
2 Serpe Rise,
Little Worning,
SURREY
Ernest Wright
1 Wanter Rise,
Greater Inncombe,
BUCKS
@end example
A business enterprise might want to go one step further and generate letters
(such as an advertisement or a recall notice) to customers.
Since @command{recsel} merely selects records and fields from record sets, on
its own it cannot do this; so
there is another command designed for this purpose, called @command{recfmt}.
@cindex @command{recfmt}
@cindex templates
This command uses a @dfn{template} which defines the general form of the
desired output.
A letter template might look as follows:
@example
@{@{Name@}@}
@{@{Abode_Address@}@}
Dear @{@{Name@}@},
Re: Special offer for January
We are delighted to be able to offer you a 95% discount on all car and
truck hire contracts between 1 January and 2 February. Please call us
to take advantage of this offer.
Yours sincerely,
Karen van Rental (CEO)
^L
@end example
It is best to place such a template into a file, so that you can edit it
as you wish.
Notice the instances of double braces enclosing a field name, @eg{} @code{@{@{Name@}@}}.
These are called @dfn{spots} and indicate places where the respective field's
value should be placed.
@cindex spots
Let's assume this template is in a file called @file{offer.templ}.
We can then pipe the output from @command{recsel} into @command{recfmt}
as follows:
@example
$ recsel -t Person -j Abode acquaintances.rec | recfmt -f offer.templ
Charles Spencer
2 Serpe Rise,
Little Worning,
SURREY
Dear Charles Spencer,
Re: Special offer for January
We are delighted to be able to offer you a 95% discount on all car and
.
.
.
@end example
@noindent For each record that @command{recsel} selects, one copy of
@file{offer.templ} will be generated. Each spot will be replaced
with the field value corresponding to the field name in the spot.
@menu
* Templates:: Formatted output.
@end menu
@node Templates
@section Templates
@cindex templates
A recfmt template is a text string that may contain @dfn{template
spots}. Those spots are substituted in the template using the
information of a given record. Any text that is not within a spot is
copied literally to the output.
Spots are written surrounded by double curly braces, like:
@example
@{@{@dots{}@}@}
@end example
Spots contain selection expressions, that are executed every time the
template is applied to a record. The spot is then replaced by the
string representation of the value returned by the expression.
For example, consider the following template:
@example
Task @{@{Id@}@}: @{@{Summary@}@}
------------------------
@{@{Description@}@}
--
Created at @{@{CreatedAt@}@}
@end example
@noindent
When applied to the following record:
@example
Id: 123
Summary: Fix recfmt.
CreatedAt: 12 December 2010
Description:
+ The recfmt tool shall be fixed, because right
+ now it is leaking 200 megabytes per processed record.
@end example
@noindent
The result is:
@example
Task 123: Fix recfmt.
------------------------
The recfmt tool shall be fixed, because right
now it is leaking 200 megabytes per processed record.
--
Created at 12 December 2010
@end example
You can use any selection expression in the spots, including
conditionals and string concatenation.
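For instance, the following is a sketch of a template whose second spot
holds an expression rather than a plain field name; the ternary
conditional follows the selection expression syntax
(@pxref{Selection Expressions}):
@example
Task @{@{Id@}@}: @{@{Summary@}@}
Urgency: @{@{Id > 100 ? 'high' : 'normal'@}@}
@end example
@noindent
Applied to the task record shown above, this should emit
@samp{Urgency: high}, since the @code{Id} is 123.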
@node Interoperability
@chapter Interoperability
Included in the recutils package are a number of utilities to assist
in the creation
of recfiles using data which already exists in other formats,
and for exporting data from recfiles so that it can be used in other applications.
@menu
* CSV Files:: Converting recfiles to/from csv files.
* Importing MDB Files:: Importing MS Access Databases.
@end menu
@node CSV Files
@section CSV Files
@cindex csv
@cindex comma separated values
Many applications are able to read and write files containing so-called
``comma separated values''.
Such files generally contain tabular data where the columns are separated
by commas and the rows by line feed and/or carriage return characters.
Although record sets are not tables, tables can be easily emulated
using records having the same fields in the same order. For example:
@example
a: value
b: value
c: value
a: value
b: value
c: value
@dots{}
@end example
In several respects records are more flexible than tables:
@itemize @minus
@item Fields can appear in a different order in several records.
@item There can be several fields with the same name in a single record.
@item Records can differ in the number of fields.
@end itemize
It is evident that records, such as those in recfiles, are a more
general structure than comma separated values.
This means that when converting from recfiles to csv files, certain
decisions need to be made.
The @command{rec2csv} utility (@pxref{Invoking rec2csv})
implements an algorithm to deal with this problem
and generate the table that the user expects.
The algorithm works as follows:
@enumerate
@item
The utility first scans the specified
record set, building a list with the names that will become the table
header.
@item
For each field, a header is added with the form:
@example
FIELDNAME[_@var{n}]
@end example
@noindent
where the optional suffix @var{n} is a number in the range @code{2..inf}:
it is the position of the field among the fields sharing its name within
the containing record, so the suffix is only added from the second
occurrence of a given field name onwards.
For example, consider
the following record set:
@example
a: a1
b: b11
b: b12
c: c1
a: a2
b: b2
d: d2
@end example
The corresponding list of headers is then:
@example
a b b_2 c a b d
@end example
@item
Then duplicates are removed:
@example
a b b_2 c d
@end example
@item
The resulting list of headers is then used to build the table in the
generated csv file.
@end enumerate
In the above example the result would be:
@example
"a","b","b_2","c","d"
"a1","b11","b12","c1",
"a2","b2",,,"d2"
@end example
As shown, missing fields are implemented as empty columns in the generated
csv.
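For instance, if the records above are stored in a file named
@file{sample.rec} (a hypothetical name), the conversion sketched above
can be obtained with @command{rec2csv}:
@example
$ rec2csv sample.rec
"a","b","b_2","c","d"
"a1","b11","b12","c1",
"a2","b2",,,"d2"
@end example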
@node Importing MDB Files
@section Importing MDB Files
Access files (@dfn{mdb files}) are collections of several relations,
also known as tables. Tables can be either @dfn{user tables} storing
user data, or @dfn{system tables} storing information such as forms,
queries or the relationships between the tables.
It is possible to get a listing with the names of all tables stored in
a mdb file by calling @command{mdb2rec} in the following way:
@example
$ mdb2rec -l sales.mdb
Customers
Products
Orders
@end example
So @file{sales.mdb} stores user information in the tables Customers,
Products and Orders. If we want to include system tables in the
listing we can use the @samp{-s} command line option:
@example
$ mdb2rec -s -l sales.mdb
MSysObjects
MSysACEs
MSysQueries
MSysRelationships
Customers
Products
Orders
@end example
The tables with names starting with @command{MSys} are system tables.
The data stored in those tables is either not relevant to the recutils
user (used by the Access program to create forms and the like) or is
used in an indirect way by @command{mdb2rec} (such as the information
from MSysRelationships).
Let's read some data from the @file{mdb} file. We can get the
relation of Products in rec format:
@example
$ mdb2rec sales.mdb Products
%rec: Products
%type: ProductID int
%type: ProductName size 80
%type: Discontinued bool
ProductID: 1
ProductName: GNU generation T-shirt
Discontinued: 0
@dots{}
@end example
A @dfn{record descriptor} is created for the record set containing the
generated records, called Products. As seen in the example, @command{mdb2rec} is
able to generate type information for the fields. The list of
customers is similar:
@example
$ mdb2rec sales.mdb Customers
%rec: Customers
%type: CustomerID size 4
%type: CompanyName size 80
%type: ContactName size 60
CustomerID: GSOFT
CompanyName: GNU Soft
ContactName: Jose E. Marchesi
@dots{}
@end example
If no table is specified in the invocation of @command{mdb2rec} then all
the tables in the file are processed, with the exception of the system
tables, which require @samp{-s} to be included:
@example
$ mdb2rec sales.mdb
%rec: Products
@dots{}
%rec: Customers
@dots{}
%rec: Orders
@dots{}
@end example
@node Bash Builtins
@chapter Bash Builtins
@cindex bash
@cindex interactive use
@cindex shell
The command-line utilities described in @ref{Invoking the Utilities} are
designed to be used interactively in the shell.
Together, often in
combination with the standard shell utilities, they provide a fairly
complete user interface.
However, the user's experience can be greatly
improved by a closer integration between the recutils and the shell.
The following sections describe several extensions for @command{bash},
the GNU shell (@pxref{Top,,, bash, The GNU Bourne-Again SHell}).
These extensions make the shell ``aware'' of the recutils.
As with any bash built-in, help is available in the command line using
the @command{help} command. For example:
@example
$ help readrec
@end example
If you installed recutils using a binary package in a GNU/Linux
distribution, odds are that the built-in commands described in this
chapter are already available to you. Otherwise (you get a ``command
not found'' or similar error) you may have to register the built-in
commands with your bash. This is very easy using the @command{enable}
bash command. The registering command for readrec would be:
@example
$ enable -f readrec.so readrec
@end example
Note however that some systems require the full path to
@file{readrec.so} in order for this command to work.
@menu
* readrec:: Exporting the contents of records to the shell.
@end menu
@node readrec
@section readrec
The bash built-in @command{read}, when invoked with no options,
consumes one line from standard input and makes it available in
the predefined @code{REPLY} environment variable, or any other
variable whose name is passed as an argument. This allows processing
data structured in lines in a quite natural way. For example, the
following program prints the third field of each line, with fields
separated by commas, until standard input is exhausted:
@example
# Process one line at a time.
while read
do
  echo "The third field is " `echo $REPLY | cut -d, -f 3`
done
@end example
However, @command{read} is not very useful when it comes to
processing recutils records in the shell. Even though it is
possible to customize the character used by @command{read} to split
the input into records, we would need to ignore the empty records in
the likely case of more than one empty line separating records.
Also, we would need to use @command{recsel} to access the record
fields. Too complicated!
Thus, the @command{readrec} bash built-in is similar to @command{read} with
the difference that it reads records instead of lines. It also
``exports'' the contents of the record to the user as the values of
several environment variables:
@itemize @minus
@item @code{REPLY_REC} is set to the record read from standard input.
@item One shell variable per field found in the record, named after the
field, is set to the (decoded) value of that field. When several
fields with the same name are found in the input record then a bash
array is created instead.
@end itemize
Consider for example the following simple database containing
contacts information:
@example
Name: Mr. Foo
Email: foo@@bar.com
Email: bar@@baz.net
Checked: no
Name: Mr. Bar
Email: bar@@foo.com
Telephone: 999666000
Checked: yes
@end example
@noindent
We would like to write some shell code to send an email to all the
contacts, but only if the contact has not been checked before,
@ie{} the @code{Checked} field contains @code{no}. The following code
snippet would do the job nicely using @command{readrec}:
@example
recsel contacts.rec | while readrec
do
  if [ "$Checked" = "no" ]
  then
    mail -s "You are being checked." $@{Email[0]@} < email.txt
    recset -e "Email = '$Email'" -f Checked -S yes contacts.rec
    sleep 1
  fi
done
@end example
@noindent
Note the usage of the bash array when accessing the primary email
address of each contact. Note also that we update each contact to
figure as ``checked'', using @command{recset}, so she won't get
pestered again the next time the
script is run.
@node Invoking the Utilities
@chapter Invoking the Utilities
Certain options are available in all of these programs. Rather than
writing identical descriptions for each of the programs, they are
listed here.
@anchor{Common Options}
@table @samp
@item --version
Print the version number, then exit successfully.
@item --help
Print a help message, then exit successfully.
@item --
Delimit the option list. Later arguments, if any, are treated as
operands even if they begin with @option{-}. For example,
@code{recsel -- -p} reads from the file named @file{-p}.
@end table
@menu
* Invoking recinf:: Printing information about rec files.
* Invoking recsel:: Selecting records.
* Invoking recins:: Inserting records.
* Invoking recdel:: Deleting records.
* Invoking recset:: Managing fields.
* Invoking recfix:: Fixing broken rec files, and diagnostics.
* Invoking recfmt:: Formatting records using templates.
* Invoking csv2rec:: Converting csv data into rec data.
* Invoking rec2csv:: Converting rec data into csv data.
* Invoking mdb2rec:: Converting mdb files into rec files.
@end menu
@node Invoking recinf
@section Invoking recinf
@cindex @command{recinf}
@command{recinf} reads the given rec files (or the data from
standard input if no file is specified) and prints a summary of the
record types contained in the input.
Synopsis:
@example
recinf [@var{option}]@dots{} [@var{file}]@dots{}
@end example
The default behavior is to emit a line per record type in
the input containing its name and the number of records of that type:
@example
$ recinf hackers.rec tasks.rec
25 Hacker
102 Task
@end example
If the input contains anonymous records, @ie{} records that are before
the first record descriptor, the corresponding output line won't have
a type name:
@example
$ recinf data.rec
10
@end example
In addition to the common options described earlier (@pxref{Common
Options}) the program accepts the following options.
@table @samp
@item -t @var{type}
@itemx --type=@var{type}
Select records of a given type only.
@item -d
@itemx --descriptor
Print all the record descriptors present in the file.
@item -n
@itemx --names-only
Output just the names of the record types found in the input. If the
input contains only anonymous records then output nothing.
@item -S
@itemx --print-sexps
Print the data in the form of sexps (Lisp expressions) instead of rec
format. This option can be useful for, of course, Lisp programs.
@end table
@node Invoking recsel
@section Invoking recsel
@cindex @command{recsel}
@cindex selecting records
@command{recsel} reads the given rec files (or the data in the
standard input if no file is specified) and prints out records (or
part of records) based upon some criteria specified by the user.
@command{recsel} searches rec files for records satisfying certain
criteria. Synopsis:
@example
recsel [@var{option}]@dots{} \
   [-n @var{indexes} | -e @var{record_expr} | -q @var{str} | -m @var{num}] \
   [-c | (-p|-P|-R) @var{field_expr}] \
   [@var{file}]@dots{}
@end example
If no @var{file} is specified then the command acts like a filter, getting
the data from standard input and writing the result to
standard output.
In addition to the common options described earlier (@pxref{Common
Options}) the program accepts the following options.
@noindent
The following @dfn{global options} are available.
@table @samp
@item -i
@itemx --case-insensitive
Make string matching case-insensitive in selection expressions.
@cindex case, in selection expressions
@item -C
@itemx --collapse
Do not separate the records in the output with blank lines.
@item -d
@itemx --include-descriptors
Print record descriptors along with the matched records.
@item -s @var{secret}
@itemx --password=@var{secret}
Try to decrypt confidential fields with the given password.
@item -S
@itemx --sort=@var{fields}
@cindex sorting
Sort the output by the comma-separated list of field names,
@var{fields}. This option takes precedence over any sorting criteria
specified in the corresponding record descriptor with @code{%sort}.
@item -U
@itemx --uniq
Remove duplicated fields in the output records. Fields are
duplicated if they have the same field name
and the same value.
@item -G
@itemx --group-by=@var{fields}
Group the output records by the provided comma-separated list of
@var{fields}. Grouping is performed before sorting.
@end table
The @dfn{selection options} are used to select a subset of
the records in the input.
@table @samp
@item -n @var{indexes}
@itemx --number=@var{indexes}
Match the records occupying the given positions in their record set.
@var{indexes} must be a comma-separated list of numbers or ranges, with
ranges being two numbers separated with dashes. For example, the
following list denotes the first, the third, the fourth and all
records up to the tenth: @samp{-n 0,2,4-9}.
@item -e @var{expr}
@itemx --expression=@var{expr}
A record selection expression (@pxref{Selection Expressions}). Only
the records matched by the expression will be taken into account to
compute the output.
@item -q @var{str}
@itemx --quick=@var{str}
Select records having a field whose value contains the substring
@var{str}.
@item -m @var{num}
@itemx --random=@var{num}
Select @var{num} random records. If @var{num} is zero then select all
the records.
@item -t @var{type}
@itemx --type=@var{type}
Select records of a given type only.
@item -j @var{field}
@itemx --field=@var{field}
Perform an inner join of the record set selected by @option{-t} and
the record set for which @var{field} is a foreign key. @var{field}
must be a field declared with type @code{rec} and thus must be a
foreign key. If a join is performed then any selection expression and
field expression operate on the joined record sets.
@end table
The @dfn{output options} are used to determine what information about
the selected records to display to the user, and how to display it.
@table @samp
@item -p @var{name_list}
@itemx --print=@var{name_list}
List of fields to print for each record. @var{name_list} is a
list of field names separated by commas. For example:
@example
-p Name,Email
@end example
@noindent
means to print the Name and the Email of every matching record, both
the field names and values.
If this option is not specified then all the fields of the matching
records are printed to standard output.
@item -P @var{name_list}
@itemx --print-values=@var{name_list}
Same as @samp{-p}, but print only the values of the selected fields.
@item -R @var{name_list}
@itemx --print-row=@var{name_list}
Same as @samp{-P}, but print the values separated by single
spaces instead of newlines.
@item -c
@itemx --count
If this option is specified then @command{recsel} will print the number of
matching records instead of the records themselves. This option is
incompatible with @option{-p}, @option{-P} and @option{-R}.
@end table
This @dfn{special option} is available to ease the communication
between the recutils and other programs, namely Lisp interpreters.
This option is not intended to be used by human operators.
@table @samp
@item --print-sexps
Print the data using sexps instead of rec format.
@end table
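As a sketch combining several of these options, the following selects
the @code{Person} records of the acquaintances file used in earlier
chapters whose @code{Email} matches @code{example.com}, printing only
the values of the @code{Name} field:
@example
$ recsel -t Person -e "Email ~ 'example.com'" -P Name acquaintances.rec
Alfred Nebel
Mandy Nebel
Bertram Nebel
Charles Spencer
Dirk Spencer
@end example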
@node Invoking recins
@section Invoking recins
@cindex @command{recins}
@cindex inserting new records
@command{recins} adds new records to a rec file or to rec data read
from standard input. Synopsis:
@example
recins [@var{option}]@dots{} [-t @var{type}] \
   [-n @var{indexes} | -e @var{record_expr} | -q @var{str} | -m @var{num}] \
   [-f @var{str} -v @var{str} | -r @var{recdata}]@dots{} \
   [@var{file}]
@end example
The new record to be inserted by the command is constructed by
using pairs of @samp{-f} and @samp{-v} options, or @samp{-r}. Each pair defines a
field. The order of the parameters is significant.
If no @var{file} is specified then the command acts like a filter, getting
the data from standard input and writing the result to
standard output.
If the specified @var{file} does not exist, it is created.
In addition to the common options described earlier (@pxref{Common
Options}) the program accepts the following options.
@table @samp
@item -t
@itemx --type=@var{expr}
The type of the new record. If there is a record set in the input
data matching this type then the new record is added there. Otherwise
a new record set is created. If this parameter is not specified then
the new record is anonymous.
@item -f
@itemx --field=@var{name}
Declares the name of a field. This option must be followed by a
@samp{-v}.
@item -v
@itemx --value=@var{value}
The value of the field being defined.
@item -r
@itemx --record=@var{value}
Add the fields of the record in @var{value}. This option can be
intermixed with @samp{-f @dots{} -v} pairs.
@item -s
@itemx --password
Encrypt confidential fields with the given password.
@item --no-external
Don't use external record descriptors.
@item --verbose
Be verbose when reporting integrity problems.
@item --no-auto
Don't generate @dfn{auto} fields. @xref{Auto-Generated Fields}.
@end table
Record selection arguments are supported too. If they are used
then @command{recins} uses ``replacement mode'': instead of
appending the new record, matched records are replaced by copies of
the provided record. The selection arguments are the same as in
@command{recsel}:
@table @samp
@item -n @var{indexes}
@itemx --number=@var{indexes}
Match the records occupying the given positions in their record set.
@var{indexes} must be a comma-separated list of numbers or ranges, the
ranges being two numbers separated with dashes. For example, the
following list denotes the first, the third, the fourth and all
records up to the tenth: @code{-n 0,2,4-9}.
@item -e @var{record_expr}
@itemx --expression=@var{expr}
A record selection expression (@pxref{Selection Expressions}).
Matching records will get replaced.
@item -q @var{str}
@itemx --quick=@var{str}
Remove records having a field whose value contains the substring
@var{str}.
@item -m @var{num}
@itemx --random=@var{num}
Select @var{num} random records. If @var{num} is zero then all
records are selected, @ie{} no replace mode is activated.
@item -i
@itemx --case-insensitive
Make strings case-insensitive in selection expressions.
@cindex case, in selection expressions
@item --force
Insert the requested record even in potentially dangerous situations,
such as when the data integrity of the database is compromised.
@end table
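For example, a sketch of replacement mode using the toy inventory from
@ref{Auto-Generated Fields}: the matched record (the one whose
@code{Id} is 0) is replaced by the record built from the
@samp{-f}/@samp{-v} pairs:
@example
$ recins -t Item -e "Id = 0" -f Id -v 0 -f Description \
         -v 'recutils t-shirts' -f Amount -v 250 items.rec
@end example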
@node Invoking recdel
@section Invoking recdel
@cindex @command{recdel}
@cindex deleting records
@command{recdel} removes records from a rec file, or from rec data
read from standard input. Synopsis:
@example
recdel [@var{option}]@dots{} [-t @var{type}] \
   [-n @var{indexes} | -e @var{record_expr} | -q @var{str} | -m @var{num}] \
   [@var{file}]
@end example
If no @var{file} is specified then the command acts like a filter,
getting the data from standard input and writing the result to
standard output.
In addition to the common options described earlier (@pxref{Common
Options}) the program accepts the following options.
@table @samp
@item -t
@itemx --type=@var{expr}
Remove records of the given type. If this parameter is not specified
then records of any type will be removed.
@item -n @var{indexes}
@itemx --number=@var{indexes}
Match the records occupying the given positions in their record set.
@var{indexes} must be a comma-separated list of numbers or ranges, the
ranges being two numbers separated with dashes. For example, the
following list denotes the first, the third, the fourth and all
records up to the tenth: @code{-n 0,2,4-9}.
@item -e @var{record_expr}
@itemx --expression=@var{expr}
A record selection expression (@pxref{Selection Expressions}). Only
the records matched by the expression will be removed from the file.
@item -q @var{str}
@itemx --quick=@var{str}
Remove records having a field whose value contains the substring
@var{str}.
@item -m @var{num}
@itemx --random=@var{num}
Remove @var{num} random records. If @var{num} is zero then remove all
the records.
@item -c
@itemx --comment
Comment the matching records out instead of removing them.
@item --force
Delete even in potentially dangerous situations, such as a request
to delete all the records of some type.
@item --no-external
Don't use external record descriptors.
@item -i
@itemx --case-insensitive
Make strings case-insensitive in selection expressions.
@item --verbose
Be verbose when reporting integrity problems.
@end table
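For example, the following sketch comments out, rather than deletes,
the items of the toy inventory whose @code{Amount} is zero:
@example
$ recdel -t Item -c -e "Amount = 0" items.rec
@end example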
@node Invoking recset
@section Invoking recset
@cindex @command{recset}
@cindex editing fields
@command{recset} manipulates the fields of records in a rec file, or
rec data read from standard input. Synopsis:
@example
recset [@var{option}]@dots{} [@var{file}]@dots{}
@end example
If no @var{file} is specified then the command acts like a filter,
getting the data from standard input and writing the result to
standard output.
In addition to the common options described earlier (@pxref{Common
Options}) the program accepts the following options.
Record selection options:
@table @samp
@item -i
@itemx --case-insensitive
Make strings case-insensitive in selection expressions.
@item -t
@itemx --type=@var{expr}
Operate on the records of the given type. If this parameter is not
specified then records of any type will be affected.
@item -n @var{indexes}
@itemx --number=@var{indexes}
Operate on the records occupying the given positions in their record
set. @var{indexes} must be a comma-separated list of numbers or
ranges, the ranges being two numbers separated with dashes. For
example, the following list denotes the first, the third, the fourth
and all records up to the tenth: @code{-n 0,2,4-9}.
@item -e @var{expr}
@itemx --expression=@var{expr}
A record selection expression (@pxref{Selection Expressions}). Only
the records matched by the expression will be processed.
@item -q @var{str}
@itemx --quick=@var{str}
Operate on records having a field whose value contains the substring
@var{str}.
@item -m @var{num}
@itemx --random=@var{num}
Operate on @var{num} random records. If @var{num} is zero then
operate on all the records.
@end table
Field selection options:
@table @samp
@item -f
@itemx --fields=@var{FEX}
Field selection expression (@pxref{Field Expressions}) selecting the
fields to operate on.
@end table
Actions:
@table @samp
@item -s
@itemx --set=@var{value}
Set the value of the selected fields to @var{value}.
@item -a
@itemx --add=@var{value}
Add a new field to the selected record with value @var{value}.
@item -S
@itemx --set-add=@var{value}
Set the value of the selected fields to @var{value}. If any of the
selected fields do not exist in a record, they are added with the
specified value.
@item -r
@itemx --rename=@var{value}
Rename a field; @var{value} must be a valid field name. The field
expression associated with this action must contain a single field
name and an optional subscript. If an entire record set is selected
then the field is renamed in the record descriptor as well.
@item -d
@itemx --delete
Delete the selected fields in the selected records.
@item -c
@itemx --comment
Comment out the selected fields in the selected records.
@item --no-external
Don't use external record descriptors.
@item --verbose
Be verbose when reporting integrity problems.
@item --force
Perform the requested operation even in potentially dangerous
situations, or when the integrity of the data stored in the file is
affected.
@end table
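For example, a sketch using the toy inventory from earlier chapters:
set the @code{Amount} of every item whose description mentions
Helicopter to 10, adding the field where it is missing:
@example
$ recset -t Item -e "Description ~ 'Helicopter'" -f Amount -S 10 items.rec
@end example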
@node Invoking recfix
@section Invoking recfix
@cindex @command{recfix}
@cindex checking recfiles
@cindex integrity, checking
@command{recfix} checks and fixes rec files. Synopsis:
@example
recfix [@var{option}]@dots{} [@var{operation}] [@var{op_option}]@dots{} [@var{file}]
@end example
If no @var{file} is specified then the command acts like a filter,
getting the data from standard input and writing the result to
standard output.
In addition to the common options described earlier (@pxref{Common
Options}) the program accepts the following global options.
@table @samp
@item --no-external
Don't use external record descriptors.
@end table
The effect of running @command{recfix} depends on the operation it
performs. The operation mode is selected by using one of the
following options.
@table @samp
@item --check
Check the integrity of the database contained in the file, printing
diagnostics messages in case something is not right. This is the
default operation.
@item --sort
Perform a physical sort of all the records contained in the file (or
standard input) after checking for its integrity. The sorting
criteria are provided by the @code{%sort} special field, if any. If
there is an integrity failure the sorting is not performed.
@cindex sorting
This is a destructive operation.
@item --decrypt
@itemx --encrypt
Decrypt (encrypt) all the (non-)encrypted fields in the database which are marked
as confidential. This operation requires a password. If no password
is specified with @option{-s} and the program is run in a terminal, a
prompt is given to get the password from the user.
If encryption is performed on a file having encrypted fields, the
operation will fail unless @samp{--force} is used.
These are destructive operations.
@item --auto
Insert auto-generated fields as appropriate in the records which are
missing them.
This is a destructive operation.
@end table
As described above, some operations make use of these additional options:
@table @samp
@item -s @var{secret}
@itemx --password=@var{secret}
Password used to encrypt or decrypt fields.
@item --force
Force potentially dangerous operations.
@end table
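For example, a sketch of checking a recfile and, if it is clean,
physically sorting it (@file{inventory.rec} is a hypothetical file
name):
@example
$ recfix inventory.rec
$ recfix --sort inventory.rec
@end example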
@node Invoking recfmt
@section Invoking recfmt
@cindex @command{recfmt}
@cindex formatted output
@command{recfmt} formats records using templates. Synopsis:
@example
recfmt [@var{option}]@dots{} [@var{template}]
@end example
This program always works as a filter, getting the data from the
standard input and writing the result to standard output.
In addition to the common options described earlier (@pxref{Common
Options}) the program accepts the following options.
@table @samp
@item -f
@itemx --filename=@var{PATH}
Read the template from the file in @var{PATH} instead of the command
line.
@end table
@node Invoking csv2rec
@section Invoking csv2rec
@cindex @command{csv2rec}
@cindex csv
@cindex comma separated values
@command{csv2rec} reads the given comma-separated-values file (or the
data from standard input if no file is specified) and prints out the
converted rec data, if possible. Synopsis:
@example
csv2rec [@var{option}]@dots{} [@var{csv_file}]
@end example
In addition to the common options described earlier (@pxref{Common
Options}) the program accepts the following options.
@table @samp
@item -t @var{type}
@itemx --type=@var{type}
Type of the converted records. If no type is specified then no type
is used.
@item -s
@itemx --strict
Be strict parsing the csv file.
@item -e
@itemx --omit-empty
Omit empty fields.
@end table
@node Invoking rec2csv
@section Invoking rec2csv
@cindex @command{rec2csv}
@cindex csv
@cindex comma separated values
@command{rec2csv} reads the given rec files (or the data in the
standard input if no file is specified) and prints out the converted
comma-separated-values. Synopsis:
@example
rec2csv [@var{option}]@dots{} [@var{rec_file}]@dots{}
@end example
The rec data can be read from files specified in the command line, or
from standard input. The program writes the converted data to
standard output.
In addition to the common options described earlier (@pxref{Common
Options}) the program accepts the following options.
@table @samp
@item -t @var{type}
@itemx --type=@var{type}
Type of the records to convert. If no type is specified then the
default records (with no name) are converted.
@item -S
@itemx --sort=@var{fields}
Sort the output by the comma-separated list of field names
@var{fields}. This option takes precedence over whatever sorting
criteria are specified in the corresponding record descriptor with
@code{%sort}.
@item -d
@itemx --delim=@var{char}
Use @var{char} as the delimiter character separating fields in the
output. Defaults to @code{,}.
@end table
@node Invoking mdb2rec
@section Invoking mdb2rec
@cindex @command{mdb2rec}
@cindex mdb
@cindex MS Access
@command{mdb2rec} reads the given mdb file and prints out the
converted rec data, if possible. Synopsis:
@example
mdb2rec [@var{option}]@dots{} @var{mdb_file} [@var{table}]
@end example
All the tables contained in the mdb file are exported unless a table
is specified in the command line.
In addition to the common options described earlier (@pxref{Common
Options}) the program accepts the following options.
@table @samp
@item -s
@itemx --system-tables
Include system tables in the output.
@item -l
@itemx --list-tables
Dump a list of the table names contained in the mdb file, one per
line.
@item -e
@itemx --keep-empty-fields
Don't prune empty fields in the rec output.
@end table
@node Regular Expressions
@chapter Regular Expressions
@cindex regular expressions
This chapter describes the regular expression syntax supported by the
recutils, as used for example in selection expressions.
The character @samp{.} matches any single character except the null character.
@table @samp
@item +
match one or more occurrences of the previous atom or regexp.
@item ?
match zero or one occurrences of the previous atom or regexp.
@item \+
matches a @samp{+}.
@item \?
matches a @samp{?}.
@end table
Bracket expressions are used to match ranges of characters.
Bracket expressions where the range is backward, for example @samp{[z-a]}, are invalid.
Within square brackets, @samp{\} is taken literally.
Character classes are supported; for example @samp{[[:digit:]]} matches a single decimal digit.
GNU extensions are supported:
@table @samp
@item \w
matches a character within a word
@item \W
matches a character which is not within a word
@item \<
matches the beginning of a word
@item \>
matches the end of a word
@item \b
matches a word boundary
@item \B
matches characters which are not a word boundary
@item \`
matches the beginning of the whole input
@item \'
matches the end of the whole input
@end table
@cindex grouping, within regular expressions
Grouping is performed with parentheses @samp{()}. An unmatched
@samp{)} matches just itself. A backslash followed by a digit acts as
a back-reference and matches the same thing as the previous grouped
expression indicated by that number. For example, @samp{\2} matches
the second group expression. The order of group expressions is
determined by the position of their opening parenthesis @samp{(}.
The alternation operator is @samp{|}.
The characters @samp{^} and @samp{$} always represent the beginning
and end of a string respectively, except within square brackets.
Within brackets, an initial @samp{^} inverts the
character class being matched.
@samp{*}, @samp{+} and @samp{?} are special at any point in a regular
expression except the following places, where they are not allowed:
@enumerate
@item At the beginning of a regular expression
@item After an open-group, @samp{(}
@item After the alternation operator, @samp{|}
@end enumerate
Intervals are specified by @samp{@{} and @samp{@}}. Invalid intervals
such as @samp{a@{1z} are not accepted.
The longest possible match is returned; this applies to the regular
expression as a whole and (subject to this constraint) to
sub-expressions within groups.
@c @lowersections
@include parse-datetime.texi
@c @raisesections
@node GNU Free Documentation License
@appendix GNU Free Documentation License
@cindex license, GNU Free Documentation License
@include fdl.texi
@node Concept Index
@unnumbered Concept Index
@printindex cp
@bye
recutils-1.8/doc/parse-datetime.texi 0000644 0000000 0000000 00000056474 13413351640 014467 0000000 0000000 @c GNU date syntax documentation
@c Copyright (C) 1994-2006, 2009-2019 Free Software Foundation, Inc.
@c Permission is granted to copy, distribute and/or modify this document
@c under the terms of the GNU Free Documentation License, Version 1.3 or
@c any later version published by the Free Software Foundation; with no
@c Invariant Sections, no Front-Cover Texts, and no Back-Cover
@c Texts. A copy of the license is included in the ``GNU Free
@c Documentation License'' file as part of this distribution.
@node Date input formats
@chapter Date input formats
@cindex date input formats
@findex parse_datetime
First, a quote:
@quotation
Our units of temporal measurement, from seconds on up to months, are so
complicated, asymmetrical and disjunctive so as to make coherent mental
reckoning in time all but impossible. Indeed, had some tyrannical god
contrived to enslave our minds to time, to make it all but impossible
for us to escape subjection to sodden routines and unpleasant surprises,
he could hardly have done better than handing down our present system.
It is like a set of trapezoidal building blocks, with no vertical or
horizontal surfaces, like a language in which the simplest thought
demands ornate constructions, useless particles and lengthy
circumlocutions. Unlike the more successful patterns of language and
science, which enable us to face experience boldly or at least
level-headedly, our system of temporal calculation silently and
persistently encourages our terror of time.
@dots{} It is as though architects had to measure length in feet, width
in meters and height in ells; as though basic instruction manuals
demanded a knowledge of five different languages. It is no wonder then
that we often look into our own immediate past or future, last Tuesday
or a week from Sunday, with feelings of helpless confusion. @dots{}
---Robert Grudin, @cite{Time and the Art of Living}.
@end quotation
This section describes the textual date representations that GNU
programs accept. These are the strings you, as a user, can supply as
arguments to the various programs. The C interface (via the
@code{parse_datetime} function) is not described here.
@menu
* General date syntax:: Common rules.
* Calendar date items:: 19 Dec 1994.
* Time of day items:: 9:20pm.
* Time zone items:: EST, PDT, UTC, @dots{}
* Combined date and time of day items:: 1972-09-24T20:02:00,000000-0500.
* Day of week items:: Monday and others.
* Relative items in date strings:: next tuesday, 2 years ago.
* Pure numbers in date strings:: 19931219, 1440.
* Seconds since the Epoch:: @@1078100502.
* Specifying time zone rules:: TZ="America/New_York", TZ="UTC0".
* Authors of parse_datetime:: Bellovin, Eggert, Salz, Berets, et al.
@end menu
@node General date syntax
@section General date syntax
@cindex general date syntax
@cindex items in date strings
A @dfn{date} is a string, possibly empty, containing many items
separated by whitespace. The whitespace may be omitted when no
ambiguity arises. The empty string means the beginning of today (i.e.,
midnight). Order of the items is immaterial. A date string may contain
many flavors of items:
@itemize @bullet
@item calendar date items
@item time of day items
@item time zone items
@item combined date and time of day items
@item day of the week items
@item relative items
@item pure numbers.
@end itemize
@noindent We describe each of these item types in turn, below.
@cindex numbers, written-out
@cindex ordinal numbers
@findex first @r{in date strings}
@findex next @r{in date strings}
@findex last @r{in date strings}
A few ordinal numbers may be written out in words in some contexts. This is
most useful for specifying day of the week items or relative items (see
below). Among the most commonly used ordinal numbers, the word
@samp{last} stands for @math{-1}, @samp{this} stands for 0, and
@samp{first} and @samp{next} both stand for 1. Because the word
@samp{second} stands for the unit of time there is no way to write the
ordinal number 2, but for convenience @samp{third} stands for 3,
@samp{fourth} for 4, @samp{fifth} for 5,
@samp{sixth} for 6, @samp{seventh} for 7, @samp{eighth} for 8,
@samp{ninth} for 9, @samp{tenth} for 10, @samp{eleventh} for 11 and
@samp{twelfth} for 12.
@cindex months, written-out
When a month is written this way, it is still considered to be written
numerically, instead of being ``spelled in full''; this changes the
allowed strings.
@cindex language, in dates
In the current implementation, only English is supported for words and
abbreviations like @samp{AM}, @samp{DST}, @samp{EST}, @samp{first},
@samp{January}, @samp{Sunday}, @samp{tomorrow}, and @samp{year}.
@cindex language, in dates
@cindex time zone item
The output of the @command{date} command
is not always acceptable as a date string,
not only because of the language problem, but also because there is no
standard meaning for time zone items like @samp{IST}@. When using
@command{date} to generate a date string intended to be parsed later,
specify a date format that is independent of language and that does not
use time zone items other than @samp{UTC} and @samp{Z}@. Here are some
ways to do this:
@example
$ LC_ALL=C TZ=UTC0 date
Mon Mar 1 00:21:42 UTC 2004
$ TZ=UTC0 date +'%Y-%m-%d %H:%M:%SZ'
2004-03-01 00:21:42Z
$ date --rfc-3339=ns # --rfc-3339 is a GNU extension.
2004-02-29 16:21:42.692722128-08:00
$ date --rfc-2822 # a GNU extension
Sun, 29 Feb 2004 16:21:42 -0800
$ date +'%Y-%m-%d %H:%M:%S %z' # %z is a GNU extension.
2004-02-29 16:21:42 -0800
$ date +'@@%s.%N' # %s and %N are GNU extensions.
@@1078100502.692722128
@end example
@cindex case, ignored in dates
@cindex comments, in dates
Alphabetic case is completely ignored in dates. Comments may be introduced
between round parentheses, as long as included parentheses are properly
nested. Hyphens not followed by a digit are currently ignored. Leading
zeros on numbers are ignored.
@cindex leap seconds
Invalid dates like @samp{2005-02-29} or times like @samp{24:00} are
rejected. In the typical case of a host that does not support leap
seconds, a time like @samp{23:59:60} is rejected even if it
corresponds to a valid leap second.
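For example, passing such a date to GNU @command{date} results in an
error; the exact wording of the message may vary between versions:
@example
$ date --date='2005-02-29'
date: invalid date '2005-02-29'
@end example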
@node Calendar date items
@section Calendar date items
@cindex calendar date item
A @dfn{calendar date item} specifies a day of the year. It is
specified differently, depending on whether the month is specified
numerically or literally. All these strings specify the same calendar date:
@example
1972-09-24 # ISO 8601.
72-9-24 # Assume 19xx for 69 through 99,
# 20xx for 00 through 68.
72-09-24 # Leading zeros are ignored.
9/24/72 # Common U.S. writing.
24 September 1972
24 Sept 72 # September has a special abbreviation.
24 Sep 72 # Three-letter abbreviations always allowed.
Sep 24, 1972
24-sep-72
24sep72
@end example
The year can also be omitted. In this case, the last specified year is
used, or the current year if none. For example:
@example
9/24
sep 24
@end example
Here are the rules.
@cindex ISO 8601 date format
@cindex date format, ISO 8601
For numeric months, the ISO 8601 format
@samp{@var{year}-@var{month}-@var{day}} is allowed, where @var{year} is
any positive number, @var{month} is a number between 01 and 12, and
@var{day} is a number between 01 and 31. A leading zero must be present
if a number is less than ten. If @var{year} is 68 or smaller, then 2000
is added to it; otherwise, if @var{year} is less than 100,
then 1900 is added to it. The construct
@samp{@var{month}/@var{day}/@var{year}}, popular in the United States,
is accepted. Also @samp{@var{month}/@var{day}}, omitting the year.
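For example, applying the two-digit year rule described above, one
could expect the following from GNU @command{date}:
@example
$ date --date='68-09-24' +%Y
2068
$ date --date='69-09-24' +%Y
1969
@end example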
@cindex month names in date strings
@cindex abbreviations for months
Literal months may be spelled out in full: @samp{January},
@samp{February}, @samp{March}, @samp{April}, @samp{May}, @samp{June},
@samp{July}, @samp{August}, @samp{September}, @samp{October},
@samp{November} or @samp{December}. Literal months may be abbreviated
to their first three letters, possibly followed by an abbreviating dot.
It is also permitted to write @samp{Sept} instead of @samp{September}.
When months are written literally, the calendar date may be given as any
of the following:
@example
@var{day} @var{month} @var{year}
@var{day} @var{month}
@var{month} @var{day} @var{year}
@var{day}-@var{month}-@var{year}
@end example
Or, omitting the year:
@example
@var{month} @var{day}
@end example
@node Time of day items
@section Time of day items
@cindex time of day item
A @dfn{time of day item} in date strings specifies the time on a given
day. Here are some examples, all of which represent the same time:
@example
20:02:00.000000
20:02
8:02pm
20:02-0500 # In EST (U.S. Eastern Standard Time).
@end example
@cindex leap seconds
More generally, the time of day may be given as
@samp{@var{hour}:@var{minute}:@var{second}}, where @var{hour} is
a number between 0 and 23, @var{minute} is a number between 0 and
59, and @var{second} is a number between 0 and 59 possibly followed by
@samp{.} or @samp{,} and a fraction containing one or more digits.
Alternatively,
@samp{:@var{second}} can be omitted, in which case it is taken to
be zero. On the rare hosts that support leap seconds, @var{second}
may be 60.
@findex am @r{in date strings}
@findex pm @r{in date strings}
@findex midnight @r{in date strings}
@findex noon @r{in date strings}
If the time is followed by @samp{am} or @samp{pm} (or @samp{a.m.}
or @samp{p.m.}), @var{hour} is restricted to run from 1 to 12, and
@samp{:@var{minute}} may be omitted (taken to be zero). @samp{am}
indicates the first half of the day, @samp{pm} indicates the second
half of the day. In this notation, 12 is the predecessor of 1:
midnight is @samp{12am} while noon is @samp{12pm}.
(This is the zero-oriented interpretation of @samp{12am} and @samp{12pm},
as opposed to the old tradition derived from Latin
which uses @samp{12m} for noon and @samp{12pm} for midnight.)
@cindex time zone correction
@cindex minutes, time zone correction by
The time may alternatively be followed by a time zone correction,
expressed as @samp{@var{s}@var{hh}@var{mm}}, where @var{s} is @samp{+}
or @samp{-}, @var{hh} is a number of zone hours and @var{mm} is a number
of zone minutes.
The zone minutes term, @var{mm}, may be omitted, in which case
the one- or two-digit correction is interpreted as a number of hours.
You can also separate @var{hh} from @var{mm} with a colon.
When a time zone correction is given this way, it
forces interpretation of the time relative to
Coordinated Universal Time (UTC), overriding any previous
specification for the time zone or the local time zone. For example,
@samp{+0530} and @samp{+05:30} both stand for the time zone 5.5 hours
ahead of UTC (e.g., India).
This is the best way to
specify a time zone correction by fractional parts of an hour.
The maximum zone correction is 24 hours.
Either @samp{am}/@samp{pm} or a time zone correction may be specified,
but not both.
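As a hedged illustration, the following transcript converts a time
given with an explicit zone correction into UTC:
@example
$ TZ=UTC0 date --date='2004-02-29 20:02-0500' +'%Y-%m-%d %H:%M:%SZ'
2004-03-01 01:02:00Z
@end example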
@node Time zone items
@section Time zone items
@cindex time zone item
A @dfn{time zone item} specifies an international time zone, indicated
by a small set of letters, e.g., @samp{UTC} or @samp{Z}
for Coordinated Universal
Time. Any included periods are ignored. By following a
non-daylight-saving time zone with the string @samp{DST} in a separate
word (that is, separated by some white space), the corresponding
daylight saving time zone may be specified.
Alternatively, a non-daylight-saving time zone can be followed by a
time zone correction, to add the two values. This is normally done
only for @samp{UTC}; for example, @samp{UTC+05:30} is equivalent to
@samp{+05:30}.
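For example, the equivalence between @samp{UTC+05:30} and the bare
correction @samp{+05:30} could be checked with something like the
following:
@example
$ TZ=UTC0 date --date='2012-09-24 20:02 UTC+05:30' +'%Y-%m-%d %H:%M:%SZ'
2012-09-24 14:32:00Z
$ TZ=UTC0 date --date='2012-09-24 20:02+05:30' +'%Y-%m-%d %H:%M:%SZ'
2012-09-24 14:32:00Z
@end example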
Time zone items other than @samp{UTC} and @samp{Z}
are obsolescent and are not recommended, because they
are ambiguous; for example, @samp{EST} has a different meaning in
Australia than in the United States. Instead, it's better to use
unambiguous numeric time zone corrections like @samp{-0500}, as
described in the previous section.
If neither a time zone item nor a time zone correction is supplied,
timestamps are interpreted using the rules of the default time zone
(@pxref{Specifying time zone rules}).
@node Combined date and time of day items
@section Combined date and time of day items
@cindex combined date and time of day item
@cindex ISO 8601 date and time of day format
@cindex date and time of day format, ISO 8601
The ISO 8601 date and time of day extended format consists of an ISO
8601 date, a @samp{T} character separator, and an ISO 8601 time of
day. This format is also recognized if the @samp{T} is replaced by a
space.
In this format, the time of day should use 24-hour notation.
Fractional seconds are allowed, with either comma or period preceding
the fraction. ISO 8601 fractional minutes and hours are not
supported. Typically, hosts support nanosecond timestamp resolution;
excess precision is silently discarded.
Here are some examples:
@example
2012-09-24T20:02:00.052-05:00
2012-12-31T23:59:59,999999999+11:00
1970-01-01 00:00Z
@end example
@node Day of week items
@section Day of week items
@cindex day of week item
The explicit mention of a day of the week will forward the date
(only if necessary) to reach that day of the week in the future.
Days of the week may be spelled out in full: @samp{Sunday},
@samp{Monday}, @samp{Tuesday}, @samp{Wednesday}, @samp{Thursday},
@samp{Friday} or @samp{Saturday}. Days may be abbreviated to their
first three letters, optionally followed by a period. The special
abbreviations @samp{Tues} for @samp{Tuesday}, @samp{Wednes} for
@samp{Wednesday} and @samp{Thur} or @samp{Thurs} for @samp{Thursday} are
also allowed.
@findex next @var{day}
@findex last @var{day}
A number may precede a day of the week item to move forward
supplementary weeks. It is best used in expressions like @samp{third
monday}. In this context, @samp{last @var{day}} or @samp{next
@var{day}} is also acceptable; they move one week before or after
the day that @var{day} by itself would represent.
A comma following a day of the week item is ignored.
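For example, assuming the current date is Thursday, 31 July 2003 (the
same date used in a transcript below), a day of the week item could be
expected to behave like this; note that the date is only moved forward
when necessary:
@example
$ date --date='thursday' +%F
2003-07-31
$ date --date='monday' +%F
2003-08-04
@end example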
@node Relative items in date strings
@section Relative items in date strings
@cindex relative items in date strings
@cindex displacement of dates
@dfn{Relative items} adjust a date (or the current date if none) forward
or backward. The effects of relative items accumulate. Here are some
examples:
@example
1 year
1 year ago
3 years
2 days
@end example
@findex year @r{in date strings}
@findex month @r{in date strings}
@findex fortnight @r{in date strings}
@findex week @r{in date strings}
@findex day @r{in date strings}
@findex hour @r{in date strings}
@findex minute @r{in date strings}
The unit of time displacement may be selected by the string @samp{year}
or @samp{month} for moving by whole years or months. These are fuzzy
units, as years and months are not all of equal duration. More precise
units are @samp{fortnight} which is worth 14 days, @samp{week} worth 7
days, @samp{day} worth 24 hours, @samp{hour} worth 60 minutes,
@samp{minute} or @samp{min} worth 60 seconds, and @samp{second} or
@samp{sec} worth one second. An @samp{s} suffix on these units is
accepted and ignored.
@findex ago @r{in date strings}
The unit of time may be preceded by a multiplier, given as an optionally
signed number. Unsigned numbers are taken as positively signed. No
number at all implies 1 for a multiplier. Following a relative item by
the string @samp{ago} is equivalent to preceding the unit by a
multiplier with value @math{-1}.
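For instance, still assuming the current date is Thursday, 31 July 2003
(the date used in the transcript below), multipliers and @samp{ago}
could be expected to combine like this:
@example
$ date --date='3 days ago' +%F
2003-07-28
$ date --date='2 weeks' +%F
2003-08-14
@end example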
@findex day @r{in date strings}
@findex tomorrow @r{in date strings}
@findex yesterday @r{in date strings}
The string @samp{tomorrow} is worth one day in the future (equivalent
to @samp{day}), the string @samp{yesterday} is worth
one day in the past (equivalent to @samp{day ago}).
@findex now @r{in date strings}
@findex today @r{in date strings}
@findex this @r{in date strings}
The strings @samp{now} or @samp{today} are relative items corresponding
to a zero-valued time displacement; these strings come from the fact that
a zero-valued time displacement represents the current time when not
otherwise changed by previous items. They may be used to stress other
items, as in @samp{12:00 today}. The string @samp{this} also has
the meaning of a zero-valued time displacement, but is preferred in
date strings like @samp{this thursday}.
When a relative item causes the resulting date to cross a boundary
where the clocks were adjusted, typically for daylight saving time,
the resulting date and time are adjusted accordingly.
The fuzz in units can cause problems with relative items. For
example, @samp{2003-07-31 -1 month} might evaluate to 2003-07-01,
because 2003-06-31 is an invalid date. To determine the previous
month more reliably, you can ask for the month before the 15th of the
current month. For example:
@example
$ date -R
Thu, 31 Jul 2003 13:02:39 -0700
$ date --date='-1 month' +'Last month was %B?'
Last month was July?
$ date --date="$(date +%Y-%m-15) -1 month" +'Last month was %B!'
Last month was June!
@end example
Also, take care when manipulating dates around clock changes such as
daylight saving leaps. In a few cases these have added or subtracted
as much as 24 hours from the clock, so it is often wise to adopt
universal time by setting the @env{TZ} environment variable to
@samp{UTC0} before embarking on calendrical calculations.
@node Pure numbers in date strings
@section Pure numbers in date strings
@cindex pure numbers in date strings
The precise interpretation of a pure decimal number depends
on the context in the date string.
If the decimal number is of the form @var{yyyy}@var{mm}@var{dd} and no
other calendar date item (@pxref{Calendar date items}) appears before it
in the date string, then @var{yyyy} is read as the year, @var{mm} as the
month number and @var{dd} as the day of the month, for the specified
calendar date.
If the decimal number is of the form @var{hh}@var{mm} and no other time
of day item appears before it in the date string, then @var{hh} is read
as the hour of the day and @var{mm} as the minute of the hour, for the
specified time of day. @var{mm} can also be omitted.
If both a calendar date and a time of day appear to the left of a number
in the date string, but no relative item, then the number overrides the
year.
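As an illustration of the first two rules, something like the following
could be expected:
@example
$ date --date='20040229 2002' +'%Y-%m-%d %H:%M'
2004-02-29 20:02
@end example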
@node Seconds since the Epoch
@section Seconds since the Epoch
If you precede a number with @samp{@@}, it represents an internal
timestamp as a count of seconds. The number can contain an internal
decimal point (either @samp{.} or @samp{,}); any excess precision not
supported by the internal representation is truncated toward minus
infinity. Such a number cannot be combined with any other date
item, as it specifies a complete timestamp.
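For example, the timestamp obtained in an earlier transcript can be
turned back into a calendar date; the following is a sketch of what to
expect:
@example
$ TZ=UTC0 date --date='@@1078100502' +'%Y-%m-%d %H:%M:%SZ'
2004-03-01 00:21:42Z
@end example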
@cindex beginning of time, for POSIX
@cindex epoch, for POSIX
Internally, computer times are represented as a count of seconds since
an epoch---a well-defined point of time. On GNU and
POSIX systems, the epoch is 1970-01-01 00:00:00 UTC, so
@samp{@@0} represents this time, @samp{@@1} represents 1970-01-01
00:00:01 UTC, and so forth. GNU and most other
POSIX-compliant systems support such times as an extension
to POSIX, using negative counts, so that @samp{@@-1}
represents 1969-12-31 23:59:59 UTC.
Traditional Unix systems count seconds with 32-bit two's-complement
integers and can represent times from 1901-12-13 20:45:52 through
2038-01-19 03:14:07 UTC@. More modern systems use 64-bit counts
of seconds with nanosecond subcounts, and can represent all the times
in the known lifetime of the universe to a resolution of 1 nanosecond.
@cindex leap seconds
On most hosts, these counts ignore the presence of leap seconds.
For example, on most hosts @samp{@@915148799} represents 1998-12-31
23:59:59 UTC, @samp{@@915148800} represents 1999-01-01 00:00:00
UTC, and there is no way to represent the intervening leap second
1998-12-31 23:59:60 UTC.
@node Specifying time zone rules
@section Specifying time zone rules
@vindex TZ
Normally, dates are interpreted using the rules of the current time
zone, which in turn are specified by the @env{TZ} environment
variable, or by a system default if @env{TZ} is not set. To specify a
different set of default time zone rules that apply just to one date,
start the date with a string of the form @samp{TZ="@var{rule}"}. The
two quote characters (@samp{"}) must be present in the date, and any
quotes or backslashes within @var{rule} must be escaped by a
backslash.
For example, with the GNU @command{date} command you can
answer the question ``What time is it in New York when a Paris clock
shows 6:30am on October 31, 2004?'' by using a date beginning with
@samp{TZ="Europe/Paris"} as shown in the following shell transcript:
@example
$ export TZ="America/New_York"
$ date --date='TZ="Europe/Paris" 2004-10-31 06:30'
Sun Oct 31 01:30:00 EDT 2004
@end example
In this example, the @option{--date} operand begins with its own
@env{TZ} setting, so the rest of that operand is processed according
to @samp{Europe/Paris} rules, treating the string @samp{2004-10-31
06:30} as if it were in Paris. However, since the output of the
@command{date} command is processed according to the overall time zone
rules, it uses New York time. (Paris was normally six hours ahead of
New York in 2004, but this example refers to a brief Halloween period
when the gap was five hours.)
A @env{TZ} value is a rule that typically names a location in the
@uref{http://www.twinsun.com/tz/tz-link.htm, @samp{tz} database}.
A recent catalog of location names appears in the
@uref{http://twiki.org/cgi-bin/xtra/tzdate, TWiki Date and Time
Gateway}. A few non-GNU hosts require a colon before a
location name in a @env{TZ} setting, e.g.,
@samp{TZ=":America/New_York"}.
The @samp{tz} database includes a wide variety of locations ranging
from @samp{Arctic/Longyearbyen} to @samp{Antarctica/South_Pole}, but
if you are at sea and have your own private time zone, or if you are
using a non-GNU host that does not support the @samp{tz}
database, you may need to use a POSIX rule instead. Simple
POSIX rules like @samp{UTC0} specify a time zone without
daylight saving time; other rules can specify simple daylight saving
regimes. @xref{TZ Variable,, Specifying the Time Zone with @code{TZ},
libc, The GNU C Library}.
@node Authors of parse_datetime
@section Authors of @code{parse_datetime}
@c the anchor keeps the old node name, to try to avoid breaking links
@anchor{Authors of get_date}
@cindex authors of @code{parse_datetime}
@cindex Bellovin, Steven M.
@cindex Salz, Rich
@cindex Berets, Jim
@cindex MacKenzie, David
@cindex Meyering, Jim
@cindex Eggert, Paul
@code{parse_datetime} started life as @code{getdate}, as originally
implemented by Steven M. Bellovin
(@email{smb@@research.att.com}) while at the University of North Carolina
at Chapel Hill. The code was later tweaked by a couple of people on
Usenet, then completely overhauled by Rich $alz (@email{rsalz@@bbn.com})
and Jim Berets (@email{jberets@@bbn.com}) in August, 1990. Various
revisions for the GNU system were made by David MacKenzie, Jim Meyering,
Paul Eggert and others, including renaming it to @code{get_date} to
avoid a conflict with the alternative POSIX function @code{getdate},
and a later rename to @code{parse_datetime}. The POSIX function
@code{getdate} can parse more locale-specific dates using
@code{strptime}, but relies on an environment variable and external
file, and lacks the thread-safety of @code{parse_datetime}.
@cindex Pinard, F.
@cindex Berry, K.
This chapter was originally produced by Fran@,{c}ois Pinard
(@email{pinard@@iro.umontreal.ca}) from the @file{parse_datetime.y} source code,
and then edited by K. Berry (@email{kb@@cs.umb.edu}).
recutils-1.8/build-aux/ 0000755 0000000 0000000 00000000000 13413354013 012052 5 0000000 0000000 recutils-1.8/build-aux/config.rpath 0000755 0000000 0000000 00000044216 13413353030 014307 0000000 0000000 #! /bin/sh
# Output a system dependent set of variables, describing how to set the
# run time search path of shared libraries in an executable.
#
# Copyright 1996-2019 Free Software Foundation, Inc.
# Taken from GNU libtool, 2001
# Originally by Gordon Matzigkeit , 1996
#
# This file is free software; the Free Software Foundation gives
# unlimited permission to copy and/or distribute it, with or without
# modifications, as long as this notice is preserved.
#
# The first argument passed to this file is the canonical host specification,
# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
# or
# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
# The environment variables CC, GCC, LDFLAGS, LD, with_gnu_ld
# should be set by the caller.
#
# The set of defined variables is at the end of this script.
# Known limitations:
# - On IRIX 6.5 with CC="cc", the run time search patch must not be longer
# than 256 bytes, otherwise the compiler driver will dump core. The only
# known workaround is to choose shorter directory names for the build
# directory and/or the installation directory.
# All known linkers require a '.a' archive for static linking (except MSVC,
# which needs '.lib').
libext=a
shrext=.so
host="$1"
host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
# Code taken from libtool.m4's _LT_CC_BASENAME.
for cc_temp in $CC""; do
case $cc_temp in
compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
\-*) ;;
*) break;;
esac
done
cc_basename=`echo "$cc_temp" | sed -e 's%^.*/%%'`
# Code taken from libtool.m4's _LT_COMPILER_PIC.
wl=
if test "$GCC" = yes; then
wl='-Wl,'
else
case "$host_os" in
aix*)
wl='-Wl,'
;;
mingw* | cygwin* | pw32* | os2* | cegcc*)
;;
hpux9* | hpux10* | hpux11*)
wl='-Wl,'
;;
irix5* | irix6* | nonstopux*)
wl='-Wl,'
;;
linux* | k*bsd*-gnu | kopensolaris*-gnu)
case $cc_basename in
ecc*)
wl='-Wl,'
;;
icc* | ifort*)
wl='-Wl,'
;;
lf95*)
wl='-Wl,'
;;
nagfor*)
wl='-Wl,-Wl,,'
;;
pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
wl='-Wl,'
;;
ccc*)
wl='-Wl,'
;;
xl* | bgxl* | bgf* | mpixl*)
wl='-Wl,'
;;
como)
wl='-lopt='
;;
*)
case `$CC -V 2>&1 | sed 5q` in
*Sun\ F* | *Sun*Fortran*)
wl=
;;
*Sun\ C*)
wl='-Wl,'
;;
esac
;;
esac
;;
newsos6)
;;
*nto* | *qnx*)
;;
osf3* | osf4* | osf5*)
wl='-Wl,'
;;
rdos*)
;;
solaris*)
case $cc_basename in
f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
wl='-Qoption ld '
;;
*)
wl='-Wl,'
;;
esac
;;
sunos4*)
wl='-Qoption ld '
;;
sysv4 | sysv4.2uw2* | sysv4.3*)
wl='-Wl,'
;;
sysv4*MP*)
;;
sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
wl='-Wl,'
;;
unicos*)
wl='-Wl,'
;;
uts4*)
;;
esac
fi
# Code taken from libtool.m4's _LT_LINKER_SHLIBS.
hardcode_libdir_flag_spec=
hardcode_libdir_separator=
hardcode_direct=no
hardcode_minus_L=no
case "$host_os" in
cygwin* | mingw* | pw32* | cegcc*)
# FIXME: the MSVC++ port hasn't been tested in a loooong time
# When not using gcc, we currently assume that we are using
# Microsoft Visual C++.
if test "$GCC" != yes; then
with_gnu_ld=no
fi
;;
interix*)
# we just hope/assume this is gcc and not c89 (= MSVC++)
with_gnu_ld=yes
;;
openbsd*)
with_gnu_ld=no
;;
esac
ld_shlibs=yes
if test "$with_gnu_ld" = yes; then
# Set some defaults for GNU ld with shared library support. These
# are reset later if shared libraries are not supported. Putting them
# here allows them to be overridden if necessary.
# Unlike libtool, we use -rpath here, not --rpath, since the documented
# option of GNU ld is called -rpath, not --rpath.
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
case "$host_os" in
aix[3-9]*)
# On AIX/PPC, the GNU linker is very broken
if test "$host_cpu" != ia64; then
ld_shlibs=no
fi
;;
amigaos*)
case "$host_cpu" in
powerpc)
;;
m68k)
hardcode_libdir_flag_spec='-L$libdir'
hardcode_minus_L=yes
;;
esac
;;
beos*)
if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
:
else
ld_shlibs=no
fi
;;
cygwin* | mingw* | pw32* | cegcc*)
# hardcode_libdir_flag_spec is actually meaningless, as there is
# no search path for DLLs.
hardcode_libdir_flag_spec='-L$libdir'
if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then
:
else
ld_shlibs=no
fi
;;
haiku*)
;;
interix[3-9]*)
hardcode_direct=no
hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
;;
gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
:
else
ld_shlibs=no
fi
;;
netbsd*)
;;
solaris*)
if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then
ld_shlibs=no
elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
:
else
ld_shlibs=no
fi
;;
sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
case `$LD -v 2>&1` in
*\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*)
ld_shlibs=no
;;
*)
if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`'
else
ld_shlibs=no
fi
;;
esac
;;
sunos4*)
hardcode_direct=yes
;;
*)
if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
:
else
ld_shlibs=no
fi
;;
esac
if test "$ld_shlibs" = no; then
hardcode_libdir_flag_spec=
fi
else
case "$host_os" in
aix3*)
# Note: this linker hardcodes the directories in LIBPATH if there
# are no directories specified by -L.
hardcode_minus_L=yes
if test "$GCC" = yes; then
# Neither direct hardcoding nor static linking is supported with a
# broken collect2.
hardcode_direct=unsupported
fi
;;
aix[4-9]*)
if test "$host_cpu" = ia64; then
# On IA64, the linker does run time linking by default, so we don't
# have to do anything special.
aix_use_runtimelinking=no
else
aix_use_runtimelinking=no
# Test if we are trying to use run time linking or normal
# AIX style linking. If -brtl is somewhere in LDFLAGS, we
# need to do runtime linking.
case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
for ld_flag in $LDFLAGS; do
if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
aix_use_runtimelinking=yes
break
fi
done
;;
esac
fi
hardcode_direct=yes
hardcode_libdir_separator=':'
if test "$GCC" = yes; then
case $host_os in aix4.[012]|aix4.[012].*)
collect2name=`${CC} -print-prog-name=collect2`
if test -f "$collect2name" && \
strings "$collect2name" | grep resolve_lib_name >/dev/null
then
# We have reworked collect2
:
else
# We have old collect2
hardcode_direct=unsupported
hardcode_minus_L=yes
hardcode_libdir_flag_spec='-L$libdir'
hardcode_libdir_separator=
fi
;;
esac
fi
# Begin _LT_AC_SYS_LIBPATH_AIX.
echo 'int main () { return 0; }' > conftest.c
${CC} ${LDFLAGS} conftest.c -o conftest
aix_libpath=`dump -H conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; }
}'`
if test -z "$aix_libpath"; then
aix_libpath=`dump -HX64 conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; }
}'`
fi
if test -z "$aix_libpath"; then
aix_libpath="/usr/lib:/lib"
fi
rm -f conftest.c conftest
# End _LT_AC_SYS_LIBPATH_AIX.
if test "$aix_use_runtimelinking" = yes; then
hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
else
if test "$host_cpu" = ia64; then
hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
else
hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
fi
fi
;;
amigaos*)
case "$host_cpu" in
powerpc)
;;
m68k)
hardcode_libdir_flag_spec='-L$libdir'
hardcode_minus_L=yes
;;
esac
;;
bsdi[45]*)
;;
cygwin* | mingw* | pw32* | cegcc*)
# When not using gcc, we currently assume that we are using
# Microsoft Visual C++.
# hardcode_libdir_flag_spec is actually meaningless, as there is
# no search path for DLLs.
hardcode_libdir_flag_spec=' '
libext=lib
;;
darwin* | rhapsody*)
hardcode_direct=no
if { case $cc_basename in ifort*) true;; *) test "$GCC" = yes;; esac; }; then
:
else
ld_shlibs=no
fi
;;
dgux*)
hardcode_libdir_flag_spec='-L$libdir'
;;
freebsd2.[01]*)
hardcode_direct=yes
hardcode_minus_L=yes
;;
freebsd* | dragonfly*)
hardcode_libdir_flag_spec='-R$libdir'
hardcode_direct=yes
;;
hpux9*)
hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
hardcode_libdir_separator=:
hardcode_direct=yes
# hardcode_minus_L: Not really in the search PATH,
# but as the default location of the library.
hardcode_minus_L=yes
;;
hpux10*)
if test "$with_gnu_ld" = no; then
hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
hardcode_libdir_separator=:
hardcode_direct=yes
# hardcode_minus_L: Not really in the search PATH,
# but as the default location of the library.
hardcode_minus_L=yes
fi
;;
hpux11*)
if test "$with_gnu_ld" = no; then
hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
hardcode_libdir_separator=:
case $host_cpu in
hppa*64*|ia64*)
hardcode_direct=no
;;
*)
hardcode_direct=yes
# hardcode_minus_L: Not really in the search PATH,
# but as the default location of the library.
hardcode_minus_L=yes
;;
esac
fi
;;
irix5* | irix6* | nonstopux*)
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
hardcode_libdir_separator=:
;;
netbsd*)
hardcode_libdir_flag_spec='-R$libdir'
hardcode_direct=yes
;;
newsos6)
hardcode_direct=yes
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
hardcode_libdir_separator=:
;;
*nto* | *qnx*)
;;
openbsd*)
if test -f /usr/libexec/ld.so; then
hardcode_direct=yes
if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
else
case "$host_os" in
openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*)
hardcode_libdir_flag_spec='-R$libdir'
;;
*)
hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
;;
esac
fi
else
ld_shlibs=no
fi
;;
os2*)
hardcode_libdir_flag_spec='-L$libdir'
hardcode_minus_L=yes
;;
osf3*)
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
hardcode_libdir_separator=:
;;
osf4* | osf5*)
if test "$GCC" = yes; then
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
else
# Both cc and cxx compiler support -rpath directly
hardcode_libdir_flag_spec='-rpath $libdir'
fi
hardcode_libdir_separator=:
;;
solaris*)
hardcode_libdir_flag_spec='-R$libdir'
;;
sunos4*)
hardcode_libdir_flag_spec='-L$libdir'
hardcode_direct=yes
hardcode_minus_L=yes
;;
sysv4)
case $host_vendor in
sni)
hardcode_direct=yes # is this really true???
;;
siemens)
hardcode_direct=no
;;
motorola)
hardcode_direct=no #Motorola manual says yes, but my tests say they lie
;;
esac
;;
sysv4.3*)
;;
sysv4*MP*)
if test -d /usr/nec; then
ld_shlibs=yes
fi
;;
sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
;;
sysv5* | sco3.2v5* | sco5v6*)
hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`'
hardcode_libdir_separator=':'
;;
uts4*)
hardcode_libdir_flag_spec='-L$libdir'
;;
*)
ld_shlibs=no
;;
esac
fi
# Check dynamic linker characteristics
# Code taken from libtool.m4's _LT_SYS_DYNAMIC_LINKER.
# Unlike libtool.m4, here we don't care about _all_ names of the library, but
# only about the one the linker finds when passed -lNAME. This is the last
# element of library_names_spec in libtool.m4, or possibly two of them if the
# linker has special search rules.
library_names_spec= # the last element of library_names_spec in libtool.m4
libname_spec='lib$name'
case "$host_os" in
aix3*)
library_names_spec='$libname.a'
;;
aix[4-9]*)
library_names_spec='$libname$shrext'
;;
amigaos*)
case "$host_cpu" in
powerpc*)
library_names_spec='$libname$shrext' ;;
m68k)
library_names_spec='$libname.a' ;;
esac
;;
beos*)
library_names_spec='$libname$shrext'
;;
bsdi[45]*)
library_names_spec='$libname$shrext'
;;
cygwin* | mingw* | pw32* | cegcc*)
shrext=.dll
library_names_spec='$libname.dll.a $libname.lib'
;;
darwin* | rhapsody*)
shrext=.dylib
library_names_spec='$libname$shrext'
;;
dgux*)
library_names_spec='$libname$shrext'
;;
freebsd[23].*)
library_names_spec='$libname$shrext$versuffix'
;;
freebsd* | dragonfly*)
library_names_spec='$libname$shrext'
;;
gnu*)
library_names_spec='$libname$shrext'
;;
haiku*)
library_names_spec='$libname$shrext'
;;
hpux9* | hpux10* | hpux11*)
case $host_cpu in
ia64*)
shrext=.so
;;
hppa*64*)
shrext=.sl
;;
*)
shrext=.sl
;;
esac
library_names_spec='$libname$shrext'
;;
interix[3-9]*)
library_names_spec='$libname$shrext'
;;
irix5* | irix6* | nonstopux*)
library_names_spec='$libname$shrext'
case "$host_os" in
irix5* | nonstopux*)
libsuff= shlibsuff=
;;
*)
case $LD in
*-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= ;;
*-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 ;;
*-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 ;;
*) libsuff= shlibsuff= ;;
esac
;;
esac
;;
linux*oldld* | linux*aout* | linux*coff*)
;;
linux* | k*bsd*-gnu | kopensolaris*-gnu)
library_names_spec='$libname$shrext'
;;
knetbsd*-gnu)
library_names_spec='$libname$shrext'
;;
netbsd*)
library_names_spec='$libname$shrext'
;;
newsos6)
library_names_spec='$libname$shrext'
;;
*nto* | *qnx*)
library_names_spec='$libname$shrext'
;;
openbsd*)
library_names_spec='$libname$shrext$versuffix'
;;
os2*)
libname_spec='$name'
shrext=.dll
library_names_spec='$libname.a'
;;
osf3* | osf4* | osf5*)
library_names_spec='$libname$shrext'
;;
rdos*)
;;
solaris*)
library_names_spec='$libname$shrext'
;;
sunos4*)
library_names_spec='$libname$shrext$versuffix'
;;
sysv4 | sysv4.3*)
library_names_spec='$libname$shrext'
;;
sysv4*MP*)
library_names_spec='$libname$shrext'
;;
sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
library_names_spec='$libname$shrext'
;;
tpf*)
library_names_spec='$libname$shrext'
;;
uts4*)
library_names_spec='$libname$shrext'
;;
esac
sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
escaped_wl=`echo "X$wl" | sed -e 's/^X//' -e "$sed_quote_subst"`
shlibext=`echo "$shrext" | sed -e 's,^\.,,'`
escaped_libname_spec=`echo "X$libname_spec" | sed -e 's/^X//' -e "$sed_quote_subst"`
escaped_library_names_spec=`echo "X$library_names_spec" | sed -e 's/^X//' -e "$sed_quote_subst"`
escaped_hardcode_libdir_flag_spec=`echo "X$hardcode_libdir_flag_spec" | sed -e 's/^X//' -e "$sed_quote_subst"`
LC_ALL=C sed -e 's/^\([a-zA-Z0-9_]*\)=/acl_cv_\1=/' <